From 45c750f8ced175aa3ed37a2a6a15526969607348 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Fri, 19 Apr 2024 19:23:32 +0530 Subject: [PATCH 01/38] feat: update feedback + feedback response type + _put for API call --- portkey_ai/api_resources/apis/api_resource.py | 6 + portkey_ai/api_resources/apis/feedback.py | 66 +++++++-- portkey_ai/api_resources/base_client.py | 140 ++++++++++++++++++ portkey_ai/api_resources/client.py | 2 + .../api_resources/types/feedback_type.py | 12 ++ 5 files changed, 213 insertions(+), 13 deletions(-) create mode 100644 portkey_ai/api_resources/types/feedback_type.py diff --git a/portkey_ai/api_resources/apis/api_resource.py b/portkey_ai/api_resources/apis/api_resource.py index 205b5213..1dc6daad 100644 --- a/portkey_ai/api_resources/apis/api_resource.py +++ b/portkey_ai/api_resources/apis/api_resource.py @@ -19,6 +19,9 @@ def __init__(self, client: APIClient) -> None: def _post(self, *args, **kwargs): return self._client._post(*args, **kwargs) + def _put(self, *args, **kwargs): + return self._client._put(*args, **kwargs) + class AsyncAPIResource: _client: AsyncAPIClient @@ -32,6 +35,9 @@ def __init__(self, client: AsyncAPIClient) -> None: async def _post(self, *args, **kwargs): return await self._client._post(*args, **kwargs) + + async def _put(self, *args, **kwargs): + return await self._client._put(*args, **kwargs) async def _sleep(self, seconds: float) -> None: await asyncio.sleep(seconds) diff --git a/portkey_ai/api_resources/apis/feedback.py b/portkey_ai/api_resources/apis/feedback.py index 1d14479e..c7374d6f 100644 --- a/portkey_ai/api_resources/apis/feedback.py +++ b/portkey_ai/api_resources/apis/feedback.py @@ -2,7 +2,8 @@ from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient from portkey_ai.api_resources.streaming import AsyncStream, Stream -from portkey_ai.api_resources.utils import GenericResponse, PortkeyApiPaths +from portkey_ai.api_resources.types.feedback_type import FeedbackResponse +from portkey_ai.api_resources.utils import PortkeyApiPaths class Feedback(APIResource): @@ -16,26 +17,46 @@ def create( value: Optional[int] = None, weight: Optional[float] = None, metadata: Optional[Dict[str, Any]] = None - ) -> GenericResponse: + ) -> FeedbackResponse: body = dict(trace_id=trace_id, value=value, weight=weight, metadata=metadata) return self._post( PortkeyApiPaths.FEEDBACK_API, body=body, params=None, - cast_to=GenericResponse, - stream_cls=Stream[GenericResponse], + cast_to=FeedbackResponse, + stream_cls=Stream[FeedbackResponse], stream=False, headers={}, ) - def bulk_create(self, *, feedbacks: List[Dict[str, Any]]) -> GenericResponse: + def bulk_create(self, *, feedbacks: List[Dict[str, Any]]) -> FeedbackResponse: body = feedbacks return self._post( PortkeyApiPaths.FEEDBACK_API, body=body, params=None, - cast_to=GenericResponse, - stream_cls=Stream[GenericResponse], + cast_to=FeedbackResponse, + stream_cls=Stream[FeedbackResponse], + stream=False, + headers={}, + ) + + def update( + self, + *, + feedback_id: Optional[str] = None, + value: Optional[int] = None, + weight: Optional[float] = None, + metadata: Optional[Dict[str, Any]] = None + ) -> FeedbackResponse: + body = dict(value=value, weight=weight, metadata=metadata) + + return self._put( + f"{PortkeyApiPaths.FEEDBACK_API}/{feedback_id}", + body=body, + params=None, + cast_to=FeedbackResponse, + stream_cls=Stream[FeedbackResponse], stream=False, headers={}, ) @@ -52,26 +73,45 @@ async 
def create(
         value: Optional[int] = None,
         weight: Optional[float] = None,
         metadata: Optional[Dict[str, Any]] = None
-    ) -> GenericResponse:
+    ) -> FeedbackResponse:
         body = dict(trace_id=trace_id, value=value, weight=weight, metadata=metadata)
         return await self._post(
             PortkeyApiPaths.FEEDBACK_API,
             body=body,
             params=None,
-            cast_to=GenericResponse,
-            stream_cls=AsyncStream[GenericResponse],
+            cast_to=FeedbackResponse,
+            stream_cls=AsyncStream[FeedbackResponse],
             stream=False,
             headers={},
         )
 
-    async def bulk_create(self, *, feedbacks: List[Dict[str, Any]]) -> GenericResponse:
+    async def bulk_create(self, *, feedbacks: List[Dict[str, Any]]) -> FeedbackResponse:
         body = feedbacks
         return await self._post(
             PortkeyApiPaths.FEEDBACK_API,
             body=body,
             params=None,
-            cast_to=GenericResponse,
-            stream_cls=AsyncStream[GenericResponse],
+            cast_to=FeedbackResponse,
+            stream_cls=AsyncStream[FeedbackResponse],
             stream=False,
             headers={},
         )
+
+    async def update(
+        self,
+        *,
+        feedback_id: Optional[str] = None,
+        value: Optional[int] = None,
+        weight: Optional[float] = None,
+        metadata: Optional[Dict[str, Any]] = None
+    ) -> FeedbackResponse:
+        body = dict(value=value, weight=weight, metadata=metadata)
+        return await self._put(
+            f"{PortkeyApiPaths.FEEDBACK_API}/{feedback_id}",
+            body=body,
+            params=None,
+            cast_to=FeedbackResponse,
+            stream_cls=AsyncStream[FeedbackResponse],
+            stream=False,
+            headers={},
+        )
diff --git a/portkey_ai/api_resources/base_client.py b/portkey_ai/api_resources/base_client.py
index b40b4c98..7c763723 100644
--- a/portkey_ai/api_resources/base_client.py
+++ b/portkey_ai/api_resources/base_client.py
@@ -181,6 +181,76 @@ def _post(
         )
         return res
 
+    @overload
+    def _put(
+        self,
+        path: str,
+        *,
+        body: Mapping[str, Any],
+        cast_to: Type[ResponseT],
+        stream: Literal[True],
+        stream_cls: type[StreamT],
+        params: Mapping[str, str],
+        headers: Mapping[str, str],
+    ) -> StreamT:
+        ...
+
+    @overload
+    def _put(
+        self,
+        path: str,
+        *,
+        body: Mapping[str, Any],
+        cast_to: Type[ResponseT],
+        stream: Literal[False],
+        stream_cls: type[StreamT],
+        params: Mapping[str, str],
+        headers: Mapping[str, str],
+    ) -> ResponseT:
+        ...
+
+    @overload
+    def _put(
+        self,
+        path: str,
+        *,
+        body: Mapping[str, Any],
+        cast_to: Type[ResponseT],
+        stream: bool,
+        stream_cls: type[StreamT],
+        params: Mapping[str, str],
+        headers: Mapping[str, str],
+    ) -> Union[ResponseT, StreamT]:
+        ...
+
+    def _put(
+        self,
+        path: str,
+        *,
+        body: Mapping[str, Any],
+        cast_to: Type[ResponseT],
+        stream: bool,
+        stream_cls: type[StreamT],
+        params: Mapping[str, str],
+        headers: Mapping[str, str],
+    ) -> Union[ResponseT, StreamT]:
+        opts = self._construct(
+            method="put",
+            url=path,
+            body=body,
+            stream=stream,
+            params=params,
+            headers=headers,
+        )
+        res = self._request(
+            options=opts,
+            stream=stream,
+            cast_to=cast_to,
+            stream_cls=stream_cls,
+        )
+        return res
+
     def _construct_generate_options(
         self,
         *,
@@ -530,6 +600,76 @@ async def _post(
         )
         return res
 
+    @overload
+    async def _put(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Mapping[str, Any],
+        stream: Literal[False],
+        stream_cls: type[AsyncStreamT],
+        params: Mapping[str, str],
+        headers: Mapping[str, str],
+    ) -> ResponseT:
+        ...
+
+    @overload
+    async def _put(
+        self,
+        path: str,
+        *,
+        cast_to: Type[ResponseT],
+        body: Mapping[str, Any],
+        stream: Literal[True],
+        stream_cls: type[AsyncStreamT],
+        params: Mapping[str, str],
+        headers: Mapping[str, str],
+    ) -> AsyncStreamT:
+        ...
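+    # A note on this pattern (illustrative sketch): the paired `@overload`
+    # stubs exist only for type-checkers -- a call with `stream=True`
+    # resolves to the stream wrapper type, `stream=False` to the parsed
+    # `cast_to` model -- while the single undecorated `_put` carries the
+    # runtime implementation. A hypothetical call, mirroring the feedback
+    # update added in this patch:
+    #
+    #     await self._put(
+    #         f"{PortkeyApiPaths.FEEDBACK_API}/{feedback_id}",
+    #         body=body,
+    #         params=None,
+    #         cast_to=FeedbackResponse,
+    #         stream_cls=AsyncStream[FeedbackResponse],
+    #         stream=False,
+    #         headers={},
+    #     )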
+ + @overload + async def _put( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Mapping[str, Any], + stream: bool, + stream_cls: type[AsyncStreamT], + params: Mapping[str, str], + headers: Mapping[str, str], + ) -> Union[ResponseT, AsyncStreamT]: + ... + + async def _put( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Mapping[str, Any], + stream: bool, + stream_cls: type[AsyncStreamT], + params: Mapping[str, str], + headers: Mapping[str, str], + ) -> Union[ResponseT, AsyncStreamT]: + + opts = await self._construct( + method="put", + url=path, + body=body, + stream=stream, + params=params, + headers=headers, + ) + res = await self._request( + options=opts, + stream=stream, + cast_to=cast_to, + stream_cls=stream_cls, + ) + return res + async def _construct_generate_options( self, *, diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index d927906f..de2ef395 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -16,6 +16,7 @@ class Portkey(APIClient): generations: apis.Generations prompts: apis.Prompts embeddings: apis.Embeddings + feedback: apis.Feedback images: apis.Images files: apis.MainFiles models: apis.Models @@ -104,6 +105,7 @@ class AsyncPortkey(AsyncAPIClient): generations: apis.AsyncGenerations prompts: apis.AsyncPrompts embeddings: apis.AsyncEmbeddings + feedback: apis.AsyncFeedback images: apis.AsyncImages files: apis.AsyncMainFiles models: apis.AsyncModels diff --git a/portkey_ai/api_resources/types/feedback_type.py b/portkey_ai/api_resources/types/feedback_type.py new file mode 100644 index 00000000..511c1585 --- /dev/null +++ b/portkey_ai/api_resources/types/feedback_type.py @@ -0,0 +1,12 @@ +import json +from typing import Optional + +from .utils import parse_headers +from typing import List +from pydantic import BaseModel + + +class FeedbackResponse(BaseModel): + status: Optional[str] + message: Optional[str] + feedback_ids: Optional[List[str]] From 2316153766330ac642b15d9c72914e16a1ebfcae Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 20 Apr 2024 15:06:31 +0530 Subject: [PATCH 02/38] fix: linting issue --- portkey_ai/api_resources/utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index 860952e4..cb3ead6c 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -15,6 +15,7 @@ TextCompletionChunk, TextCompletion, ) +from portkey_ai.api_resources.types.feedback_type import FeedbackResponse from portkey_ai.api_resources.types.generation_type import ( PromptCompletion, PromptCompletionChunk, @@ -61,7 +62,7 @@ class CacheType(str, Enum, metaclass=MetaEnum): ResponseT = TypeVar( "ResponseT", - bound="Union[ChatCompletionChunk, ChatCompletions, TextCompletion, TextCompletionChunk, GenericResponse, PromptCompletion, PromptCompletionChunk, PromptRender, httpx.Response]", # noqa: E501 + bound="Union[ChatCompletionChunk, ChatCompletions, TextCompletion, TextCompletionChunk, GenericResponse, PromptCompletion, PromptCompletionChunk, PromptRender, FeedbackResponse, httpx.Response]", # noqa: E501 ) From 7aaf1a54f1483923a367f3c2a7857b934d367bea Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 8 May 2024 00:28:06 +0530 Subject: [PATCH 03/38] feat: moderation support added --- portkey_ai/__init__.py | 4 ++ portkey_ai/api_resources/__init__.py | 4 ++ portkey_ai/api_resources/apis/__init__.py | 3 ++ portkey_ai/api_resources/apis/moderations.py | 52 
+++++++++++++++++++
 portkey_ai/api_resources/client.py            |   4 ++
 .../api_resources/types/moderations_type.py   |  29 +++++++++++
 6 files changed, 96 insertions(+)
 create mode 100644 portkey_ai/api_resources/apis/moderations.py
 create mode 100644 portkey_ai/api_resources/types/moderations_type.py

diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py
index f945a675..f079d99b 100644
--- a/portkey_ai/__init__.py
+++ b/portkey_ai/__init__.py
@@ -42,6 +42,8 @@
     AsyncRuns,
     Steps,
     AsyncSteps,
+    Moderations,
+    AsyncModerations,
 )
 from portkey_ai.version import VERSION
@@ -103,4 +105,6 @@
     "AsyncRuns",
     "Steps",
     "AsyncSteps",
+    "Moderations",
+    "AsyncModerations",
 ]
diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py
index f489f0bf..87766dda 100644
--- a/portkey_ai/api_resources/__init__.py
+++ b/portkey_ai/api_resources/__init__.py
@@ -31,6 +31,8 @@
     AsyncRuns,
     Steps,
     AsyncSteps,
+    Moderations,
+    AsyncModerations,
 )
 from .utils import (
     Modes,
@@ -97,4 +99,6 @@
     "AsyncRuns",
     "Steps",
     "AsyncSteps",
+    "Moderations",
+    "AsyncModerations",
 ]
diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py
index e064e3c6..de21952c 100644
--- a/portkey_ai/api_resources/apis/__init__.py
+++ b/portkey_ai/api_resources/apis/__init__.py
@@ -21,6 +21,7 @@
 )
 from .main_files import MainFiles, AsyncMainFiles
 from .models import Models, AsyncModels
+from .moderations import Moderations, AsyncModerations
 
 __all__ = [
     "Completion",
@@ -58,4 +59,6 @@
     "AsyncRuns",
     "Steps",
     "AsyncSteps",
+    "Moderations",
+    "AsyncModerations",
 ]
diff --git a/portkey_ai/api_resources/apis/moderations.py b/portkey_ai/api_resources/apis/moderations.py
new file mode 100644
index 00000000..65e858fd
--- /dev/null
+++ b/portkey_ai/api_resources/apis/moderations.py
@@ -0,0 +1,52 @@
+import json
+from typing import List, Union, Any
+from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource
+from portkey_ai.api_resources.client import AsyncPortkey, Portkey
+from openai._types import NotGiven, NOT_GIVEN
+from portkey_ai.api_resources.types.moderations_type import ModerationCreateResponse
+
+
+class Moderations(APIResource):
+    def __init__(self, client: Portkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+
+    def create(
+        self,
+        *,
+        input: Union[str, List[str]],
+        model: Union[str, NotGiven] = NOT_GIVEN,
+        **kwargs
+    ) -> ModerationCreateResponse:
+        response = self.openai_client.with_raw_response.moderations.create(
+            input=input,
+            model=model,
+            **kwargs
+        )
+        data = ModerationCreateResponse(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
+
+
+class AsyncModerations(AsyncAPIResource):
+    def __init__(self, client: AsyncPortkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+
+    async def create(
+        self,
+        *,
+        input: Union[str, List[str]],
+        model: Union[str, NotGiven] = NOT_GIVEN,
+        **kwargs
+    ) -> ModerationCreateResponse:
+        response = await self.openai_client.with_raw_response.moderations.create(
+            input=input,
+            model=model,
+            **kwargs
+        )
+        data = ModerationCreateResponse(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
\ No newline at end of file
diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py
index d927906f..2deb1b19 100644
--- a/portkey_ai/api_resources/client.py
+++ b/portkey_ai/api_resources/client.py
@@ -19,6 +19,7 @@ class Portkey(APIClient):
     images: apis.Images
     files:
apis.MainFiles
     models: apis.Models
+    moderations: apis.Moderations
 
     class beta:
         assistants: apis.Assistants
@@ -66,6 +67,7 @@ def __init__(
         self.images = apis.Images(self)
         self.files = apis.MainFiles(self)
         self.models = apis.Models(self)
+        self.moderations = apis.Moderations(self)
         self.beta = self.beta(self)  # type: ignore
 
     def copy(
@@ -107,6 +109,7 @@ class AsyncPortkey(AsyncAPIClient):
     images: apis.AsyncImages
     files: apis.AsyncMainFiles
     models: apis.AsyncModels
+    moderations: apis.AsyncModerations
 
     class beta:
         assistants: apis.AsyncAssistants
@@ -154,6 +157,7 @@ def __init__(
         self.images = apis.AsyncImages(self)
         self.files = apis.AsyncMainFiles(self)
         self.models = apis.AsyncModels(self)
+        self.moderations = apis.AsyncModerations(self)
         self.beta = self.beta(self)  # type: ignore
 
     def copy(
diff --git a/portkey_ai/api_resources/types/moderations_type.py b/portkey_ai/api_resources/types/moderations_type.py
new file mode 100644
index 00000000..a62d7a0b
--- /dev/null
+++ b/portkey_ai/api_resources/types/moderations_type.py
@@ -0,0 +1,29 @@
+import json
+from typing import Dict, Optional
+import httpx
+from .utils import parse_headers
+from typing import List, Any
+from pydantic import BaseModel, PrivateAttr
+from openai.types.moderation import Moderation
+
+
+__all__ = ["ModerationCreateResponse"]
+
+class ModerationCreateResponse(BaseModel):
+    id: str
+    model: str
+    results: List[Moderation]
+    _headers: Optional[httpx.Headers] = PrivateAttr()
+
+    def __str__(self):
+        del self._headers
+        return json.dumps(self.dict(), indent=4)
+
+    def __getitem__(self, key):
+        return getattr(self, key, None)
+
+    def get(self, key: str, default: Optional[Any] = None):
+        return getattr(self, key, None) or default
+
+    def get_headers(self) -> Optional[Dict[str, str]]:
+        return parse_headers(self._headers)
From f889f03ec330d3f45c91519d21303afe12b8b6c4 Mon Sep 17 00:00:00 2001
From: csgulati09
Date: Wed, 8 May 2024 01:56:09 +0530
Subject: [PATCH 04/38] feat: audio support added

---
 portkey_ai/__init__.py                        |  16 ++
 portkey_ai/api_resources/__init__.py          |  16 ++
 portkey_ai/api_resources/apis/__init__.py     |   9 +
 portkey_ai/api_resources/apis/audio.py        | 204 ++++++++++++++++++
 portkey_ai/api_resources/client.py            |   4 +
 portkey_ai/api_resources/types/audio_types.py |  44 ++++
 6 files changed, 293 insertions(+)
 create mode 100644 portkey_ai/api_resources/apis/audio.py
 create mode 100644 portkey_ai/api_resources/types/audio_types.py

diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py
index f079d99b..2e20b2cb 100644
--- a/portkey_ai/__init__.py
+++ b/portkey_ai/__init__.py
@@ -44,6 +44,14 @@
     AsyncSteps,
     Moderations,
     AsyncModerations,
+    Audio,
+    Transcriptions,
+    Translations,
+    Speech,
+    AsyncAudio,
+    AsyncTranscriptions,
+    AsyncTranslations,
+    AsyncSpeech,
 )
 from portkey_ai.version import VERSION
@@ -107,4 +115,12 @@
     "AsyncSteps",
     "Moderations",
     "AsyncModerations",
+    "Audio",
+    "Transcriptions",
+    "Translations",
+    "Speech",
+    "AsyncAudio",
+    "AsyncTranscriptions",
+    "AsyncTranslations",
+    "AsyncSpeech",
 ]
diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py
index 87766dda..28ae51e0 100644
--- a/portkey_ai/api_resources/__init__.py
+++ b/portkey_ai/api_resources/__init__.py
@@ -33,6 +33,14 @@
     AsyncSteps,
     Moderations,
     AsyncModerations,
+    Audio,
+    Transcriptions,
+    Translations,
+    Speech,
+    AsyncAudio,
+    AsyncTranscriptions,
+    AsyncTranslations,
+    AsyncSpeech,
 )
 from .utils import (
     Modes,
@@ -109,4 +117,12 @@
     "AsyncSteps",
     "Moderations",
     "AsyncModerations",
+    "Audio",
+    "Transcriptions",
+    "Translations",
+    "Speech",
+    "AsyncAudio",
+    "AsyncTranscriptions",
+    "AsyncTranslations",
+    "AsyncSpeech",
 ]
diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py
index de21952c..a50780b2 100644
--- a/portkey_ai/api_resources/apis/__init__.py
+++ b/portkey_ai/api_resources/apis/__init__.py
@@ -22,6 +22,7 @@
 from .main_files import MainFiles, AsyncMainFiles
 from .models import Models, AsyncModels
 from .moderations import Moderations, AsyncModerations
+from .audio import Audio, Transcriptions, Translations, Speech, AsyncAudio, AsyncTranscriptions, AsyncTranslations, AsyncSpeech
 
 __all__ = [
     "Completion",
@@ -61,4 +62,12 @@
     "AsyncSteps",
     "Moderations",
     "AsyncModerations",
+    "Audio",
+    "Transcriptions",
+    "Translations",
+    "Speech",
+    "AsyncAudio",
+    "AsyncTranscriptions",
+    "AsyncTranslations",
+    "AsyncSpeech",
 ]
diff --git a/portkey_ai/api_resources/apis/audio.py b/portkey_ai/api_resources/apis/audio.py
new file mode 100644
index 00000000..363c12fe
--- /dev/null
+++ b/portkey_ai/api_resources/apis/audio.py
@@ -0,0 +1,204 @@
+from typing import List, Union
+from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource
+from openai._types import NotGiven, NOT_GIVEN, FileTypes
+from portkey_ai.api_resources.client import AsyncPortkey, Portkey
+
+from portkey_ai.api_resources.types.audio_types import Transcription, Translation
+from portkey_ai.api_resources.utils import GenericResponse
+
+
+class Audio(APIResource):
+    def __init__(self, client: Portkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+        self.transcriptions = Transcriptions(client)
+        self.translations = Translations(client)
+        self.speech = Speech(client)
+
+
+class Transcriptions(APIResource):
+    def __init__(self, client: Portkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+
+    def create(
+        self,
+        *,
+        file: FileTypes,
+        model: str,
+        language: Union[str, NotGiven] = NOT_GIVEN,
+        prompt: Union[str, NotGiven] = NOT_GIVEN,
+        response_format: Union[str, NotGiven] = NOT_GIVEN,
+        temperature: Union[float, NotGiven] = NOT_GIVEN,
+        timestamp_granularities: Union[List[str], NotGiven] = NOT_GIVEN,
+        **kwargs
+    ) -> Transcription:
+        response = self.openai_client.with_raw_response.audio.transcriptions.create(
+            file=file,
+            model=model,
+            language=language,
+            prompt=prompt,
+            response_format=response_format,
+            temperature=temperature,
+            timestamp_granularities=timestamp_granularities,
+            **kwargs
+        )
+        data = Transcription(**response.json())
+        data._headers = response.headers
+
+        return data
+
+
+class Translations(APIResource):
+    def __init__(self, client: Portkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+
+    def create(
+        self,
+        *,
+        file: FileTypes,
+        model: str,
+        prompt: Union[str, NotGiven] = NOT_GIVEN,
+        response_format: Union[str, NotGiven] = NOT_GIVEN,
+        temperature: Union[float, NotGiven] = NOT_GIVEN,
+        **kwargs
+    ) -> Translation:
+        response = self.openai_client.with_raw_response.audio.translations.create(
+            file=file,
+            model=model,
+            prompt=prompt,
+            response_format=response_format,
+            temperature=temperature,
+            **kwargs
+        )
+        data = Translation(**response.json())
+        data._headers = response.headers
+
+        return data
+
+
+class Speech(APIResource):
+    def __init__(self, client: Portkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+
+    def create(
+        self,
+        *,
+        input: str,
+        model: str,
+        voice: str,
+        response_format: Union[str, NotGiven] = NOT_GIVEN,
+        speed: Union[float, NotGiven] = NOT_GIVEN,
+        **kwargs
+    ) -> GenericResponse:
+        response = self.openai_client.with_raw_response.audio.speech.create(
+            input=input,
+            model=model,
+            voice=voice,
+            response_format=response_format,
+            speed=speed,
+            **kwargs
+        )
+
+        data = GenericResponse(**response.json())
+        data._headers = response.headers
+
+        return data
+
+
+class AsyncAudio(AsyncAPIResource):
+    def __init__(self, client: AsyncPortkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+        self.transcriptions = AsyncTranscriptions(client)
+        self.translations = AsyncTranslations(client)
+        self.speech = AsyncSpeech(client)
+
+
+class AsyncTranscriptions(AsyncAPIResource):
+    def __init__(self, client: AsyncPortkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+
+    async def create(
+        self,
+        *,
+        file: FileTypes,
+        model: str,
+        language: Union[str, NotGiven] = NOT_GIVEN,
+        prompt: Union[str, NotGiven] = NOT_GIVEN,
+        response_format: Union[str, NotGiven] = NOT_GIVEN,
+        temperature: Union[float, NotGiven] = NOT_GIVEN,
+        timestamp_granularities: Union[List[str], NotGiven] = NOT_GIVEN,
+        **kwargs
+    ) -> Transcription:
+        response = await self.openai_client.with_raw_response.audio.transcriptions.create(
+            file=file,
+            model=model,
+            language=language,
+            prompt=prompt,
+            response_format=response_format,
+            temperature=temperature,
+            timestamp_granularities=timestamp_granularities,
+            **kwargs
+        )
+        data = Transcription(**response.json())
+        data._headers = response.headers
+
+        return data
+
+
+class AsyncTranslations(AsyncAPIResource):
+    def __init__(self, client: AsyncPortkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+
+    async def create(
+        self,
+        *,
+        file: FileTypes,
+        model: str,
+        prompt: Union[str, NotGiven] = NOT_GIVEN,
+        response_format: Union[str, NotGiven] = NOT_GIVEN,
+        temperature: Union[float, NotGiven] = NOT_GIVEN,
+        **kwargs
+    ) -> Translation:
+        response = await self.openai_client.with_raw_response.audio.translations.create(
+            file=file,
+            model=model,
+            prompt=prompt,
+            response_format=response_format,
+            temperature=temperature,
+            **kwargs
+        )
+        data = Translation(**response.json())
+        data._headers = response.headers
+
+        return data
+
+
+class AsyncSpeech(AsyncAPIResource):
+    def __init__(self, client: AsyncPortkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+
+    async def create(
+        self,
+        *,
+        input: str,
+        model: str,
+        voice: str,
+        response_format: Union[str, NotGiven] = NOT_GIVEN,
+        speed: Union[float, NotGiven] = NOT_GIVEN,
+        **kwargs
+    ) -> GenericResponse:
+        response = await self.openai_client.with_raw_response.audio.speech.create(
+            input=input,
+            model=model,
+            voice=voice,
+            response_format=response_format,
+            speed=speed,
+            **kwargs
+        )
+
+        data = GenericResponse(**response.json())
+        data._headers = response.headers
+
+        return data
\ No newline at end of file
diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py
index 2deb1b19..9793fad3 100644
--- a/portkey_ai/api_resources/client.py
+++ b/portkey_ai/api_resources/client.py
@@ -20,6 +20,7 @@ class Portkey(APIClient):
     files: apis.MainFiles
     models: apis.Models
     moderations: apis.Moderations
+    audio: apis.Audio
 
     class beta:
         assistants: apis.Assistants
@@ -68,6 +69,7 @@ def __init__(
         self.files = apis.MainFiles(self)
         self.models = apis.Models(self)
         self.moderations =
apis.Moderations(self) + self.audio = apis.Audio(self) self.beta = self.beta(self) # type: ignore def copy( @@ -110,6 +112,7 @@ class AsyncPortkey(AsyncAPIClient): files: apis.AsyncMainFiles models: apis.AsyncModels moderations: apis.AsyncModerations + audio: apis.AsyncAudio class beta: assistants: apis.AsyncAssistants @@ -158,6 +161,7 @@ def __init__( self.files = apis.AsyncMainFiles(self) self.models = apis.AsyncModels(self) self.moderations = apis.AsyncModerations(self) + self.audio = apis.AsyncAudio(self) self.beta = self.beta(self) # type: ignore def copy( diff --git a/portkey_ai/api_resources/types/audio_types.py b/portkey_ai/api_resources/types/audio_types.py new file mode 100644 index 00000000..ba677db1 --- /dev/null +++ b/portkey_ai/api_resources/types/audio_types.py @@ -0,0 +1,44 @@ +import json +from typing import Dict, Optional +import httpx +from .utils import parse_headers +from typing import List, Any +from pydantic import BaseModel, PrivateAttr + +__all__ = ["Transcription", "Translation"] + +class Transcription(BaseModel): + text: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class Translation(BaseModel): + text: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + From f889f03ec330d3f45c91519d21303afe12b8b6c4 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 8 May 2024 02:10:22 +0530 Subject: [PATCH 05/38] feat: main_files wait_for_processing added --- portkey_ai/api_resources/apis/main_files.py | 32 +++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/portkey_ai/api_resources/apis/main_files.py b/portkey_ai/api_resources/apis/main_files.py index 566d8736..d6e34049 100644 --- a/portkey_ai/api_resources/apis/main_files.py +++ b/portkey_ai/api_resources/apis/main_files.py @@ -56,6 +56,22 @@ def retrieve_content(self, file_id, **kwargs) -> Any: response = self.openai_client.files.content(file_id=file_id, **kwargs) return response + def wait_for_processing( + self, + id: str, + *, + polling_interval: float = 5.0, + max_wait_seconds: float = 30 * 60, + **kwargs + ) -> Any: + response = self.openai_client.files.wait_for_processing( + id=id, + polling_interval=polling_interval, + max_wait_seconds=max_wait_seconds, + **kwargs + ) + return response + class AsyncMainFiles(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: @@ -103,3 +119,19 @@ async def content(self, file_id, **kwargs) -> Any: async def retrieve_content(self, file_id, **kwargs) -> Any: response = await self.openai_client.files.content(file_id=file_id, **kwargs) return response + + async def wait_for_processing( + self, + id: str, + *, + polling_interval: float = 5.0, + max_wait_seconds: float = 30 * 60, + **kwargs + ) -> Any: + response = await self.openai_client.files.wait_for_processing( + id=id, + polling_interval=polling_interval, + max_wait_seconds=max_wait_seconds, + 
**kwargs + ) + return response From 0c71f7513c24a9c3874deb00ed86cd098bf92d77 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 8 May 2024 02:43:37 +0530 Subject: [PATCH 06/38] feat: batches support added --- portkey_ai/__init__.py | 6 +- portkey_ai/api_resources/__init__.py | 4 + portkey_ai/api_resources/apis/__init__.py | 3 + portkey_ai/api_resources/apis/batches.py | 148 ++++++++++++++++++ portkey_ai/api_resources/client.py | 4 + .../api_resources/types/batches_type.py | 73 +++++++++ 6 files changed, 237 insertions(+), 1 deletion(-) create mode 100644 portkey_ai/api_resources/apis/batches.py create mode 100644 portkey_ai/api_resources/types/batches_type.py diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index 2e20b2cb..a2159cdd 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -51,7 +51,9 @@ AsyncAudio, AsyncTranscriptions, AsyncTranslations, - AsyncSpeech + AsyncSpeech, + Batches, + AsyncBatches, ) from portkey_ai.version import VERSION @@ -123,4 +125,6 @@ "AsyncTranscriptions", "AsyncTranslations", "AsyncSpeech", + "Batches", + "AsyncBatches", ] diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py index 28ae51e0..05b5f30f 100644 --- a/portkey_ai/api_resources/__init__.py +++ b/portkey_ai/api_resources/__init__.py @@ -41,6 +41,8 @@ AsyncTranscriptions, AsyncTranslations, AsyncSpeech, + Batches, + AsyncBatches, ) from .utils import ( Modes, @@ -117,4 +119,6 @@ "AsyncTranscriptions", "AsyncTranslations", "AsyncSpeech", + "Batches", + "AsyncBatches", ] diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py index a50780b2..7929fbdb 100644 --- a/portkey_ai/api_resources/apis/__init__.py +++ b/portkey_ai/api_resources/apis/__init__.py @@ -23,6 +23,7 @@ from .models import Models, AsyncModels from .moderations import Moderations, AsyncModerations from .audio import Audio, Transcriptions, Translations, Speech, AsyncAudio, AsyncTranscriptions, AsyncTranslations, AsyncSpeech +from .batches import Batches, AsyncBatches __all__ = [ "Completion", @@ -70,4 +71,6 @@ "AsyncTranscriptions", "AsyncTranslations", "AsyncSpeech", + "Batches", + "AsyncBatches", ] diff --git a/portkey_ai/api_resources/apis/batches.py b/portkey_ai/api_resources/apis/batches.py new file mode 100644 index 00000000..a26e2d13 --- /dev/null +++ b/portkey_ai/api_resources/apis/batches.py @@ -0,0 +1,148 @@ +import json +from typing import Dict, Optional, Union +from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from openai._types import NotGiven, NOT_GIVEN + +from portkey_ai.api_resources.types.batches_type import Batch, BatchList + + +class Batches(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def create( + self, + *, + completion_window: str, + endpoint: str, + input_file_id: str, + metadata: Union[Optional[Dict[str, str]], NotGiven] = NOT_GIVEN, + **kwargs + ) -> Batch: + + response = self.openai_client.with_raw_response.batches.create( + completion_window=completion_window, + endpoint=endpoint, + input_file_id=input_file_id, + metadata=metadata, + **kwargs + ) + data = Batch(**json.loads(response.text)) + data._headers = response.headers + + return data + + def retrieve( + self, + batch_id, + **kwargs) -> Batch: + response = self.openai_client.with_raw_response.batches.retrieve( + batch_id=batch_id, **kwargs + ) + data = 
Batch(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
+
+    def list(
+        self,
+        *,
+        after: Union[str, NotGiven] = NOT_GIVEN,
+        limit: Union[int, NotGiven] = NOT_GIVEN,
+        **kwargs
+    ) -> BatchList:
+        response = self.openai_client.with_raw_response.batches.list(
+            after=after,
+            limit=limit,
+            **kwargs
+        )
+        data = BatchList(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
+
+    def cancel(
+        self,
+        batch_id: str,
+        **kwargs
+    ) -> Batch:
+        response = self.openai_client.with_raw_response.batches.cancel(
+            batch_id=batch_id,
+            **kwargs
+        )
+        data = Batch(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
+
+
+class AsyncBatches(AsyncAPIResource):
+    def __init__(self, client: AsyncPortkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+
+    async def create(
+        self,
+        *,
+        completion_window: str,
+        endpoint: str,
+        input_file_id: str,
+        metadata: Union[Optional[Dict[str, str]], NotGiven] = NOT_GIVEN,
+        **kwargs
+    ) -> Batch:
+        response = await self.openai_client.with_raw_response.batches.create(
+            completion_window=completion_window,
+            endpoint=endpoint,
+            input_file_id=input_file_id,
+            metadata=metadata,
+            **kwargs
+        )
+        data = Batch(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
+
+    async def retrieve(self, batch_id, **kwargs) -> Batch:
+        response = await self.openai_client.with_raw_response.batches.retrieve(
+            batch_id=batch_id, **kwargs
+        )
+        data = Batch(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
+
+    async def list(
+        self,
+        *,
+        after: Union[str, NotGiven] = NOT_GIVEN,
+        limit: Union[int, NotGiven] = NOT_GIVEN,
+        **kwargs
+    ) -> BatchList:
+        response = await self.openai_client.with_raw_response.batches.list(
+            after=after,
+            limit=limit,
+            **kwargs
+        )
+        data = BatchList(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
+
+    async def cancel(
+        self,
+        batch_id: str,
+        **kwargs
+    ) -> Batch:
+        response = await self.openai_client.with_raw_response.batches.cancel(
+            batch_id=batch_id,
+            **kwargs
+        )
+        data = Batch(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
+
diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py
index 9793fad3..ba6bf80c 100644
--- a/portkey_ai/api_resources/client.py
+++ b/portkey_ai/api_resources/client.py
@@ -21,6 +21,7 @@ class Portkey(APIClient):
     models: apis.Models
     moderations: apis.Moderations
     audio: apis.Audio
+    batches: apis.Batches
 
     class beta:
         assistants: apis.Assistants
@@ -70,6 +71,7 @@ def __init__(
         self.models = apis.Models(self)
         self.moderations = apis.Moderations(self)
         self.audio = apis.Audio(self)
+        self.batches = apis.Batches(self)
         self.beta = self.beta(self)  # type: ignore
 
     def copy(
@@ -113,6 +115,7 @@ class AsyncPortkey(AsyncAPIClient):
     models: apis.AsyncModels
     moderations: apis.AsyncModerations
     audio: apis.AsyncAudio
+    batches: apis.AsyncBatches
 
     class beta:
         assistants: apis.AsyncAssistants
@@ -162,6 +165,7 @@ def __init__(
         self.models = apis.AsyncModels(self)
         self.moderations = apis.AsyncModerations(self)
         self.audio = apis.AsyncAudio(self)
+        self.batches = apis.AsyncBatches(self)
         self.beta = self.beta(self)  # type: ignore
 
     def copy(
diff --git a/portkey_ai/api_resources/types/batches_type.py b/portkey_ai/api_resources/types/batches_type.py
new file mode 100644
index 00000000..ba036689
--- /dev/null
+++ b/portkey_ai/api_resources/types/batches_type.py
@@ -0,0 +1,73 @@
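+# Usage sketch (illustrative only; assumes an already-configured `Portkey`
+# client; "file-abc123" is a placeholder file id, and the window/endpoint
+# values follow OpenAI's batch API conventions):
+#
+#     batch = portkey.batches.create(
+#         completion_window="24h",
+#         endpoint="/v1/chat/completions",
+#         input_file_id="file-abc123",
+#     )
+#     batch = portkey.batches.retrieve(batch_id=batch.id)
+#     print(batch.status)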
+import json +import builtins +from typing import Dict, Optional +import httpx +from .utils import parse_headers +from typing import List, Any +from pydantic import BaseModel, PrivateAttr +from openai.types.batch_error import BatchError +from openai.types.batch_request_counts import BatchRequestCounts + +__all__ = ["Batch", "BatchList", "Errors"] + +class Errors(BaseModel): + data: Optional[List[BatchError]] = None + + object: Optional[str] = None + """The object type, which is always `list`.""" + + +class Batch(BaseModel): + id: str + completion_window: str + created_at: int + endpoint: str + input_file_id: str + object: str + status: str + cancelled_at: Optional[int] = None + cancelling_at: Optional[int] = None + completed_at: Optional[int] = None + error_file_id: Optional[str] = None + errors: Optional[Errors] = None + expired_at: Optional[int] = None + expires_at: Optional[int] = None + failed_at: Optional[int] = None + finalizing_at: Optional[int] = None + in_progress_at: Optional[int] = None + metadata: Optional[builtins.object] = None + output_file_id: Optional[str] = None + request_counts: Optional[BatchRequestCounts] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class BatchList(BaseModel): + object: Optional[str] = None + data: Optional[List[Batch]] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) \ No newline at end of file From 9dcf78c883655d0b2bc6e0d80adbbb77dd49c001 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 8 May 2024 03:55:55 +0530 Subject: [PATCH 07/38] feat: fine_tuning support added --- portkey_ai/__init__.py | 12 + portkey_ai/api_resources/__init__.py | 12 + portkey_ai/api_resources/apis/__init__.py | 7 + portkey_ai/api_resources/apis/fine_tuning.py | 257 ++++++++++++++++++ portkey_ai/api_resources/client.py | 4 + .../api_resources/types/fine_tuning_type.py | 163 +++++++++++ 6 files changed, 455 insertions(+) create mode 100644 portkey_ai/api_resources/apis/fine_tuning.py create mode 100644 portkey_ai/api_resources/types/fine_tuning_type.py diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index a2159cdd..b78418fa 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -54,6 +54,12 @@ AsyncSpeech, Batches, AsyncBatches, + FineTuning, + Jobs, + Checkpoints, + AsyncFineTuning, + AsyncJobs, + AsyncCheckpoints, ) from portkey_ai.version import VERSION @@ -127,4 +133,10 @@ "AsyncSpeech", "Batches", "AsyncBatches", + "FineTuning", + "Jobs", + "Checkpoints", + "AsyncFineTuning", + "AsyncJobs", + "AsyncCheckpoints", ] diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py index 05b5f30f..ddfdd45b 100644 --- a/portkey_ai/api_resources/__init__.py +++ b/portkey_ai/api_resources/__init__.py @@ -43,6 +43,12 @@ AsyncSpeech, Batches, AsyncBatches, + FineTuning, + Jobs, + Checkpoints, + AsyncFineTuning, + 
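+    # Illustrative note on the fine-tuning exports in this group: they are
+    # typically driven through the client, e.g.
+    # `portkey.fine_tuning.jobs.create(model=..., training_file=...)` and
+    # `portkey.fine_tuning.jobs.list_events(fine_tuning_job_id=...)`, where
+    # the model name and training file id are placeholders.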
AsyncJobs, + AsyncCheckpoints, ) from .utils import ( Modes, @@ -121,4 +127,10 @@ "AsyncSpeech", "Batches", "AsyncBatches", + "FineTuning", + "Jobs", + "Checkpoints", + "AsyncFineTuning", + "AsyncJobs", + "AsyncCheckpoints", ] diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py index 7929fbdb..0cd89fda 100644 --- a/portkey_ai/api_resources/apis/__init__.py +++ b/portkey_ai/api_resources/apis/__init__.py @@ -24,6 +24,7 @@ from .moderations import Moderations, AsyncModerations from .audio import Audio, Transcriptions, Translations, Speech, AsyncAudio, AsyncTranscriptions, AsyncTranslations, AsyncSpeech from .batches import Batches, AsyncBatches +from .fine_tuning import FineTuning, Jobs, Checkpoints, AsyncFineTuning, AsyncJobs, AsyncCheckpoints __all__ = [ "Completion", @@ -73,4 +74,10 @@ "AsyncSpeech", "Batches", "AsyncBatches", + "FineTuning", + "Jobs", + "Checkpoints", + "AsyncFineTuning", + "AsyncJobs", + "AsyncCheckpoints", ] diff --git a/portkey_ai/api_resources/apis/fine_tuning.py b/portkey_ai/api_resources/apis/fine_tuning.py new file mode 100644 index 00000000..1f17e36a --- /dev/null +++ b/portkey_ai/api_resources/apis/fine_tuning.py @@ -0,0 +1,257 @@ +from typing import Iterable, Optional, Union +from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from openai._types import NotGiven, NOT_GIVEN +from openai.types.fine_tuning import job_create_params + +from portkey_ai.api_resources.types.fine_tuning_type import ( + FineTuningJob, + FineTuningJobCheckpointList, + FineTuningJobEventList, + FineTuningJobList, +) + + +class FineTuning(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.jobs = Jobs(client) + + +class Jobs(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.checkpoints = Checkpoints(client) + + def create( + self, + *, + model: str, + training_file: str, + hyperparameters: Union[job_create_params.Hyperparameters, NotGiven] = NOT_GIVEN, + integrations: Union[ + Optional[Iterable[job_create_params.Integration]], NotGiven + ] = NOT_GIVEN, + seed: Union[Optional[int], NotGiven] = NOT_GIVEN, + suffix: Union[Optional[str], NotGiven] = NOT_GIVEN, + validation_file: Union[Optional[str], NotGiven] = NOT_GIVEN, + **kwargs, + ) -> FineTuningJob: + response = self.openai_client.with_raw_response.fine_tuning.jobs.create( + model=model, + training_file=training_file, + hyperparameters=hyperparameters, + integrations=integrations, + seed=seed, + suffix=suffix, + validation_file=validation_file, + **kwargs, + ) + data = FineTuningJob(**response.json()) + data._headers = response.headers + + return data + + def retrieve(self, fine_tuning_job_id: str, **kwargs) -> FineTuningJob: + response = self.openai_client.with_raw_response.fine_tuning.jobs.retrieve( + fine_tuning_job_id=fine_tuning_job_id, **kwargs + ) + data = FineTuningJob(**response.json()) + data._headers = response.headers + + return data + + def list( + self, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> FineTuningJobList: + response = self.openai_client.with_raw_response.fine_tuning.jobs.list( + after=after, limit=limit, **kwargs + ) + data = FineTuningJobList(**response.json()) + data._headers = response.headers + + return data + + def cancel(self, 
fine_tuning_job_id: str, **kwargs) -> FineTuningJob:
+        response = self.openai_client.with_raw_response.fine_tuning.jobs.cancel(
+            fine_tuning_job_id=fine_tuning_job_id, **kwargs
+        )
+        data = FineTuningJob(**response.json())
+        data._headers = response.headers
+
+        return data
+
+    def list_events(
+        self,
+        fine_tuning_job_id: str,
+        *,
+        after: Union[str, NotGiven] = NOT_GIVEN,
+        limit: Union[int, NotGiven] = NOT_GIVEN,
+        **kwargs,
+    ) -> FineTuningJobEventList:
+        response = self.openai_client.with_raw_response.fine_tuning.jobs.list_events(
+            fine_tuning_job_id=fine_tuning_job_id, after=after, limit=limit, **kwargs
+        )
+        data = FineTuningJobEventList(**response.json())
+        data._headers = response.headers
+
+        return data
+
+
+class Checkpoints(APIResource):
+    def __init__(self, client: Portkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+
+    def list(
+        self,
+        fine_tuning_job_id: str,
+        *,
+        after: Union[str, NotGiven] = NOT_GIVEN,
+        limit: Union[int, NotGiven] = NOT_GIVEN,
+        **kwargs,
+    ) -> FineTuningJobCheckpointList:
+        response = (
+            self.openai_client.with_raw_response.fine_tuning.jobs.checkpoints.list(
+                fine_tuning_job_id=fine_tuning_job_id,
+                after=after,
+                limit=limit,
+                **kwargs,
+            )
+        )
+
+        data = FineTuningJobCheckpointList(**response.json())
+        data._headers = response.headers
+
+        return data
+
+
+class AsyncFineTuning(AsyncAPIResource):
+    def __init__(self, client: AsyncPortkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+        self.jobs = AsyncJobs(client)
+
+
+class AsyncJobs(AsyncAPIResource):
+    def __init__(self, client: AsyncPortkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+        self.checkpoints = AsyncCheckpoints(client)
+
+    async def create(
+        self,
+        *,
+        model: str,
+        training_file: str,
+        hyperparameters: Union[job_create_params.Hyperparameters, NotGiven] = NOT_GIVEN,
+        integrations: Union[
+            Optional[Iterable[job_create_params.Integration]], NotGiven
+        ] = NOT_GIVEN,
+        seed: Union[Optional[int], NotGiven] = NOT_GIVEN,
+        suffix: Union[Optional[str], NotGiven] = NOT_GIVEN,
+        validation_file: Union[Optional[str], NotGiven] = NOT_GIVEN,
+        **kwargs,
+    ) -> FineTuningJob:
+        response = await self.openai_client.with_raw_response.fine_tuning.jobs.create(
+            model=model,
+            training_file=training_file,
+            hyperparameters=hyperparameters,
+            integrations=integrations,
+            seed=seed,
+            suffix=suffix,
+            validation_file=validation_file,
+            **kwargs,
+        )
+        data = FineTuningJob(**response.json())
+        data._headers = response.headers
+
+        return data
+
+    async def retrieve(self, fine_tuning_job_id: str, **kwargs) -> FineTuningJob:
+        response = await self.openai_client.with_raw_response.fine_tuning.jobs.retrieve(
+            fine_tuning_job_id=fine_tuning_job_id, **kwargs
+        )
+        data = FineTuningJob(**response.json())
+        data._headers = response.headers
+
+        return data
+
+    async def list(
+        self,
+        *,
+        after: Union[str, NotGiven] = NOT_GIVEN,
+        limit: Union[int, NotGiven] = NOT_GIVEN,
+        **kwargs,
+    ) -> FineTuningJobList:
+        response = await self.openai_client.with_raw_response.fine_tuning.jobs.list(
+            after=after, limit=limit, **kwargs
+        )
+        data = FineTuningJobList(**response.json())
+        data._headers = response.headers
+
+        return data
+
+    async def cancel(self, fine_tuning_job_id: str, **kwargs) -> FineTuningJob:
+        response = await self.openai_client.with_raw_response.fine_tuning.jobs.cancel(
+            fine_tuning_job_id=fine_tuning_job_id, **kwargs
+        )
+        data = FineTuningJob(**response.json())
+        data._headers = response.headers
+
return data + + async def list_events( + self, + fine_tuning_job_id: str, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> FineTuningJobEventList: + response = ( + await self.openai_client.with_raw_response.fine_tuning.jobs.list_events( + fine_tuning_job_id=fine_tuning_job_id, + after=after, + limit=limit, + **kwargs, + ) + ) + data = FineTuningJobEventList(**response.json()) + data._headers = response.headers + + return data + + +class AsyncCheckpoints(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def list( + self, + fine_tuning_job_id: str, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> FineTuningJobCheckpointList: + response = ( + await self.openai_client.with_raw_response.fine_tuning.jobs.checkpoints.list( + fine_tuning_job_id=fine_tuning_job_id, + after=after, + limit=limit, + **kwargs, + ) + ) + + data = FineTuningJobCheckpointList(**response.json()) + data._headers = response.headers + + return data \ No newline at end of file diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index ba6bf80c..785e62eb 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -22,6 +22,7 @@ class Portkey(APIClient): moderations: apis.Moderations audio: apis.Audio batches: apis.Batches + fine_tuning: apis.FineTuning class beta: assistants: apis.Assistants @@ -72,6 +73,7 @@ def __init__( self.moderations = apis.Moderations(self) self.audio = apis.Audio(self) self.batches = apis.Batches(self) + self.fine_tuning = apis.FineTuning(self) self.beta = self.beta(self) # type: ignore def copy( @@ -116,6 +118,7 @@ class AsyncPortkey(AsyncAPIClient): moderations: apis.AsyncModerations audio: apis.AsyncAudio batches: apis.AsyncBatches + fine_tuning: apis.AsyncFineTuning class beta: assistants: apis.AsyncAssistants @@ -166,6 +169,7 @@ def __init__( self.moderations = apis.AsyncModerations(self) self.audio = apis.AsyncAudio(self) self.batches = apis.AsyncBatches(self) + self.fine_tuning = apis.AsyncFineTuning(self) self.beta = self.beta(self) # type: ignore def copy( diff --git a/portkey_ai/api_resources/types/fine_tuning_type.py b/portkey_ai/api_resources/types/fine_tuning_type.py new file mode 100644 index 00000000..81c11dae --- /dev/null +++ b/portkey_ai/api_resources/types/fine_tuning_type.py @@ -0,0 +1,163 @@ +import json +from typing import Dict, Optional, Union +import httpx +from .utils import parse_headers +from typing import List, Any +from pydantic import BaseModel, PrivateAttr +from openai.types.fine_tuning import FineTuningJobWandbIntegrationObject + +__all__ = ["Error", "Hyperparameters", "FineTuningJob", "FineTuningJobList", "FineTuningJobEvent", "FineTuningJobEventList", "Metrics", "FineTuningJobCheckpoint", "FineTuningJobCheckpointList"] + +class Error(BaseModel): + code: str + message: str + param: Optional[str] = None + +class Hyperparameters(BaseModel): + n_epochs: Union[str, int] + +class FineTuningJob(BaseModel): + id: str + created_at: int + error: Optional[Error] = None + fine_tuned_model: Optional[str] = None + finished_at: Optional[int] = None + hyperparameters: Hyperparameters + model: str + object: str + organization_id: str + result_files: List[str] + seed: int + status: str + trained_tokens: Optional[int] = None + training_file: str + validation_file: Optional[str] = None + estimated_finish: 
Optional[int] = None + integrations: Optional[List[FineTuningJobWandbIntegrationObject]] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class FineTuningJobList(BaseModel): + object: Optional[str] = None + data: Optional[List[FineTuningJob]] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + + +class FineTuningJobEvent(BaseModel): + id: str + created_at: int + level: str + message: str + object: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class FineTuningJobEventList(BaseModel): + object: Optional[str] = None + data: Optional[List[FineTuningJobEvent]] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class Metrics(BaseModel): + full_valid_loss: Optional[float] = None + full_valid_mean_token_accuracy: Optional[float] = None + step: Optional[float] = None + train_loss: Optional[float] = None + train_mean_token_accuracy: Optional[float] = None + valid_loss: Optional[float] = None + valid_mean_token_accuracy: Optional[float] = None + +class FineTuningJobCheckpoint(BaseModel): + id: str + created_at: int + fine_tuned_model_checkpoint: str + fine_tuning_job_id: str + metrics: Metrics + object: str + step_number: int + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class FineTuningJobCheckpointList(BaseModel): + object: Optional[str] = None + data: Optional[List[FineTuningJobCheckpoint]] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) \ No newline at end of 
file From dd85e56abab4734f3a9bb94cf46029a16fca9a50 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 8 May 2024 14:08:28 +0530 Subject: [PATCH 08/38] fix: assistants files removed --- portkey_ai/__init__.py | 4 - portkey_ai/api_resources/__init__.py | 4 - portkey_ai/api_resources/apis/__init__.py | 4 +- portkey_ai/api_resources/apis/assistants.py | 149 +++++++++--------- .../api_resources/types/assistant_type.py | 66 ++++---- 5 files changed, 106 insertions(+), 121 deletions(-) diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index b78418fa..9533b343 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -36,8 +36,6 @@ AsyncModels, ThreadFiles, AsyncThreadFiles, - AssistantFiles, - AsyncAssistantFiles, Runs, AsyncRuns, Steps, @@ -115,8 +113,6 @@ "AsyncModels", "ThreadFiles", "AsyncThreadFiles", - "AssistantFiles", - "AsyncAssistantFiles", "Runs", "AsyncRuns", "Steps", diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py index ddfdd45b..ba43f4e6 100644 --- a/portkey_ai/api_resources/__init__.py +++ b/portkey_ai/api_resources/__init__.py @@ -25,8 +25,6 @@ AsyncModels, ThreadFiles, AsyncThreadFiles, - AssistantFiles, - AsyncAssistantFiles, Runs, AsyncRuns, Steps, @@ -109,8 +107,6 @@ "AsyncModels", "ThreadFiles", "AsyncThreadFiles", - "AssistantFiles", - "AsyncAssistantFiles", "Runs", "AsyncRuns", "Steps", diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py index 0cd89fda..631f1492 100644 --- a/portkey_ai/api_resources/apis/__init__.py +++ b/portkey_ai/api_resources/apis/__init__.py @@ -6,7 +6,7 @@ from .post import Post, AsyncPost from .embeddings import Embeddings, AsyncEmbeddings from .images import Images, AsyncImages -from .assistants import Assistants, AssistantFiles, AsyncAssistants, AsyncAssistantFiles +from .assistants import Assistants, AsyncAssistants from .threads import ( Threads, Messages, @@ -50,9 +50,7 @@ "AsyncMainFiles", "Models", "AsyncModels", - "AssistantFiles", "ThreadFiles", - "AsyncAssistantFiles", "AsyncThreadFiles", "Threads", "AsyncThreads", diff --git a/portkey_ai/api_resources/apis/assistants.py b/portkey_ai/api_resources/apis/assistants.py index 40b2dc4a..3d5af960 100644 --- a/portkey_ai/api_resources/apis/assistants.py +++ b/portkey_ai/api_resources/apis/assistants.py @@ -5,9 +5,6 @@ Assistant, AssistantList, AssistantDeleted, - AssistantFile, - AssistantFileList, - AssistantFileDeleted, ) @@ -15,7 +12,6 @@ class Assistants(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - self.files = AssistantFiles(client) def create(self, **kwargs) -> Assistant: response = self.openai_client.with_raw_response.beta.assistants.create(**kwargs) @@ -59,53 +55,52 @@ def delete(self, assistant_id, **kwargs) -> AssistantDeleted: return data -class AssistantFiles(APIResource): - def __init__(self, client: Portkey) -> None: - super().__init__(client) - self.openai_client = client.openai_client +# class AssistantFiles(APIResource): +# def __init__(self, client: Portkey) -> None: +# super().__init__(client) +# self.openai_client = client.openai_client - def create(self, assistant_id, file_id, **kwargs) -> AssistantFile: - response = self.openai_client.with_raw_response.beta.assistants.files.create( - assistant_id=assistant_id, file_id=file_id, **kwargs - ) - data = AssistantFile(**json.loads(response.text)) - data._headers = response.headers +# def create(self, assistant_id, file_id, **kwargs) -> 
AssistantFile: +# response = self.openai_client.with_raw_response.beta.assistants.files.create( +# assistant_id=assistant_id, file_id=file_id, **kwargs +# ) +# data = AssistantFile(**json.loads(response.text)) +# data._headers = response.headers - return data +# return data - def list(self, assistant_id, **kwargs) -> AssistantFileList: - response = self.openai_client.with_raw_response.beta.assistants.files.list( - assistant_id=assistant_id, **kwargs - ) - data = AssistantFileList(**json.loads(response.text)) - data._headers = response.headers +# def list(self, assistant_id, **kwargs) -> AssistantFileList: +# response = self.openai_client.with_raw_response.beta.assistants.files.list( +# assistant_id=assistant_id, **kwargs +# ) +# data = AssistantFileList(**json.loads(response.text)) +# data._headers = response.headers - return data +# return data - def retrieve(self, assistant_id, file_id, **kwargs) -> AssistantFile: - response = self.openai_client.with_raw_response.beta.assistants.files.retrieve( - assistant_id=assistant_id, file_id=file_id, **kwargs - ) - data = AssistantFile(**json.loads(response.text)) - data._headers = response.headers +# def retrieve(self, assistant_id, file_id, **kwargs) -> AssistantFile: +# response = self.openai_client.with_raw_response.beta.assistants.files.retrieve( +# assistant_id=assistant_id, file_id=file_id, **kwargs +# ) +# data = AssistantFile(**json.loads(response.text)) +# data._headers = response.headers - return data +# return data - def delete(self, assistant_id, file_id, **kwargs) -> AssistantFileDeleted: - response = self.openai_client.with_raw_response.beta.assistants.files.delete( - assistant_id=assistant_id, file_id=file_id, **kwargs - ) - data = AssistantFileDeleted(**json.loads(response.text)) - data._headers = response.headers +# def delete(self, assistant_id, file_id, **kwargs) -> AssistantFileDeleted: +# response = self.openai_client.with_raw_response.beta.assistants.files.delete( +# assistant_id=assistant_id, file_id=file_id, **kwargs +# ) +# data = AssistantFileDeleted(**json.loads(response.text)) +# data._headers = response.headers - return data +# return data class AsyncAssistants(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - self.files = AsyncAssistantFiles(client) async def create(self, **kwargs) -> Assistant: response = await self.openai_client.with_raw_response.beta.assistants.create( @@ -153,51 +148,51 @@ async def delete(self, assistant_id, **kwargs) -> AssistantDeleted: return data -class AsyncAssistantFiles(AsyncAPIResource): - def __init__(self, client: AsyncPortkey) -> None: - super().__init__(client) - self.openai_client = client.openai_client +# class AsyncAssistantFiles(AsyncAPIResource): +# def __init__(self, client: AsyncPortkey) -> None: +# super().__init__(client) +# self.openai_client = client.openai_client - async def create(self, assistant_id, file_id, **kwargs) -> AssistantFile: - response = ( - await self.openai_client.with_raw_response.beta.assistants.files.create( - assistant_id=assistant_id, file_id=file_id, **kwargs - ) - ) - data = AssistantFile(**json.loads(response.text)) - data._headers = response.headers +# async def create(self, assistant_id, file_id, **kwargs) -> AssistantFile: +# response = ( +# await self.openai_client.with_raw_response.beta.assistants.files.create( +# assistant_id=assistant_id, file_id=file_id, **kwargs +# ) +# ) +# data = AssistantFile(**json.loads(response.text)) +# data._headers = 
response.headers - return data +# return data - async def list(self, assistant_id, **kwargs) -> AssistantFileList: - response = ( - await self.openai_client.with_raw_response.beta.assistants.files.list( - assistant_id=assistant_id, **kwargs - ) - ) - data = AssistantFileList(**json.loads(response.text)) - data._headers = response.headers +# async def list(self, assistant_id, **kwargs) -> AssistantFileList: +# response = ( +# await self.openai_client.with_raw_response.beta.assistants.files.list( +# assistant_id=assistant_id, **kwargs +# ) +# ) +# data = AssistantFileList(**json.loads(response.text)) +# data._headers = response.headers - return data +# return data - async def retrieve(self, assistant_id, file_id, **kwargs) -> AssistantFile: - response = ( - await self.openai_client.with_raw_response.beta.assistants.files.retrieve( - assistant_id=assistant_id, file_id=file_id, **kwargs - ) - ) - data = AssistantFile(**json.loads(response.text)) - data._headers = response.headers +# async def retrieve(self, assistant_id, file_id, **kwargs) -> AssistantFile: +# response = ( +# await self.openai_client.with_raw_response.beta.assistants.files.retrieve( +# assistant_id=assistant_id, file_id=file_id, **kwargs +# ) +# ) +# data = AssistantFile(**json.loads(response.text)) +# data._headers = response.headers - return data +# return data - async def delete(self, assistant_id, file_id, **kwargs) -> AssistantFileDeleted: - response = ( - await self.openai_client.with_raw_response.beta.assistants.files.delete( - assistant_id=assistant_id, file_id=file_id, **kwargs - ) - ) - data = AssistantFileDeleted(**json.loads(response.text)) - data._headers = response.headers +# async def delete(self, assistant_id, file_id, **kwargs) -> AssistantFileDeleted: +# response = ( +# await self.openai_client.with_raw_response.beta.assistants.files.delete( +# assistant_id=assistant_id, file_id=file_id, **kwargs +# ) +# ) +# data = AssistantFileDeleted(**json.loads(response.text)) +# data._headers = response.headers - return data +# return data diff --git a/portkey_ai/api_resources/types/assistant_type.py b/portkey_ai/api_resources/types/assistant_type.py index 87720441..ddb62a14 100644 --- a/portkey_ai/api_resources/types/assistant_type.py +++ b/portkey_ai/api_resources/types/assistant_type.py @@ -94,46 +94,46 @@ def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) -class AssistantFile(BaseModel, extra="allow"): - id: Optional[str] - assistant_id: Optional[str] - created_at: Optional[int] - object: Optional[str] - _headers: Optional[httpx.Headers] = PrivateAttr() +# class AssistantFile(BaseModel, extra="allow"): +# id: Optional[str] +# assistant_id: Optional[str] +# created_at: Optional[int] +# object: Optional[str] +# _headers: Optional[httpx.Headers] = PrivateAttr() - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) +# def __str__(self): +# del self._headers +# return json.dumps(self.dict(), indent=4) - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) +# def get_headers(self) -> Optional[Dict[str, str]]: +# return parse_headers(self._headers) -class AssistantFileList(BaseModel, extra="allow"): - object: Optional[str] - data: Optional[List[AssistantFile]] - first_id: Optional[str] - last_id: Optional[str] - has_more: Optional[bool] - _headers: Optional[httpx.Headers] = PrivateAttr() +# class AssistantFileList(BaseModel, extra="allow"): +# object: Optional[str] +# data: Optional[List[AssistantFile]] +# first_id: 
Optional[str] +# last_id: Optional[str] +# has_more: Optional[bool] +# _headers: Optional[httpx.Headers] = PrivateAttr() - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) +# def __str__(self): +# del self._headers +# return json.dumps(self.dict(), indent=4) - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) +# def get_headers(self) -> Optional[Dict[str, str]]: +# return parse_headers(self._headers) -class AssistantFileDeleted(BaseModel, extra="allow"): - id: Optional[str] - deleted: Optional[bool] - object: Optional[str] - _headers: Optional[httpx.Headers] = PrivateAttr() +# class AssistantFileDeleted(BaseModel, extra="allow"): +# id: Optional[str] +# deleted: Optional[bool] +# object: Optional[str] +# _headers: Optional[httpx.Headers] = PrivateAttr() - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) +# def __str__(self): +# del self._headers +# return json.dumps(self.dict(), indent=4) - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) +# def get_headers(self) -> Optional[Dict[str, str]]: +# return parse_headers(self._headers) From e08dbb296d35e4f687fcf65634fc84b6444d29ee Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 8 May 2024 18:34:06 +0530 Subject: [PATCH 09/38] feat: beta vector_store added, existing beta updated --- portkey_ai/__init__.py | 12 + portkey_ai/api_resources/__init__.py | 12 + portkey_ai/api_resources/apis/__init__.py | 8 + portkey_ai/api_resources/apis/threads.py | 707 +++++++++++++-- .../api_resources/apis/vector_stores.py | 802 ++++++++++++++++++ portkey_ai/api_resources/client.py | 4 + .../types/thread_message_type.py | 25 +- .../api_resources/types/vector_stores_type.py | 135 +++ 8 files changed, 1646 insertions(+), 59 deletions(-) create mode 100644 portkey_ai/api_resources/apis/vector_stores.py create mode 100644 portkey_ai/api_resources/types/vector_stores_type.py diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index 9533b343..3bd1355b 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -58,6 +58,12 @@ AsyncFineTuning, AsyncJobs, AsyncCheckpoints, + VectorStores, + VectorFiles, + VectorFileBatches, + AsyncVectorStores, + AsyncVectorFiles, + AsyncVectorFileBatches, ) from portkey_ai.version import VERSION @@ -135,4 +141,10 @@ "AsyncFineTuning", "AsyncJobs", "AsyncCheckpoints", + "VectorStores", + "VectorFiles", + "VectorFileBatches", + "AsyncVectorStores", + "AsyncVectorFiles", + "AsyncVectorFileBatches", ] diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py index ba43f4e6..a13b4b9a 100644 --- a/portkey_ai/api_resources/__init__.py +++ b/portkey_ai/api_resources/__init__.py @@ -47,6 +47,12 @@ AsyncFineTuning, AsyncJobs, AsyncCheckpoints, + VectorStores, + VectorFiles, + VectorFileBatches, + AsyncVectorStores, + AsyncVectorFiles, + AsyncVectorFileBatches, ) from .utils import ( Modes, @@ -129,4 +135,10 @@ "AsyncFineTuning", "AsyncJobs", "AsyncCheckpoints", + "VectorStores", + "VectorFiles", + "VectorFileBatches", + "AsyncVectorStores", + "AsyncVectorFiles", + "AsyncVectorFileBatches", ] diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py index 631f1492..463e3a4b 100644 --- a/portkey_ai/api_resources/apis/__init__.py +++ b/portkey_ai/api_resources/apis/__init__.py @@ -25,6 +25,8 @@ from .audio import Audio, Transcriptions, Translations, Speech, AsyncAudio, AsyncTranscriptions, AsyncTranslations, 
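
# The new vector store surface is re-exported from the package root, so downstream
# imports stay flat (names exactly as added to __all__ above):
from portkey_ai import (
    VectorStores,
    VectorFiles,
    VectorFileBatches,
    AsyncVectorStores,
    AsyncVectorFiles,
    AsyncVectorFileBatches,
)
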
AsyncSpeech from .batches import Batches, AsyncBatches from .fine_tuning import FineTuning, Jobs, Checkpoints, AsyncFineTuning, AsyncJobs, AsyncCheckpoints +from .vector_stores import VectorStores, VectorFiles, VectorFileBatches, AsyncVectorStores, AsyncVectorFiles, AsyncVectorFileBatches + __all__ = [ "Completion", @@ -78,4 +80,10 @@ "AsyncFineTuning", "AsyncJobs", "AsyncCheckpoints", + "VectorStores", + "VectorFiles", + "VectorFileBatches", + "AsyncVectorStores", + "AsyncVectorFiles", + "AsyncVectorFileBatches", ] diff --git a/portkey_ai/api_resources/apis/threads.py b/portkey_ai/api_resources/apis/threads.py index 3ba0aecc..eff778ec 100644 --- a/portkey_ai/api_resources/apis/threads.py +++ b/portkey_ai/api_resources/apis/threads.py @@ -1,11 +1,12 @@ import json +from typing import Iterable, Optional, Union from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.types.thread_message_type import ( - MessageFile, MessageList, ThreadMessage, + ThreadMessageDeleted, ) from portkey_ai.api_resources.types.thread_run_type import ( Run, @@ -14,7 +15,23 @@ RunStepList, ) from portkey_ai.api_resources.types.thread_type import Thread, ThreadDeleted - +from openai._types import NotGiven, NOT_GIVEN +from openai.types.beta import thread_create_and_run_params +from openai.types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam +from openai.types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam +from openai.lib.streaming import ( + AssistantEventHandler, + AssistantEventHandlerT, + AssistantStreamManager, + AsyncAssistantEventHandler, + AsyncAssistantEventHandlerT, + AsyncAssistantStreamManager, +) +from openai.types.beta.threads import ( + run_create_params, + run_submit_tool_outputs_params, +) +from openai.types.beta.assistant_tool_param import AssistantToolParam class Threads(APIResource): def __init__(self, client: Portkey) -> None: @@ -70,12 +87,97 @@ def create_and_run(self, assistant_id, **kwargs) -> Run: data._headers = response.headers return data + def create_and_run_poll( + self, + *, + assistant_id: str, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[Optional[AssistantResponseFormatOptionParam] , NotGiven] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + thread: Union[thread_create_and_run_params.Thread, NotGiven] = NOT_GIVEN, + tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tool_resources: Union[Optional[thread_create_and_run_params.ToolResources], NotGiven] = NOT_GIVEN, + tools: Union[Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[Optional[thread_create_and_run_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> Run: + + response = self.openai_client.beta.threads.create_and_run_poll( + assistant_id=assistant_id, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + 
temperature=temperature, + thread=thread, + tool_choice=tool_choice, + tool_resources=tool_resources, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + poll_interval_ms=poll_interval_ms, + **kwargs + ) + data = Run(**json.loads(response.text)) + data._headers = response.headers + + return data + + def create_and_run_stream( + self, + *, + assistant_id: str, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + thread: Union[thread_create_and_run_params.Thread, NotGiven] = NOT_GIVEN, + tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tool_resources: Union[Optional[thread_create_and_run_params.ToolResources], NotGiven] = NOT_GIVEN, + tools: Union[Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[Optional[thread_create_and_run_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + event_handler: Union[AssistantEventHandlerT, None] = None, + **kwargs, + ) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]: + + response = self.openai_client.beta.threads.create_and_run_stream( + assistant_id=assistant_id, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + thread=thread, + tool_choice=tool_choice, + tool_resources=tool_resources, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + event_handler=event_handler, + **kwargs + ) + data = response + return data + class Messages(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - self.files = ThreadFiles(client) def create(self, thread_id, **kwargs) -> ThreadMessage: response = self.openai_client.with_raw_response.beta.threads.messages.create( @@ -110,33 +212,50 @@ def update(self, thread_id, message_id, **kwargs) -> ThreadMessage: data._headers = response.headers return data - -class ThreadFiles(APIResource): - def __init__(self, client: Portkey) -> None: - super().__init__(client) - self.openai_client = client.openai_client - - def list(self, thread_id, message_id, **kwargs) -> MessageList: - response = ( - self.openai_client.with_raw_response.beta.threads.messages.files.list( - thread_id=thread_id, message_id=message_id, **kwargs - ) + def delete( + self, + message_id: str, + *, + thread_id: str, + **kwargs + ) -> ThreadMessageDeleted: + response = self.openai_client.with_raw_response.beta.threads.messages.delete( + message_id=message_id, + thread_id=thread_id, + **kwargs ) - data = MessageList(**json.loads(response.text)) + data = ThreadMessageDeleted(**json.loads(response.text)) data._headers = response.headers return data + - def retrieve(self, thread_id, message_id, file_id, **kwargs) -> MessageFile: - response = ( - self.openai_client.with_raw_response.beta.threads.messages.files.retrieve( - thread_id=thread_id, message_id=message_id, file_id=file_id, **kwargs - ) - ) - data 
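
# create_and_run_poll blocks until the run settles, while create_and_run_stream
# hands back the OpenAI stream manager for incremental events. A sketch assuming an
# existing assistant id ("asst_placeholder" is a placeholder) and the `portkey`
# client from the first sketch.
run = portkey.beta.threads.create_and_run_poll(
    assistant_id="asst_placeholder",
    thread={"messages": [{"role": "user", "content": "Summarise our Q3 notes."}]},
)
print(run.status)  # e.g. "completed"

with portkey.beta.threads.create_and_run_stream(
    assistant_id="asst_placeholder",
    thread={"messages": [{"role": "user", "content": "Stream this answer."}]},
) as stream:
    stream.until_done()
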
= MessageFile(**json.loads(response.text)) - data._headers = response.headers +# class ThreadFiles(APIResource): +# def __init__(self, client: Portkey) -> None: +# super().__init__(client) +# self.openai_client = client.openai_client - return data +# def list(self, thread_id, message_id, **kwargs) -> MessageList: +# response = ( +# self.openai_client.with_raw_response.beta.threads.messages.files.list( +# thread_id=thread_id, message_id=message_id, **kwargs +# ) +# ) +# data = MessageList(**json.loads(response.text)) +# data._headers = response.headers + +# return data + +# def retrieve(self, thread_id, message_id, file_id, **kwargs) -> MessageFile: +# response = ( +# self.openai_client.with_raw_response.beta.threads.messages.files.retrieve( +# thread_id=thread_id, message_id=message_id, file_id=file_id, **kwargs +# ) +# ) +# data = MessageFile(**json.loads(response.text)) +# data._headers = response.headers + +# return data class Runs(APIResource): @@ -201,6 +320,189 @@ def cancel(self, thread_id, run_id, **kwargs) -> Run: return data + def create_and_poll( + self, + *, + assistant_id: str, + additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + additional_messages: Union[Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven] = NOT_GIVEN, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[Optional[run_create_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + thread_id: str, + **kwargs, + )-> Run: + response = self.openai_client.beta.threads.runs.create_and_poll( + assistant_id=assistant_id, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + poll_interval_ms=poll_interval_ms, + thread_id=thread_id, + **kwargs + ) + data = Run(**json.loads(response.text)) + data._headers = response.headers + + return data + + def create_and_stream( + self, + *, + assistant_id: str, + additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + additional_messages: Union[Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven] = NOT_GIVEN, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: 
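
# The run-level helpers mirror the thread-level ones but target an existing thread.
# A sketch assuming the usual portkey.beta.threads.runs wiring and placeholder ids:
thread = portkey.beta.threads.create()
portkey.beta.threads.messages.create(
    thread_id=thread.id, role="user", content="What changed in this release?"
)
run = portkey.beta.threads.runs.create_and_poll(
    assistant_id="asst_placeholder",
    thread_id=thread.id,
)
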
Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[Optional[run_create_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + thread_id: str, + event_handler: Union[AssistantEventHandlerT, None] = None, + **kwargs, + )-> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]: + response = self.openai_client.beta.threads.runs.create_and_stream( + assistant_id=assistant_id, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + thread_id=thread_id, + event_handler=event_handler, + **kwargs + ) + data = response + return data + + def poll( + self, + *, + run_id: str, + thread_id: str, + **kwargs, + ) -> Run: + response = self.openai_client.beta.threads.runs.poll( + run_id=run_id, + thread_id=thread_id, + **kwargs + ) + data = response + + return data + + def stream( + self, + *, + assistant_id: str, + additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + additional_messages: Union[Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven] = NOT_GIVEN, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[Optional[run_create_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + thread_id: str, + event_handler: Union[AssistantEventHandlerT, None] = None, + **kwargs, + )-> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]: + + response = self.openai_client.beta.threads.runs.stream( + assistant_id=assistant_id, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + thread_id=thread_id, + event_handler=event_handler, + **kwargs + ) + data = response + return data + + def submit_tool_outputs_and_poll( + self, + *, + tool_outputs: Union[Iterable[run_submit_tool_outputs_params.ToolOutput]], + run_id: str, + thread_id: str, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + )-> Run: + response = self.openai_client.beta.threads.runs.submit_tool_outputs_and_poll( + tool_outputs=tool_outputs, + run_id=run_id, + thread_id=thread_id, + poll_interval_ms=poll_interval_ms + ) + data = 
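
# submit_tool_outputs_and_poll closes the function-calling loop in one call: answer
# every pending tool call, then poll until the run leaves "requires_action".
# Continues the sketch above; the tool output value is fabricated for illustration.
if run.status == "requires_action":
    calls = run.required_action.submit_tool_outputs.tool_calls
    run = portkey.beta.threads.runs.submit_tool_outputs_and_poll(
        tool_outputs=[{"tool_call_id": call.id, "output": "42"} for call in calls],
        run_id=run.id,
        thread_id=thread.id,
    )
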
response + + return data + + def submit_tool_outputs_stream( + self, + *, + tool_outputs: Union[Iterable[run_submit_tool_outputs_params.ToolOutput]], + run_id: str, + thread_id: str, + event_handler: Union[AssistantEventHandlerT, None] = None, + ) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]: + response = self.openai_client.beta.threads.runs.submit_tool_outputs_stream( + tool_outputs=tool_outputs, + run_id=run_id, + thread_id=thread_id, + event_handler=event_handler + ) + data = response + + return data class Steps(APIResource): def __init__(self, client: Portkey) -> None: @@ -284,12 +586,100 @@ async def create_and_run(self, assistant_id, **kwargs) -> Run: data._headers = response.headers return data + async def create_and_run_poll( + self, + *, + assistant_id: str, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[object] | NotGiven = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[Optional[AssistantResponseFormatOptionParam] , NotGiven] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + thread: Union[thread_create_and_run_params.Thread, NotGiven] = NOT_GIVEN, + tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tool_resources: Union[Optional[thread_create_and_run_params.ToolResources], NotGiven] = NOT_GIVEN, + tools: Union[Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[Optional[thread_create_and_run_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> Run: + + response = await self.openai_client.beta.threads.create_and_run_poll( + assistant_id=assistant_id, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + thread=thread, + tool_choice=tool_choice, + tool_resources=tool_resources, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + poll_interval_ms=poll_interval_ms, + **kwargs + ) + data = Run(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def create_and_run_stream( + self, + *, + assistant_id: str, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + thread: Union[thread_create_and_run_params.Thread, NotGiven] = NOT_GIVEN, + tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tool_resources: Union[Optional[thread_create_and_run_params.ToolResources], NotGiven] = NOT_GIVEN, + tools: Union[Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[Optional[thread_create_and_run_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + event_handler: 
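
# The streaming variant accepts an AssistantEventHandler subclass from the OpenAI
# helpers. A minimal handler that prints text deltas; all ids are placeholders.
from typing_extensions import override
from openai import AssistantEventHandler

class PrintHandler(AssistantEventHandler):
    @override
    def on_text_delta(self, delta, snapshot):
        print(delta.value, end="", flush=True)

with portkey.beta.threads.runs.submit_tool_outputs_stream(
    tool_outputs=[{"tool_call_id": "call_placeholder", "output": "42"}],
    run_id="run_placeholder",
    thread_id="thread_placeholder",
    event_handler=PrintHandler(),
) as stream:
    stream.until_done()
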
Union[AsyncAssistantEventHandlerT, None] = None, + **kwargs, + ) -> ( + AsyncAssistantStreamManager[AsyncAssistantEventHandler] + | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT] + ): + + response = await self.openai_client.beta.threads.create_and_run_stream( + assistant_id=assistant_id, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + thread=thread, + tool_choice=tool_choice, + tool_resources=tool_resources, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + event_handler=event_handler, + **kwargs + ) + data = response + return data + class AsyncMessages(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - self.files = AsyncThreadFiles(client) async def create(self, thread_id, **kwargs) -> ThreadMessage: response = ( @@ -332,42 +722,61 @@ async def update(self, thread_id, message_id, **kwargs) -> ThreadMessage: data._headers = response.headers return data - -class AsyncThreadFiles(AsyncAPIResource): - def __init__(self, client: AsyncPortkey) -> None: - super().__init__(client) - self.openai_client = client.openai_client - - async def list(self, thread_id, message_id, **kwargs) -> MessageList: + async def delete( + self, + message_id: str, + *, + thread_id: str, + **kwargs + ) -> ThreadMessageDeleted: response = ( - await self.openai_client.with_raw_response.beta.threads.messages.files.list( - thread_id=thread_id, message_id=message_id, **kwargs + await self.openai_client.with_raw_response.beta.threads.messages.delete( + message_id=message_id, + thread_id=thread_id, + **kwargs ) ) - data = MessageList(**json.loads(response.text)) + data = ThreadMessageDeleted(**json.loads(response.text)) data._headers = response.headers return data - async def retrieve(self, thread_id, message_id, file_id, **kwargs) -> MessageFile: - # fmt: off - response = await self.openai_client\ - .with_raw_response\ - .beta\ - .threads\ - .messages\ - .files\ - .retrieve( - thread_id=thread_id, - message_id=message_id, - file_id=file_id, - **kwargs - ) - # fmt: off - data = MessageFile(**json.loads( response.text)) - data._headers = response.headers - return data +# class AsyncThreadFiles(AsyncAPIResource): +# def __init__(self, client: AsyncPortkey) -> None: +# super().__init__(client) +# self.openai_client = client.openai_client + +# async def list(self, thread_id, message_id, **kwargs) -> MessageList: +# response = ( +# await self.openai_client.with_raw_response.beta.threads.messages.files.list( +# thread_id=thread_id, message_id=message_id, **kwargs +# ) +# ) +# data = MessageList(**json.loads(response.text)) +# data._headers = response.headers + +# return data + +# async def retrieve(self, thread_id, message_id, file_id, **kwargs) -> MessageFile: +# # fmt: off +# response = await self.openai_client\ +# .with_raw_response\ +# .beta\ +# .threads\ +# .messages\ +# .files\ +# .retrieve( +# thread_id=thread_id, +# message_id=message_id, +# file_id=file_id, +# **kwargs +# ) +# # fmt: off +# data = MessageFile(**json.loads( response.text)) +# data._headers = response.headers + +# return data class AsyncRuns(AsyncAPIResource): @@ -444,6 +853,198 @@ async def cancel(self, thread_id, run_id, **kwargs) -> Run: return data + async def create_and_poll( + self, + *, + assistant_id: str, + additional_instructions: Union[Optional[str], NotGiven] = 
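
# The async mirror returns an AsyncAssistantStreamManager; note the method itself
# is awaited before entering the context. A sketch assuming an AsyncPortkey client
# with placeholder credentials.
import asyncio
from portkey_ai import AsyncPortkey

aportkey = AsyncPortkey(api_key="PORTKEY_API_KEY", virtual_key="OPENAI_VIRTUAL_KEY")

async def main() -> None:
    manager = await aportkey.beta.threads.create_and_run_stream(
        assistant_id="asst_placeholder",
        thread={"messages": [{"role": "user", "content": "Stream, please."}]},
    )
    async with manager as stream:
        await stream.until_done()

asyncio.run(main())
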
NOT_GIVEN, + additional_messages: Union[Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven] = NOT_GIVEN, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[Optional[run_create_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + thread_id: str, + **kwargs, + )-> Run: + response = await self.openai_client.beta.threads.runs.create_and_poll( + assistant_id=assistant_id, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + poll_interval_ms=poll_interval_ms, + thread_id=thread_id, + **kwargs + ) + data = Run(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def create_and_stream( + self, + *, + assistant_id: str, + additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + additional_messages: Union[Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven] = NOT_GIVEN, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[Optional[run_create_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + thread_id: str, + event_handler: Union[AsyncAssistantEventHandlerT, None] = None, + **kwargs, + )-> ( + AsyncAssistantStreamManager[AsyncAssistantEventHandler] + | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT] + ): + response = await self.openai_client.beta.threads.runs.create_and_stream( + assistant_id=assistant_id, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + thread_id=thread_id, + event_handler=event_handler, + **kwargs + ) + data = response + return data + + async def poll( + 
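
# Async polling follows the same shape; poll_interval_ms throttles the status
# checks. Reuses the `aportkey` client from the previous sketch; ids are
# placeholders.
async def run_to_completion(thread_id: str):
    return await aportkey.beta.threads.runs.create_and_poll(
        assistant_id="asst_placeholder",
        thread_id=thread_id,
        poll_interval_ms=1000,
    )
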
self, + *, + run_id: str, + thread_id: str, + **kwargs, + ) -> Run: + response = await self.openai_client.beta.threads.runs.poll( + run_id=run_id, + thread_id=thread_id, + **kwargs + ) + data = response + + return data + + async def stream( + self, + *, + assistant_id: str, + additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + additional_messages: Union[Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven] = NOT_GIVEN, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[Optional[run_create_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + thread_id: str, + event_handler: Union[AsyncAssistantEventHandlerT, None] = None, + **kwargs, + ) -> ( + AsyncAssistantStreamManager[AsyncAssistantEventHandler] + | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT] + ): + response = await self.openai_client.beta.threads.runs.stream( + assistant_id=assistant_id, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + thread_id=thread_id, + event_handler=event_handler, + **kwargs + ) + data = response + return data + + async def submit_tool_outputs_and_poll( + self, + *, + tool_outputs: Union[Iterable[run_submit_tool_outputs_params.ToolOutput]], + run_id: str, + thread_id: str, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + ) -> Run: + response = await self.openai_client.beta.threads.runs.submit_tool_outputs_and_poll( + tool_outputs=tool_outputs, + run_id=run_id, + thread_id=thread_id, + poll_interval_ms=poll_interval_ms + ) + data = response + + return data + + async def submit_tool_outputs_stream( + self, + *, + tool_outputs: Union[Iterable[run_submit_tool_outputs_params.ToolOutput]], + run_id: str, + thread_id: str, + event_handler: Union[AsyncAssistantEventHandlerT, None] = None, + ) -> ( + AsyncAssistantStreamManager[AsyncAssistantEventHandler] + | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT] + ): + response = await self.openai_client.beta.threads.runs.submit_tool_outputs_stream( + tool_outputs=tool_outputs, + run_id=run_id, + thread_id=thread_id, + event_handler=event_handler + ) + data = response + + return data + class AsyncSteps(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: diff --git a/portkey_ai/api_resources/apis/vector_stores.py b/portkey_ai/api_resources/apis/vector_stores.py new file mode 100644 index 00000000..73ea25e1 --- /dev/null +++ b/portkey_ai/api_resources/apis/vector_stores.py @@ -0,0 +1,802 @@ +from typing import Iterable, List, Optional, Union +from portkey_ai.api_resources.apis.api_resource import APIResource, 
AsyncAPIResource +from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from openai._types import NotGiven, NOT_GIVEN, FileTypes +from openai.types.beta import ( + vector_store_create_params, + vector_store_update_params, +) + +from portkey_ai.api_resources.types.vector_stores_type import ( + VectorStore, + VectorStoreDeleted, + VectorStoreFile, + VectorStoreFileBatch, + VectorStoreFileDeleted, + VectorStoreFileList, + VectorStoreList, +) + + +class VectorStores(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.files = VectorFiles(client) + self.file_batches = VectorFileBatches(client) + + def create( + self, + *, + expires_after: Union[ + vector_store_create_params.ExpiresAfter, NotGiven + ] = NOT_GIVEN, + file_ids: Union[List[str], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + name: Union[str, NotGiven] = NOT_GIVEN, + ) -> VectorStore: + response = self.openai_client.with_raw_response.beta.vector_stores.create( + expires_after=expires_after, + file_ids=file_ids, + metadata=metadata, + name=name, + ) + data = VectorStore(**response.json()) + data._headers = response.headers + + return data + + def retrieve( + self, + vector_store_id: str, + **kwargs, + ) -> VectorStore: + response = self.openai_client.with_raw_response.beta.vector_stores.retrieve( + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStore(**response.json()) + data._headers = response.headers + + return data + + def update( + self, + vector_store_id: str, + *, + expires_after: Union[ + vector_store_update_params.ExpiresAfter, NotGiven + ] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + name: Union[str, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStore: + response = self.openai_client.with_raw_response.beta.vector_stores.update( + vector_store_id=vector_store_id, + expires_after=expires_after, + metadata=metadata, + name=name, + **kwargs, + ) + data = VectorStore(**response.json()) + data._headers = response.headers + + return data + + def list( + self, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[str, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreList: + response = self.openai_client.with_raw_response.beta.vector_stores.list( + after=after, + limit=limit, + order=order, + **kwargs, + ) + data = VectorStoreList(**response.json()) + data._headers = response.headers + + return data + + def delete( + self, + vector_store_id: str, + **kwargs, + ) -> VectorStoreDeleted: + response = self.openai_client.with_raw_response.beta.vector_stores.delete( + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStoreDeleted(**response.json()) + data._headers = response.headers + + return data + + +class VectorFiles(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def create( + self, + vector_store_id: str, + *, + file_id: str, + **kwargs, + ) -> VectorStoreFile: + response = self.openai_client.with_raw_response.beta.vector_stores.files.create( + vector_store_id=vector_store_id, + file_id=file_id, + **kwargs, + ) + data = VectorStoreFile(**response.json()) + data._headers = response.headers + + return data + + def retrieve( + self, + file_id: str, + *, + vector_store_id: str, + **kwargs, + ) -> VectorStoreFile: + response = ( + self.openai_client.with_raw_response.beta.vector_stores.files.retrieve( 
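
# Vector stores get full CRUD plus access to the raw response headers. A sketch
# that creates a store and reads the gateway headers back off the typed response
# (reuses the `portkey` client from the first sketch):
vector_store = portkey.beta.vector_stores.create(name="release-notes")
print(vector_store.status, vector_store.get_headers())
stores = portkey.beta.vector_stores.list(limit=10)
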
+ file_id=file_id, + vector_store_id=vector_store_id, + **kwargs, + ) + ) + data = VectorStoreFile(**response.json()) + data._headers = response.headers + + return data + + def list( + self, + vector_store_id: str, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + before: Union[str, NotGiven] = NOT_GIVEN, + filter: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[str, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFileList: + response = self.openai_client.with_raw_response.beta.vector_stores.files.list( + vector_store_id=vector_store_id, + after=after, + before=before, + filter=filter, + limit=limit, + order=order, + **kwargs, + ) + data = VectorStoreFileList(**response.json()) + data._headers = response.headers + + return data + + def delete( + self, + file_id: str, + *, + vector_store_id: str, + **kwargs, + ) -> VectorStoreFileDeleted: + response = self.openai_client.with_raw_response.beta.vector_stores.files.delete( + file_id=file_id, + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStoreFileDeleted(**response.json()) + data._headers = response.headers + + return data + + def create_and_poll( + self, + file_id: str, + *, + vector_store_id: str, + poll_interval: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFile: + response = self.openai_client.beta.vector_stores.files.create_and_poll( + file_id=file_id, + vector_store_id=vector_store_id, + poll_interval=poll_interval, + **kwargs, + ) + data = response + + return data + + def poll( + self, + file_id: str, + *, + vector_store_id: str, + poll_interval: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFile: + response = self.openai_client.beta.vector_stores.files.poll( + file_id=file_id, + vector_store_id=vector_store_id, + poll_interval=poll_interval, + **kwargs, + ) + data = response + + return data + + def upload( + self, + *, + vector_store_id: str, + file: FileTypes, + **kwargs, + ) -> VectorStoreFile: + response = self.openai_client.beta.vector_stores.files.upload( + vector_store_id=vector_store_id, + file=file, + **kwargs, + ) + data = response + return data + + def upload_and_poll( + self, + *, + vector_store_id: str, + file: FileTypes, + poll_interval: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFile: + response = self.openai_client.beta.vector_stores.files.upload_and_poll( + vector_store_id=vector_store_id, + file=file, + poll_interval=poll_interval, + **kwargs, + ) + data = response + return data + + +class VectorFileBatches(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def create( + self, + vector_store_id: str, + *, + file_ids: List[str], + **kwargs, + ) -> VectorStoreFileBatch: + response = self.openai_client.with_raw_response.beta.vector_stores.file_batches.create( + vector_store_id=vector_store_id, + file_ids=file_ids, + **kwargs, + ) + data = VectorStoreFileBatch(**response.json()) + data._headers = response.headers + + return data + + def retrieve( + self, + batch_id:str, + *, + vector_store_id: str, + **kwargs + ) -> VectorStoreFileBatch: + response = self.openai_client.with_raw_response.beta.vector_stores.file_batches.retrieve( + batch_id=batch_id, + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStoreFileBatch(**response.json()) + data._headers = response.headers + + return data + + def cancel( + self, + batch_id:str, + *, + vector_store_id: str, + **kwargs + ) -> VectorStoreFileBatch: + response = 
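
# upload_and_poll rolls upload-then-attach-then-poll into a single call. A sketch
# with a placeholder local file, reusing `vector_store` from the sketch above:
with open("notes.md", "rb") as fh:
    vs_file = portkey.beta.vector_stores.files.upload_and_poll(
        vector_store_id=vector_store.id,
        file=fh,
    )
print(vs_file.status)  # "completed" once indexing finishes
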
self.openai_client.with_raw_response.beta.vector_stores.file_batches.cancel( + batch_id=batch_id, + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStoreFileBatch(**response.json()) + data._headers = response.headers + + return data + + def create_and_poll( + self, + vector_store_id: str, + *, + file_ids: List[str], + poll_interval: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + )-> VectorStoreFileBatch: + response = self.openai_client.beta.vector_stores.file_batches.create_and_poll( + vector_store_id=vector_store_id, + file_ids=file_ids, + poll_interval=poll_interval, + **kwargs, + ) + data = response + return data + + def list_files( + self, + batch_id: str, + *, + vector_store_id: str, + after: Union[str, NotGiven] = NOT_GIVEN, + before: Union[str, NotGiven] = NOT_GIVEN, + filter: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[str, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFileList: + response = self.openai_client.with_raw_response.beta.vector_stores.file_batches.list_files( + batch_id=batch_id, + vector_store_id=vector_store_id, + after=after, + before=before, + filter=filter, + limit=limit, + order=order, + **kwargs, + ) + data = VectorStoreFileBatch(**response.json()) + data._headers = response.headers + + return data + + def poll( + self, + batch_id: str, + *, + vector_store_id: str, + poll_interval: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFileBatch: + response = self.openai_client.beta.vector_stores.file_batches.poll( + batch_id=batch_id, + vector_store_id=vector_store_id, + poll_interval=poll_interval, + **kwargs, + ) + data = response + + return data + + def upload_and_poll( + self, + vector_store_id: str, + *, + files: Iterable[FileTypes], + max_concurrency: int = 5, + file_ids: List[str] = [], + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + **kwargs + ) -> VectorStoreFileBatch: + response = self.openai_client.beta.vector_stores.file_batches.upload_and_poll( + vector_store_id=vector_store_id, + files=files, + max_concurrency=max_concurrency, + file_ids=file_ids, + poll_interval_ms=poll_interval_ms, + **kwargs, + ) + data = response + + return data + + +class AsyncVectorStores(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.files = AsyncVectorFiles(client) + self.file_batches = AsyncVectorFileBatches(client) + + async def create( + self, + *, + expires_after: Union[ + vector_store_create_params.ExpiresAfter, NotGiven + ] = NOT_GIVEN, + file_ids: Union[List[str], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + name: Union[str, NotGiven] = NOT_GIVEN, + ) -> VectorStore: + response = await self.openai_client.with_raw_response.beta.vector_stores.create( + expires_after=expires_after, + file_ids=file_ids, + metadata=metadata, + name=name, + ) + data = VectorStore(**response.json()) + data._headers = response.headers + + return data + + async def retrieve( + self, + vector_store_id: str, + **kwargs, + ) -> VectorStore: + response = ( + await self.openai_client.with_raw_response.beta.vector_stores.retrieve( + vector_store_id=vector_store_id, + **kwargs, + ) + ) + data = VectorStore(**response.json()) + data._headers = response.headers + + return data + + async def update( + self, + vector_store_id: str, + *, + expires_after: Union[ + vector_store_update_params.ExpiresAfter, NotGiven + ] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = 
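
# File batches parallelise ingestion: upload several files, attach them, and poll
# them as one unit. The file names and concurrency cap below are illustrative only.
batch = portkey.beta.vector_stores.file_batches.upload_and_poll(
    vector_store_id=vector_store.id,
    files=[open("a.md", "rb"), open("b.md", "rb")],
    max_concurrency=5,
)
print(batch.file_counts.completed, "of", batch.file_counts.total)
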
NOT_GIVEN, + name: Union[str, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStore: + response = await self.openai_client.with_raw_response.beta.vector_stores.update( + vector_store_id=vector_store_id, + expires_after=expires_after, + metadata=metadata, + name=name, + **kwargs, + ) + data = VectorStore(**response.json()) + data._headers = response.headers + + return data + + async def list( + self, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[str, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreList: + response = await self.openai_client.with_raw_response.beta.vector_stores.list( + after=after, + limit=limit, + order=order, + **kwargs, + ) + data = VectorStoreList(**response.json()) + data._headers = response.headers + + return data + + async def delete( + self, + vector_store_id: str, + **kwargs, + ) -> VectorStoreDeleted: + response = await self.openai_client.with_raw_response.beta.vector_stores.delete( + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStoreDeleted(**response.json()) + data._headers = response.headers + + return data + + +class AsyncVectorFiles(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def create( + self, + vector_store_id: str, + *, + file_id: str, + **kwargs, + ) -> VectorStoreFile: + response = await self.openai_client.with_raw_response.beta.vector_stores.files.create( + vector_store_id=vector_store_id, + file_id=file_id, + **kwargs, + ) + data = VectorStoreFile(**response.json()) + data._headers = response.headers + + return data + + async def retrieve( + self, + file_id: str, + *, + vector_store_id: str, + **kwargs, + ) -> VectorStoreFile: + response = ( + await self.openai_client.with_raw_response.beta.vector_stores.files.retrieve( + file_id=file_id, + vector_store_id=vector_store_id, + **kwargs, + ) + ) + data = VectorStoreFile(**response.json()) + data._headers = response.headers + + return data + + async def list( + self, + vector_store_id: str, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + before: Union[str, NotGiven] = NOT_GIVEN, + filter: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[str, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFileList: + response = await self.openai_client.with_raw_response.beta.vector_stores.files.list( + vector_store_id=vector_store_id, + after=after, + before=before, + filter=filter, + limit=limit, + order=order, + **kwargs, + ) + data = VectorStoreFileList(**response.json()) + data._headers = response.headers + + return data + + async def delete( + self, + file_id: str, + *, + vector_store_id: str, + **kwargs, + ) -> VectorStoreFileDeleted: + response = await self.openai_client.with_raw_response.beta.vector_stores.files.delete( + file_id=file_id, + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStoreFileDeleted(**response.json()) + data._headers = response + + async def create_and_poll( + self, + file_id: str, + *, + vector_store_id: str, + poll_interval: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFile: + response = await self.openai_client.beta.vector_stores.files.create_and_poll( + file_id=file_id, + vector_store_id=vector_store_id, + poll_interval=poll_interval, + **kwargs, + ) + data = response + + return data + + async def poll( + self, + file_id: str, + *, + vector_store_id: str, + poll_interval: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, 
+ ) -> VectorStoreFile: + response = await self.openai_client.beta.vector_stores.files.poll( + file_id=file_id, + vector_store_id=vector_store_id, + poll_interval=poll_interval, + **kwargs, + ) + data = response + + return data + + async def upload( + self, + *, + vector_store_id: str, + file: FileTypes, + **kwargs, + ) -> VectorStoreFile: + response = await self.openai_client.beta.vector_stores.files.upload( + vector_store_id=vector_store_id, + file=file, + **kwargs, + ) + data = response + return data + + async def upload_and_poll( + self, + *, + vector_store_id: str, + file: FileTypes, + poll_interval: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFile: + response = await self.openai_client.beta.vector_stores.files.upload_and_poll( + vector_store_id=vector_store_id, + file=file, + poll_interval=poll_interval, + **kwargs, + ) + data = response + return data + + +class AsyncVectorFileBatches(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def create( + self, + vector_store_id: str, + *, + file_ids: List[str], + **kwargs, + ) -> VectorStoreFileBatch: + response = await self.openai_client.with_raw_response.beta.vector_stores.file_batches.create( + vector_store_id=vector_store_id, + file_ids=file_ids, + **kwargs, + ) + data = VectorStoreFileBatch(**response.json()) + data._headers = response.headers + + return data + + async def retrieve( + self, + batch_id:str, + *, + vector_store_id: str, + **kwargs + ) -> VectorStoreFileBatch: + response = await self.openai_client.with_raw_response.beta.vector_stores.file_batches.retrieve( + batch_id=batch_id, + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStoreFileBatch(**response.json()) + data._headers = response.headers + + return data + + async def cancel( + self, + batch_id:str, + *, + vector_store_id: str, + **kwargs + ) -> VectorStoreFileBatch: + response = await self.openai_client.with_raw_response.beta.vector_stores.file_batches.cancel( + batch_id=batch_id, + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStoreFileBatch(**response.json()) + data._headers = response.headers + + return data + + async def create_and_poll( + self, + vector_store_id: str, + *, + file_ids: List[str], + poll_interval: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + )-> VectorStoreFileBatch: + response = await self.openai_client.beta.vector_stores.file_batches.create_and_poll( + vector_store_id=vector_store_id, + file_ids=file_ids, + poll_interval=poll_interval, + **kwargs, + ) + data = response + return data + + async def list_files( + self, + batch_id: str, + *, + vector_store_id: str, + after: Union[str, NotGiven] = NOT_GIVEN, + before: Union[str, NotGiven] = NOT_GIVEN, + filter: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[str, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFileList: + response = await self.openai_client.beta.with_raw_response.vector_stores.file_batches.list_files( + batch_id=batch_id, + vector_store_id=vector_store_id, + after=after, + before=before, + filter=filter, + limit=limit, + order=order, + **kwargs, + ) + data = VectorStoreFileBatch(**response.json()) + data._headers = response.headers + + return data + + async def poll( + self, + batch_id: str, + *, + vector_store_id: str, + poll_interval: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFileBatch: + response = await 
self.openai_client.beta.vector_stores.file_batches.poll( + batch_id=batch_id, + vector_store_id=vector_store_id, + poll_interval=poll_interval, + **kwargs, + ) + data = response + + return data + + async def upload_and_poll( + self, + vector_store_id: str, + *, + files: Iterable[FileTypes], + max_concurrency: int = 5, + file_ids: List[str] = [], + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + **kwargs + ) -> VectorStoreFileBatch: + response = await self.openai_client.beta.vector_stores.file_batches.upload_and_poll( + vector_store_id=vector_store_id, + files=files, + max_concurrency=max_concurrency, + file_ids=file_ids, + poll_interval_ms=poll_interval_ms, + **kwargs, + ) + data = response + + return data \ No newline at end of file diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index 785e62eb..6dc85277 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -27,10 +27,12 @@ class Portkey(APIClient): class beta: assistants: apis.Assistants threads: apis.Threads + vector_stores: apis.VectorStores def __init__(self, client: Portkey) -> None: self.assistants = apis.Assistants(client) self.threads = apis.Threads(client) + self.vector_stores = apis.VectorStores(client) def __init__( self, @@ -123,10 +125,12 @@ class AsyncPortkey(AsyncAPIClient): class beta: assistants: apis.AsyncAssistants threads: apis.AsyncThreads + vector_stores: apis.AsyncVectorStores def __init__(self, client: AsyncPortkey) -> None: self.assistants = apis.AsyncAssistants(client) self.threads = apis.AsyncThreads(client) + self.vector_stores = apis.AsyncVectorStores(client) def __init__( self, diff --git a/portkey_ai/api_resources/types/thread_message_type.py b/portkey_ai/api_resources/types/thread_message_type.py index d754a466..59df0c4e 100644 --- a/portkey_ai/api_resources/types/thread_message_type.py +++ b/portkey_ai/api_resources/types/thread_message_type.py @@ -107,11 +107,24 @@ def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) -class MessageFile(BaseModel, extra="allow"): - id: Optional[str] - object: Optional[str] - created_at: Optional[int] - message_id: Optional[str] +# class MessageFile(BaseModel, extra="allow"): +# id: Optional[str] +# object: Optional[str] +# created_at: Optional[int] +# message_id: Optional[str] +# _headers: Optional[httpx.Headers] = PrivateAttr() + +# def __str__(self): +# del self._headers +# return json.dumps(self.dict(), indent=4) + +# def get_headers(self) -> Optional[Dict[str, str]]: +# return parse_headers(self._headers) + +class ThreadMessageDeleted(BaseModel): + id: str + deleted: bool + object: str _headers: Optional[httpx.Headers] = PrivateAttr() def __str__(self): @@ -119,4 +132,4 @@ def __str__(self): return json.dumps(self.dict(), indent=4) def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) + return parse_headers(self._headers) \ No newline at end of file diff --git a/portkey_ai/api_resources/types/vector_stores_type.py b/portkey_ai/api_resources/types/vector_stores_type.py new file mode 100644 index 00000000..e33b02e7 --- /dev/null +++ b/portkey_ai/api_resources/types/vector_stores_type.py @@ -0,0 +1,135 @@ +import json +from typing import Dict, List, Optional +import httpx +from .utils import parse_headers +from pydantic import BaseModel, PrivateAttr + +__all__=["LastError", "ExpiresAfter" ,"VectorStore", "VectorStoreList", "VectorStoreDeleted", "VectorStoreFile", "VectorStoreFileList", "VectorStoreFileDeleted", "FileCounts", 
"VectorStoreFileBatch"] + +class FileCounts(BaseModel): + cancelled: int + completed: int + failed: int + in_progress: int + total: int + +class ExpiresAfter(BaseModel): + anchor: str + days: int + +class VectorStore(BaseModel): + id: str + created_at: int + file_counts: FileCounts + last_active_at: Optional[int] = None + metadata: Optional[object] = None + name: str + object: str + status: str + usage_bytes: int + expires_after: Optional[ExpiresAfter] = None + expires_at: Optional[int] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + +class VectorStoreList(BaseModel): + data: List[VectorStore] + object: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class VectorStoreDeleted(BaseModel): + id: str + deleted: bool + object: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + +class LastError(BaseModel): + code: str + message: str + +class VectorStoreFile(BaseModel): + id: str + created_at: int + last_error: Optional[LastError] = None + object: str + status: str + usage_bytes: int + vector_store_id: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + +class VectorStoreFileList(BaseModel): + data: List[VectorStoreFile] + object: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class VectorStoreFileDeleted(BaseModel): + id: str + deleted: bool + object:str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + +class FileCounts(BaseModel): + cancelled: int + completed: int + failed: int + in_progress: int + total: int + +class VectorStoreFileBatch(BaseModel): + id: str + created_at: int + file_counts: FileCounts + object: str + status: str + vector_store_id: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) From 0ffd817a40750338d513d62b1ff04effd53d73a9 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Thu, 9 May 2024 20:45:49 +0530 Subject: [PATCH 10/38] fix: still fixing linting issues --- portkey_ai/__init__.py | 4 - portkey_ai/api_resources/__init__.py | 4 - portkey_ai/api_resources/apis/__init__.py | 4 - portkey_ai/api_resources/apis/assistants.py | 92 ------------------- portkey_ai/api_resources/apis/audio.py | 27 +++--- portkey_ai/api_resources/apis/batches.py | 3 + portkey_ai/api_resources/apis/fine_tuning.py | 27 +++--- portkey_ai/api_resources/apis/main_files.py | 8 +- 
portkey_ai/api_resources/apis/threads.py | 81 ++-------------- .../api_resources/apis/vector_stores.py | 52 +++++------ .../api_resources/types/vector_stores_type.py | 7 -- 11 files changed, 71 insertions(+), 238 deletions(-) diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index 3bd1355b..c876d789 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -34,8 +34,6 @@ AsyncMainFiles, Models, AsyncModels, - ThreadFiles, - AsyncThreadFiles, Runs, AsyncRuns, Steps, @@ -117,8 +115,6 @@ "AsyncMainFiles", "Models", "AsyncModels", - "ThreadFiles", - "AsyncThreadFiles", "Runs", "AsyncRuns", "Steps", diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py index a13b4b9a..bdc8c0c3 100644 --- a/portkey_ai/api_resources/__init__.py +++ b/portkey_ai/api_resources/__init__.py @@ -23,8 +23,6 @@ AsyncMainFiles, Models, AsyncModels, - ThreadFiles, - AsyncThreadFiles, Runs, AsyncRuns, Steps, @@ -111,8 +109,6 @@ "AsyncMainFiles", "Models", "AsyncModels", - "ThreadFiles", - "AsyncThreadFiles", "Runs", "AsyncRuns", "Steps", diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py index 463e3a4b..90841bf0 100644 --- a/portkey_ai/api_resources/apis/__init__.py +++ b/portkey_ai/api_resources/apis/__init__.py @@ -10,12 +10,10 @@ from .threads import ( Threads, Messages, - ThreadFiles, Runs, Steps, AsyncThreads, AsyncMessages, - AsyncThreadFiles, AsyncRuns, AsyncSteps, ) @@ -52,8 +50,6 @@ "AsyncMainFiles", "Models", "AsyncModels", - "ThreadFiles", - "AsyncThreadFiles", "Threads", "AsyncThreads", "Messages", diff --git a/portkey_ai/api_resources/apis/assistants.py b/portkey_ai/api_resources/apis/assistants.py index 3d5af960..f417b494 100644 --- a/portkey_ai/api_resources/apis/assistants.py +++ b/portkey_ai/api_resources/apis/assistants.py @@ -55,48 +55,6 @@ def delete(self, assistant_id, **kwargs) -> AssistantDeleted: return data -# class AssistantFiles(APIResource): -# def __init__(self, client: Portkey) -> None: -# super().__init__(client) -# self.openai_client = client.openai_client - -# def create(self, assistant_id, file_id, **kwargs) -> AssistantFile: -# response = self.openai_client.with_raw_response.beta.assistants.files.create( -# assistant_id=assistant_id, file_id=file_id, **kwargs -# ) -# data = AssistantFile(**json.loads(response.text)) -# data._headers = response.headers - -# return data - -# def list(self, assistant_id, **kwargs) -> AssistantFileList: -# response = self.openai_client.with_raw_response.beta.assistants.files.list( -# assistant_id=assistant_id, **kwargs -# ) -# data = AssistantFileList(**json.loads(response.text)) -# data._headers = response.headers - -# return data - -# def retrieve(self, assistant_id, file_id, **kwargs) -> AssistantFile: -# response = self.openai_client.with_raw_response.beta.assistants.files.retrieve( -# assistant_id=assistant_id, file_id=file_id, **kwargs -# ) -# data = AssistantFile(**json.loads(response.text)) -# data._headers = response.headers - -# return data - -# def delete(self, assistant_id, file_id, **kwargs) -> AssistantFileDeleted: -# response = self.openai_client.with_raw_response.beta.assistants.files.delete( -# assistant_id=assistant_id, file_id=file_id, **kwargs -# ) -# data = AssistantFileDeleted(**json.loads(response.text)) -# data._headers = response.headers - -# return data - - class AsyncAssistants(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) @@ -146,53 +104,3 @@ async def delete(self, assistant_id, 
**kwargs) -> AssistantDeleted: data._headers = response.headers return data - - -# class AsyncAssistantFiles(AsyncAPIResource): -# def __init__(self, client: AsyncPortkey) -> None: -# super().__init__(client) -# self.openai_client = client.openai_client - -# async def create(self, assistant_id, file_id, **kwargs) -> AssistantFile: -# response = ( -# await self.openai_client.with_raw_response.beta.assistants.files.create( -# assistant_id=assistant_id, file_id=file_id, **kwargs -# ) -# ) -# data = AssistantFile(**json.loads(response.text)) -# data._headers = response.headers - -# return data - -# async def list(self, assistant_id, **kwargs) -> AssistantFileList: -# response = ( -# await self.openai_client.with_raw_response.beta.assistants.files.list( -# assistant_id=assistant_id, **kwargs -# ) -# ) -# data = AssistantFileList(**json.loads(response.text)) -# data._headers = response.headers - -# return data - -# async def retrieve(self, assistant_id, file_id, **kwargs) -> AssistantFile: -# response = ( -# await self.openai_client.with_raw_response.beta.assistants.files.retrieve( -# assistant_id=assistant_id, file_id=file_id, **kwargs -# ) -# ) -# data = AssistantFile(**json.loads(response.text)) -# data._headers = response.headers - -# return data - -# async def delete(self, assistant_id, file_id, **kwargs) -> AssistantFileDeleted: -# response = ( -# await self.openai_client.with_raw_response.beta.assistants.files.delete( -# assistant_id=assistant_id, file_id=file_id, **kwargs -# ) -# ) -# data = AssistantFileDeleted(**json.loads(response.text)) -# data._headers = response.headers - -# return data diff --git a/portkey_ai/api_resources/apis/audio.py b/portkey_ai/api_resources/apis/audio.py index 363c12fe..f87c7611 100644 --- a/portkey_ai/api_resources/apis/audio.py +++ b/portkey_ai/api_resources/apis/audio.py @@ -1,15 +1,17 @@ -from typing import List, Literal, Union +import json +from typing import Any, List, Union from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.base_client import APIClient from openai._types import NotGiven, NOT_GIVEN, FileTypes from portkey_ai.api_resources.client import AsyncPortkey, Portkey +import typing from portkey_ai.api_resources.types.audio_types import Transcription, Translation from portkey_ai.api_resources.utils import GenericResponse class Audio(APIResource): - def __init__(self, client: APIClient) -> None: + def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client self.transcriptions = Transcriptions(client) @@ -21,6 +23,7 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client + @typing.no_type_check def create( self, *, @@ -43,7 +46,7 @@ def create( timestamp_granularities=timestamp_granularities, **kwargs ) - data = Transcription(**response.json()) + data = Transcription(**json.loads(response.text)) data._headers = response.headers return data @@ -71,7 +74,7 @@ def create( temperature=temperature, **kwargs ) - data = Transcription(**response.json()) + data = Translation(**json.loads(response.text)) data._headers = response.headers return data @@ -81,6 +84,7 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client + @typing.no_type_check def create( self, *, @@ -89,7 +93,7 @@ def create( voice: str, response_format: Union[str, NotGiven] = NOT_GIVEN, speed: Union[float, NotGiven] = NOT_GIVEN, - **kwargs) -> GenericResponse: + 
**kwargs) -> Any: response = self.openai_client.with_raw_response.audio.speech.create( input=input, @@ -100,7 +104,7 @@ def create( **kwargs ) - data = GenericResponse(**response.json()) + data = GenericResponse(**json.loads(response.text)) data._headers = response.headers return data @@ -119,6 +123,7 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client + @typing.no_type_check async def create( self, *, @@ -141,7 +146,7 @@ async def create( timestamp_granularities=timestamp_granularities, **kwargs ) - data = Transcription(**response.json()) + data = Transcription(**json.loads(response.text)) data._headers = response.headers return data @@ -169,7 +174,7 @@ async def create( temperature=temperature, **kwargs ) - data = Transcription(**response.json()) + data = Translation(**json.loads(response.text)) data._headers = response.headers return data @@ -179,6 +184,7 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client + @typing.no_type_check async def create( self, *, @@ -187,7 +193,7 @@ async def create( voice: str, response_format: Union[str, NotGiven] = NOT_GIVEN, speed: Union[float, NotGiven] = NOT_GIVEN, - **kwargs) -> GenericResponse: + **kwargs) -> Any: response = await self.openai_client.with_raw_response.audio.speech.create( input=input, @@ -198,7 +204,6 @@ async def create( **kwargs ) - data = GenericResponse(**response.json()) - data._headers = response.headers + data = response return data \ No newline at end of file diff --git a/portkey_ai/api_resources/apis/batches.py b/portkey_ai/api_resources/apis/batches.py index a26e2d13..6eaeff14 100644 --- a/portkey_ai/api_resources/apis/batches.py +++ b/portkey_ai/api_resources/apis/batches.py @@ -1,5 +1,6 @@ import json from typing import Dict, Optional, Union +import typing from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey from openai._types import NotGiven, NOT_GIVEN @@ -12,6 +13,7 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client + @typing.no_type_check def create( self, *, @@ -82,6 +84,7 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client + @typing.no_type_check async def create( self, *, diff --git a/portkey_ai/api_resources/apis/fine_tuning.py b/portkey_ai/api_resources/apis/fine_tuning.py index 1f17e36a..6d185607 100644 --- a/portkey_ai/api_resources/apis/fine_tuning.py +++ b/portkey_ai/api_resources/apis/fine_tuning.py @@ -1,3 +1,4 @@ +import json from typing import Iterable, Optional, Union from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey @@ -49,7 +50,7 @@ def create( validation_file=validation_file, **kwargs, ) - data = FineTuningJob(**response.json()) + data = FineTuningJob(**json.loads(response.text)) data._headers = response.headers return data @@ -58,7 +59,7 @@ def retrieve(self, fine_tuning_job_id: str, **kwargs) -> FineTuningJob: response = self.openai_client.with_raw_response.fine_tuning.jobs.retrieve( fine_tuning_job_id=fine_tuning_job_id, **kwargs ) - data = FineTuningJob(**response.json()) + data = FineTuningJob(**json.loads(response.text)) data._headers = response.headers return data @@ -73,7 +74,7 @@ def list( response = 
self.openai_client.with_raw_response.fine_tuning.jobs.list( after=after, limit=limit, **kwargs ) - data = FineTuningJobList(**response.json()) + data = FineTuningJobList(**json.loads(response.text)) data._headers = response.headers return data @@ -82,7 +83,7 @@ def cancel(self, fine_tuning_job_id: str, **kwargs) -> FineTuningJob: response = self.openai_client.with_raw_response.fine_tuning.jobs.cancel( fine_tuning_job_id=fine_tuning_job_id, **kwargs ) - data = FineTuningJob(**response.json()) + data = FineTuningJob(**json.loads(response.text)) data._headers = response.headers return data @@ -98,7 +99,7 @@ def list_events( response = self.openai_client.with_raw_response.fine_tuning.jobs.list_events( fine_tuning_job_id=fine_tuning_job_id, after=after, limit=limit, **kwargs ) - data = FineTuningJobEventList(**response.json()) + data = FineTuningJobEventList(**json.loads(response.text)) data._headers = response.headers return data @@ -126,13 +127,13 @@ def list( ) ) - data = FineTuningJobCheckpointList(**response.json()) + data = FineTuningJobCheckpointList(**json.loads(response.text)) data._headers = response.headers return data -class AsyncFineTuning(APIResource): +class AsyncFineTuning(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client @@ -169,7 +170,7 @@ async def create( validation_file=validation_file, **kwargs, ) - data = FineTuningJob(**response.json()) + data = FineTuningJob(**json.loads(response.text)) data._headers = response.headers return data @@ -178,7 +179,7 @@ async def retrieve(self, fine_tuning_job_id: str, **kwargs) -> FineTuningJob: response = await self.openai_client.with_raw_response.fine_tuning.jobs.retrieve( fine_tuning_job_id=fine_tuning_job_id, **kwargs ) - data = FineTuningJob(**response.json()) + data = FineTuningJob(**json.loads(response.text)) data._headers = response.headers return data @@ -193,7 +194,7 @@ async def list( response = await self.openai_client.with_raw_response.fine_tuning.jobs.list( after=after, limit=limit, **kwargs ) - data = FineTuningJobList(**response.json()) + data = FineTuningJobList(**json.loads(response.text)) data._headers = response.headers return data @@ -202,7 +203,7 @@ async def cancel(self, fine_tuning_job_id: str, **kwargs) -> FineTuningJob: response = await self.openai_client.with_raw_response.fine_tuning.jobs.cancel( fine_tuning_job_id, **kwargs ) - data = FineTuningJob(**response.json()) + data = FineTuningJob(**json.loads(response.text)) data._headers = response.headers return data @@ -223,7 +224,7 @@ async def list_events( **kwargs, ) ) - data = FineTuningJobEventList(**response.json()) + data = FineTuningJobEventList(**json.loads(response.text)) data._headers = response.headers return data @@ -251,7 +252,7 @@ async def list( ) ) - data = FineTuningJobCheckpointList(**response.json()) + data = FineTuningJobCheckpointList(**json.loads(response.text)) data._headers = response.headers return data \ No newline at end of file diff --git a/portkey_ai/api_resources/apis/main_files.py b/portkey_ai/api_resources/apis/main_files.py index d6e34049..d3eba110 100644 --- a/portkey_ai/api_resources/apis/main_files.py +++ b/portkey_ai/api_resources/apis/main_files.py @@ -60,13 +60,13 @@ def wait_for_processing( self, id: str, *, - polling_interval: float = 5.0, + poll_interval: float = 5.0, max_wait_seconds: float = 30 * 60, **kwargs ) -> Any: response = self.openai_client.files.wait_for_processing( id=id, - polling_interval=polling_interval, + 
poll_interval=poll_interval, max_wait_seconds=max_wait_seconds, **kwargs ) @@ -124,13 +124,13 @@ async def wait_for_processing( self, id: str, *, - polling_interval: float = 5.0, + poll_interval: float = 5.0, max_wait_seconds: float = 30 * 60, **kwargs ) -> Any: response = await self.openai_client.files.wait_for_processing( id=id, - polling_interval=polling_interval, + poll_interval=poll_interval, max_wait_seconds=max_wait_seconds, **kwargs ) diff --git a/portkey_ai/api_resources/apis/threads.py b/portkey_ai/api_resources/apis/threads.py index eff778ec..ad2d9b0d 100644 --- a/portkey_ai/api_resources/apis/threads.py +++ b/portkey_ai/api_resources/apis/threads.py @@ -91,10 +91,10 @@ def create_and_run_poll( self, *, assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, response_format: Union[Optional[AssistantResponseFormatOptionParam] , NotGiven] = NOT_GIVEN, temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, @@ -228,34 +228,6 @@ def delete( data._headers = response.headers return data - - -# class ThreadFiles(APIResource): -# def __init__(self, client: Portkey) -> None: -# super().__init__(client) -# self.openai_client = client.openai_client - -# def list(self, thread_id, message_id, **kwargs) -> MessageList: -# response = ( -# self.openai_client.with_raw_response.beta.threads.messages.files.list( -# thread_id=thread_id, message_id=message_id, **kwargs -# ) -# ) -# data = MessageList(**json.loads(response.text)) -# data._headers = response.headers - -# return data - -# def retrieve(self, thread_id, message_id, file_id, **kwargs) -> MessageFile: -# response = ( -# self.openai_client.with_raw_response.beta.threads.messages.files.retrieve( -# thread_id=thread_id, message_id=message_id, file_id=file_id, **kwargs -# ) -# ) -# data = MessageFile(**json.loads(response.text)) -# data._headers = response.headers - -# return data class Runs(APIResource): @@ -742,43 +714,6 @@ async def delete( return data -# class AsyncThreadFiles(AsyncAPIResource): -# def __init__(self, client: AsyncPortkey) -> None: -# super().__init__(client) -# self.openai_client = client.openai_client - -# async def list(self, thread_id, message_id, **kwargs) -> MessageList: -# response = ( -# await self.openai_client.with_raw_response.beta.threads.messages.files.list( -# thread_id=thread_id, message_id=message_id, **kwargs -# ) -# ) -# data = MessageList(**json.loads(response.text)) -# data._headers = response.headers - -# return data - -# async def retrieve(self, thread_id, message_id, file_id, **kwargs) -> MessageFile: -# # fmt: off -# response = await self.openai_client\ -# .with_raw_response\ -# .beta\ -# .threads\ -# .messages\ -# .files\ -# .retrieve( -# thread_id=thread_id, -# message_id=message_id, -# file_id=file_id, -# **kwargs -# ) -# # fmt: off -# data = MessageFile(**json.loads( response.text)) -# data._headers = response.headers - -# return data - - class AsyncRuns(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) @@ -981,8 +916,8 @@ async def stream( event_handler: 
Union[AsyncAssistantEventHandlerT, None] = None, **kwargs, ) -> ( - AsyncAssistantStreamManager[AsyncAssistantEventHandler] - | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT] + Union[AsyncAssistantStreamManager[AsyncAssistantEventHandler], + AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]] ): response = await self.openai_client.beta.threads.runs.stream( assistant_id=assistant_id, @@ -1032,8 +967,8 @@ async def submit_tool_outputs_stream( thread_id: str, event_handler: Union[AsyncAssistantEventHandlerT, None] = None, ) -> ( - AsyncAssistantStreamManager[AsyncAssistantEventHandler] - | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT] + Union[AsyncAssistantStreamManager[AsyncAssistantEventHandler], + AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]] ): response = await self.openai_client.beta.threads.runs.submit_tool_outputs_stream( tool_outputs=tool_outputs, diff --git a/portkey_ai/api_resources/apis/vector_stores.py b/portkey_ai/api_resources/apis/vector_stores.py index 73ea25e1..e546f00e 100644 --- a/portkey_ai/api_resources/apis/vector_stores.py +++ b/portkey_ai/api_resources/apis/vector_stores.py @@ -41,7 +41,7 @@ def create( metadata=metadata, name=name, ) - data = VectorStore(**response.json()) + data = VectorStore(**json.loads(response.text)) data._headers = response.headers return data @@ -55,7 +55,7 @@ def retrieve( vector_store_id=vector_store_id, **kwargs, ) - data = VectorStore(**response.json()) + data = VectorStore(**json.loads(response.text)) data._headers = response.headers return data @@ -78,7 +78,7 @@ def update( name=name, **kwargs, ) - data = VectorStore(**response.json()) + data = VectorStore(**json.loads(response.text)) data._headers = response.headers return data @@ -97,7 +97,7 @@ def list( order=order, **kwargs, ) - data = VectorStoreList(**response.json()) + data = VectorStoreList(**json.loads(response.text)) data._headers = response.headers return data @@ -111,7 +111,7 @@ def delete( vector_store_id=vector_store_id, **kwargs, ) - data = VectorStoreDeleted(**response.json()) + data = VectorStoreDeleted(**json.loads(response.text)) data._headers = response.headers return data @@ -134,7 +134,7 @@ def create( file_id=file_id, **kwargs, ) - data = VectorStoreFile(**response.json()) + data = VectorStoreFile(**json.loads(response.text)) data._headers = response.headers return data @@ -153,7 +153,7 @@ def retrieve( **kwargs, ) ) - data = VectorStoreFile(**response.json()) + data = VectorStoreFile(**json.loads(response.text)) data._headers = response.headers return data @@ -178,7 +178,7 @@ def list( order=order, **kwargs, ) - data = VectorStoreFileList(**response.json()) + data = VectorStoreFileList(**json.loads(response.text)) data._headers = response.headers return data @@ -195,7 +195,7 @@ def delete( vector_store_id=vector_store_id, **kwargs, ) - data = VectorStoreFileDeleted(**response.json()) + data = VectorStoreFileDeleted(**json.loads(response.text)) data._headers = response.headers return data @@ -286,7 +286,7 @@ def create( file_ids=file_ids, **kwargs, ) - data = VectorStoreFileBatch(**response.json()) + data = VectorStoreFileBatch(**json.loads(response.text)) data._headers = response.headers return data @@ -303,7 +303,7 @@ def retrieve( vector_store_id=vector_store_id, **kwargs, ) - data = VectorStoreFileBatch(**response.json()) + data = VectorStoreFileBatch(**json.loads(response.text)) data._headers = response.headers return data @@ -320,7 +320,7 @@ def cancel( vector_store_id=vector_store_id, **kwargs, ) - data = 
VectorStoreFileBatch(**response.json()) + data = VectorStoreFileBatch(**json.loads(response.text)) data._headers = response.headers return data @@ -364,7 +364,7 @@ def list_files( order=order, **kwargs, ) - data = VectorStoreFileBatch(**response.json()) + data = VectorStoreFileBatch(**json.loads(response.text)) data._headers = response.headers return data @@ -433,7 +433,7 @@ async def create( metadata=metadata, name=name, ) - data = VectorStore(**response.json()) + data = VectorStore(**json.loads(response.text)) data._headers = response.headers return data @@ -449,7 +449,7 @@ async def retrieve( **kwargs, ) ) - data = VectorStore(**response.json()) + data = VectorStore(**json.loads(response.text)) data._headers = response.headers return data @@ -472,7 +472,7 @@ async def update( name=name, **kwargs, ) - data = VectorStore(**response.json()) + data = VectorStore(**json.loads(response.text)) data._headers = response.headers return data @@ -491,7 +491,7 @@ async def list( order=order, **kwargs, ) - data = VectorStoreList(**response.json()) + data = VectorStoreList(**json.loads(response.text)) data._headers = response.headers return data @@ -505,7 +505,7 @@ async def delete( vector_store_id=vector_store_id, **kwargs, ) - data = VectorStoreDeleted(**response.json()) + data = VectorStoreDeleted(**json.loads(response.text)) data._headers = response.headers return data @@ -528,7 +528,7 @@ async def create( file_id=file_id, **kwargs, ) - data = VectorStoreFile(**response.json()) + data = VectorStoreFile(**json.loads(response.text)) data._headers = response.headers return data @@ -547,7 +547,7 @@ async def retrieve( **kwargs, ) ) - data = VectorStoreFile(**response.json()) + data = VectorStoreFile(**json.loads(response.text)) data._headers = response.headers return data @@ -572,7 +572,7 @@ async def list( order=order, **kwargs, ) - data = VectorStoreFileList(**response.json()) + data = VectorStoreFileList(**json.loads(response.text)) data._headers = response.headers return data @@ -589,7 +589,7 @@ async def delete( vector_store_id=vector_store_id, **kwargs, ) - data = VectorStoreFileDeleted(**response.json()) + data = VectorStoreFileDeleted(**json.loads(response.text)) data._headers = response async def create_and_poll( @@ -678,7 +678,7 @@ async def create( file_ids=file_ids, **kwargs, ) - data = VectorStoreFileBatch(**response.json()) + data = VectorStoreFileBatch(**json.loads(response.text)) data._headers = response.headers return data @@ -695,7 +695,7 @@ async def retrieve( vector_store_id=vector_store_id, **kwargs, ) - data = VectorStoreFileBatch(**response.json()) + data = VectorStoreFileBatch(**json.loads(response.text)) data._headers = response.headers return data @@ -712,7 +712,7 @@ async def cancel( vector_store_id=vector_store_id, **kwargs, ) - data = VectorStoreFileBatch(**response.json()) + data = VectorStoreFileBatch(**json.loads(response.text)) data._headers = response.headers return data @@ -756,7 +756,7 @@ async def list_files( order=order, **kwargs, ) - data = VectorStoreFileBatch(**response.json()) + data = VectorStoreFileBatch(**json.loads(response.text)) data._headers = response.headers return data diff --git a/portkey_ai/api_resources/types/vector_stores_type.py b/portkey_ai/api_resources/types/vector_stores_type.py index e33b02e7..dbf1c06c 100644 --- a/portkey_ai/api_resources/types/vector_stores_type.py +++ b/portkey_ai/api_resources/types/vector_stores_type.py @@ -110,13 +110,6 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return 
parse_headers(self._headers) - -class FileCounts(BaseModel): - cancelled: int - completed: int - failed: int - in_progress: int - total: int class VectorStoreFileBatch(BaseModel): id: str From 1002f0afe79e227d21c3ac2e3d815bcdba0dc031 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Thu, 9 May 2024 21:37:30 +0530 Subject: [PATCH 11/38] fix: still fixing linting issues --- .../api_resources/apis/chat_complete.py | 2 +- portkey_ai/api_resources/apis/complete.py | 4 +- portkey_ai/api_resources/apis/threads.py | 44 +++++++++------- .../api_resources/apis/vector_stores.py | 52 +++++++++++-------- 4 files changed, 58 insertions(+), 44 deletions(-) diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index 2cae9381..5364a4d3 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -43,7 +43,7 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def stream_create( + def stream_create( # type: ignore[return] self, model, messages, stream, temperature, max_tokens, top_p, **kwargs ) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: with self.openai_client.with_streaming_response.chat.completions.create( diff --git a/portkey_ai/api_resources/apis/complete.py b/portkey_ai/api_resources/apis/complete.py index 84686da7..778a4316 100644 --- a/portkey_ai/api_resources/apis/complete.py +++ b/portkey_ai/api_resources/apis/complete.py @@ -17,7 +17,7 @@ def __init__(self, client: Portkey) -> None: self.openai_client = client.openai_client self.client = client - def stream_create( + def stream_create( # type: ignore[return] self, model, prompt, stream, temperature, max_tokens, top_p, **kwargs ) -> Union[TextCompletion, Iterator[TextCompletionChunk]]: with self.openai_client.with_streaming_response.completions.create( @@ -97,7 +97,7 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def stream_create( + async def stream_create( self, model, prompt, stream, temperature, max_tokens, top_p, **kwargs ) -> Union[TextCompletion, AsyncIterator[TextCompletionChunk]]: async with self.openai_client.with_streaming_response.completions.create( diff --git a/portkey_ai/api_resources/apis/threads.py b/portkey_ai/api_resources/apis/threads.py index ad2d9b0d..27707459 100644 --- a/portkey_ai/api_resources/apis/threads.py +++ b/portkey_ai/api_resources/apis/threads.py @@ -1,5 +1,6 @@ import json from typing import Iterable, Optional, Union +import typing from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey @@ -126,11 +127,11 @@ def create_and_run_poll( poll_interval_ms=poll_interval_ms, **kwargs ) - data = Run(**json.loads(response.text)) - data._headers = response.headers + data = response return data + @typing.no_type_check def create_and_run_stream( self, *, @@ -150,7 +151,7 @@ def create_and_run_stream( truncation_strategy: Union[Optional[thread_create_and_run_params.TruncationStrategy], NotGiven] = NOT_GIVEN, event_handler: Union[AssistantEventHandlerT, None] = None, **kwargs, - ) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]: + ) -> Union[AssistantStreamManager[AssistantEventHandler], AssistantStreamManager[AssistantEventHandlerT]]: response = self.openai_client.beta.threads.create_and_run_stream( 
assistant_id=assistant_id, @@ -332,11 +333,11 @@ def create_and_poll( thread_id=thread_id, **kwargs ) - data = Run(**json.loads(response.text)) - data._headers = response.headers + data = response return data + @typing.no_type_check def create_and_stream( self, *, @@ -357,7 +358,7 @@ def create_and_stream( thread_id: str, event_handler: Union[AssistantEventHandlerT, None] = None, **kwargs, - )-> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]: + )-> Union[AssistantStreamManager[AssistantEventHandler], AssistantStreamManager[AssistantEventHandlerT]]: response = self.openai_client.beta.threads.runs.create_and_stream( assistant_id=assistant_id, additional_instructions=additional_instructions, @@ -396,6 +397,7 @@ def poll( return data + @typing.no_type_check def stream( self, *, @@ -416,7 +418,7 @@ def stream( thread_id: str, event_handler: Union[AssistantEventHandlerT, None] = None, **kwargs, - )-> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]: + )-> Union[AssistantStreamManager[AssistantEventHandler], AssistantStreamManager[AssistantEventHandlerT]]: response = self.openai_client.beta.threads.runs.stream( assistant_id=assistant_id, @@ -465,7 +467,7 @@ def submit_tool_outputs_stream( run_id: str, thread_id: str, event_handler: Union[AssistantEventHandlerT, None] = None, - ) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]: + ) -> Union[AssistantStreamManager[AssistantEventHandler], AssistantStreamManager[AssistantEventHandlerT]]: response = self.openai_client.beta.threads.runs.submit_tool_outputs_stream( tool_outputs=tool_outputs, run_id=run_id, @@ -562,10 +564,10 @@ async def create_and_run_poll( self, *, assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, response_format: Union[Optional[AssistantResponseFormatOptionParam] , NotGiven] = NOT_GIVEN, temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, @@ -597,11 +599,11 @@ async def create_and_run_poll( poll_interval_ms=poll_interval_ms, **kwargs ) - data = Run(**json.loads(response.text)) - data._headers = response.headers + data = response return data + @typing.no_type_check async def create_and_run_stream( self, *, @@ -622,8 +624,8 @@ async def create_and_run_stream( event_handler: Union[AsyncAssistantEventHandlerT, None] = None, **kwargs, ) -> ( - AsyncAssistantStreamManager[AsyncAssistantEventHandler] - | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT] + Union[AsyncAssistantStreamManager[AsyncAssistantEventHandler], + AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]] ): response = await self.openai_client.beta.threads.create_and_run_stream( @@ -828,11 +830,11 @@ async def create_and_poll( thread_id=thread_id, **kwargs ) - data = Run(**json.loads(response.text)) - data._headers = response.headers + data = response return data + @typing.no_type_check async def create_and_stream( self, *, @@ -854,9 +856,10 @@ async def create_and_stream( event_handler: Union[AsyncAssistantEventHandlerT, 
None] = None, **kwargs, )-> ( - AsyncAssistantStreamManager[AsyncAssistantEventHandler] - | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT] + Union[AsyncAssistantStreamManager[AsyncAssistantEventHandler], + AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]] ): + response = await self.openai_client.beta.threads.runs.create_and_stream( assistant_id=assistant_id, additional_instructions=additional_instructions, @@ -895,6 +898,7 @@ async def poll( return data + @typing.no_type_check async def stream( self, *, diff --git a/portkey_ai/api_resources/apis/vector_stores.py b/portkey_ai/api_resources/apis/vector_stores.py index e546f00e..787e9a9b 100644 --- a/portkey_ai/api_resources/apis/vector_stores.py +++ b/portkey_ai/api_resources/apis/vector_stores.py @@ -1,4 +1,6 @@ +import json from typing import Iterable, List, Optional, Union +import typing from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey from openai._types import NotGiven, NOT_GIVEN, FileTypes @@ -83,6 +85,7 @@ def update( return data + @typing.no_type_check def list( self, *, @@ -158,6 +161,7 @@ def retrieve( return data + @typing.no_type_check def list( self, vector_store_id: str, @@ -205,13 +209,13 @@ def create_and_poll( file_id: str, *, vector_store_id: str, - poll_interval: Union[int, NotGiven] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, **kwargs, ) -> VectorStoreFile: response = self.openai_client.beta.vector_stores.files.create_and_poll( file_id=file_id, vector_store_id=vector_store_id, - poll_interval=poll_interval, + poll_interval_ms=poll_interval_ms, **kwargs, ) data = response @@ -223,13 +227,13 @@ def poll( file_id: str, *, vector_store_id: str, - poll_interval: Union[int, NotGiven] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, **kwargs, ) -> VectorStoreFile: response = self.openai_client.beta.vector_stores.files.poll( file_id=file_id, vector_store_id=vector_store_id, - poll_interval=poll_interval, + poll_interval_ms=poll_interval_ms, **kwargs, ) data = response @@ -256,13 +260,13 @@ def upload_and_poll( *, vector_store_id: str, file: FileTypes, - poll_interval: Union[int, NotGiven] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, **kwargs, ) -> VectorStoreFile: response = self.openai_client.beta.vector_stores.files.upload_and_poll( vector_store_id=vector_store_id, file=file, - poll_interval=poll_interval, + poll_interval_ms=poll_interval_ms, **kwargs, ) data = response @@ -330,18 +334,19 @@ def create_and_poll( vector_store_id: str, *, file_ids: List[str], - poll_interval: Union[int, NotGiven] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, **kwargs, )-> VectorStoreFileBatch: response = self.openai_client.beta.vector_stores.file_batches.create_and_poll( vector_store_id=vector_store_id, file_ids=file_ids, - poll_interval=poll_interval, + poll_interval_ms=poll_interval_ms, **kwargs, ) data = response return data + @typing.no_type_check def list_files( self, batch_id: str, @@ -374,13 +379,13 @@ def poll( batch_id: str, *, vector_store_id: str, - poll_interval: Union[int, NotGiven] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, **kwargs, ) -> VectorStoreFileBatch: response = self.openai_client.beta.vector_stores.file_batches.poll( batch_id=batch_id, vector_store_id=vector_store_id, - poll_interval=poll_interval, + poll_interval_ms=poll_interval_ms, **kwargs, ) data = response @@ -477,6 +482,7 @@ async def update( 
return data + @typing.no_type_check async def list( self, *, @@ -552,6 +558,7 @@ async def retrieve( return data + @typing.no_type_check async def list( self, vector_store_id: str, @@ -590,20 +597,22 @@ async def delete( **kwargs, ) data = VectorStoreFileDeleted(**json.loads(response.text)) - data._headers = response + data._headers = response.headers + + return data async def create_and_poll( self, file_id: str, *, vector_store_id: str, - poll_interval: Union[int, NotGiven] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, **kwargs, ) -> VectorStoreFile: response = await self.openai_client.beta.vector_stores.files.create_and_poll( file_id=file_id, vector_store_id=vector_store_id, - poll_interval=poll_interval, + poll_interval_ms=poll_interval_ms, **kwargs, ) data = response @@ -615,13 +624,13 @@ async def poll( file_id: str, *, vector_store_id: str, - poll_interval: Union[int, NotGiven] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, **kwargs, ) -> VectorStoreFile: response = await self.openai_client.beta.vector_stores.files.poll( file_id=file_id, vector_store_id=vector_store_id, - poll_interval=poll_interval, + poll_interval_ms=poll_interval_ms, **kwargs, ) data = response @@ -648,13 +657,13 @@ async def upload_and_poll( *, vector_store_id: str, file: FileTypes, - poll_interval: Union[int, NotGiven] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, **kwargs, ) -> VectorStoreFile: response = await self.openai_client.beta.vector_stores.files.upload_and_poll( vector_store_id=vector_store_id, file=file, - poll_interval=poll_interval, + poll_interval_ms=poll_interval_ms, **kwargs, ) data = response @@ -722,18 +731,19 @@ async def create_and_poll( vector_store_id: str, *, file_ids: List[str], - poll_interval: Union[int, NotGiven] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, **kwargs, )-> VectorStoreFileBatch: response = await self.openai_client.beta.vector_stores.file_batches.create_and_poll( vector_store_id=vector_store_id, file_ids=file_ids, - poll_interval=poll_interval, + poll_interval_ms=poll_interval_ms, **kwargs, ) data = response return data + @typing.no_type_check async def list_files( self, batch_id: str, @@ -766,13 +776,13 @@ async def poll( batch_id: str, *, vector_store_id: str, - poll_interval: Union[int, NotGiven] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, **kwargs, ) -> VectorStoreFileBatch: response = await self.openai_client.beta.vector_stores.file_batches.poll( batch_id=batch_id, vector_store_id=vector_store_id, - poll_interval=poll_interval, + poll_interval_ms=poll_interval_ms, **kwargs, ) data = response From 374205f015a72cf9be2b4483048f560cfdb59bbd Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Thu, 9 May 2024 21:56:38 +0530 Subject: [PATCH 12/38] fix: still fixing linting issues --- portkey_ai/api_resources/apis/threads.py | 20 ++++++------- .../api_resources/apis/vector_stores.py | 29 ++++++++++--------- 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/portkey_ai/api_resources/apis/threads.py b/portkey_ai/api_resources/apis/threads.py index 27707459..7e7a1a72 100644 --- a/portkey_ai/api_resources/apis/threads.py +++ b/portkey_ai/api_resources/apis/threads.py @@ -129,7 +129,7 @@ def create_and_run_poll( ) data = response - return data + return data # type: ignore[return-value] @typing.no_type_check def create_and_run_stream( @@ -335,7 +335,7 @@ def create_and_poll( ) data = response - return data + return data # type: ignore[return-value] 
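The `# type: ignore[return-value]` markers added here follow from the `data = response` change just above: the polling helpers now hand back the OpenAI SDK's own object instead of re-wrapping it in Portkey's `Run` type, so the declared return annotation no longer matches, and mypy's return-value check is silenced locally rather than loosening the annotation to `Any`. A usage sketch, assuming a configured client and that the run helpers are reachable under `beta.threads.runs` as in the OpenAI client layout (the key names and IDs are placeholders, not values from this patch):

from portkey_ai import Portkey

portkey = Portkey(
    api_key="PORTKEY_API_KEY",          # placeholder credentials
    virtual_key="OPENAI_VIRTUAL_KEY",
)

# Blocks until the run reaches a terminal state, checking at the given
# interval, and returns the SDK's run object unchanged.
run = portkey.beta.threads.runs.create_and_poll(
    thread_id="thread_abc123",
    assistant_id="asst_abc123",
    poll_interval_ms=1000,
)
print(run.status)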
@typing.no_type_check def create_and_stream( @@ -395,7 +395,7 @@ def poll( ) data = response - return data + return data # type: ignore[return-value] @typing.no_type_check def stream( @@ -458,7 +458,7 @@ def submit_tool_outputs_and_poll( ) data = response - return data + return data # type: ignore[return-value] def submit_tool_outputs_stream( self, @@ -601,7 +601,7 @@ async def create_and_run_poll( ) data = response - return data + return data # type: ignore[return-value] @typing.no_type_check async def create_and_run_stream( @@ -832,7 +832,7 @@ async def create_and_poll( ) data = response - return data + return data # type: ignore[return-value] @typing.no_type_check async def create_and_stream( @@ -896,7 +896,7 @@ async def poll( ) data = response - return data + return data # type: ignore[return-value] @typing.no_type_check async def stream( @@ -961,9 +961,9 @@ async def submit_tool_outputs_and_poll( ) data = response - return data + return data # type: ignore[return-value] - async def submit_tool_outputs_stream( + def submit_tool_outputs_stream( self, *, tool_outputs: Union[Iterable[run_submit_tool_outputs_params.ToolOutput]], @@ -974,7 +974,7 @@ async def submit_tool_outputs_stream( Union[AsyncAssistantStreamManager[AsyncAssistantEventHandler], AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]] ): - response = await self.openai_client.beta.threads.runs.submit_tool_outputs_stream( + response = self.openai_client.beta.threads.runs.submit_tool_outputs_stream( tool_outputs=tool_outputs, run_id=run_id, thread_id=thread_id, diff --git a/portkey_ai/api_resources/apis/vector_stores.py b/portkey_ai/api_resources/apis/vector_stores.py index 787e9a9b..fa5eee2a 100644 --- a/portkey_ai/api_resources/apis/vector_stores.py +++ b/portkey_ai/api_resources/apis/vector_stores.py @@ -220,7 +220,7 @@ def create_and_poll( ) data = response - return data + return data #type: ignore[return-value] def poll( self, @@ -238,7 +238,7 @@ def poll( ) data = response - return data + return data #type: ignore[return-value] def upload( self, @@ -253,7 +253,7 @@ def upload( **kwargs, ) data = response - return data + return data #type: ignore[return-value] def upload_and_poll( self, @@ -270,7 +270,7 @@ def upload_and_poll( **kwargs, ) data = response - return data + return data #type: ignore[return-value] class VectorFileBatches(APIResource): @@ -344,7 +344,7 @@ def create_and_poll( **kwargs, ) data = response - return data + return data #type: ignore[return-value] @typing.no_type_check def list_files( @@ -390,7 +390,7 @@ def poll( ) data = response - return data + return data # type: ignore[return-value] def upload_and_poll( self, @@ -412,7 +412,7 @@ def upload_and_poll( ) data = response - return data + return data # type: ignore[return-value] class AsyncVectorStores(AsyncAPIResource): @@ -617,7 +617,7 @@ async def create_and_poll( ) data = response - return data + return data # type: ignore[return-value] async def poll( self, @@ -635,7 +635,7 @@ async def poll( ) data = response - return data + return data # type: ignore[return-value] async def upload( self, @@ -650,7 +650,7 @@ async def upload( **kwargs, ) data = response - return data + return data # type: ignore[return-value] async def upload_and_poll( self, @@ -667,7 +667,7 @@ async def upload_and_poll( **kwargs, ) data = response - return data + return data # type: ignore[return-value] class AsyncVectorFileBatches(AsyncAPIResource): @@ -741,7 +741,8 @@ async def create_and_poll( **kwargs, ) data = response - return data + + return data # type: 
ignore[return-value] @typing.no_type_check async def list_files( @@ -787,7 +788,7 @@ async def poll( ) data = response - return data + return data # type: ignore[return-value] async def upload_and_poll( self, @@ -809,4 +810,4 @@ async def upload_and_poll( ) data = response - return data \ No newline at end of file + return data # type: ignore[return-value] \ No newline at end of file From b7d1c276306fc85ca6389a7acced04b9c0b9668e Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Fri, 10 May 2024 10:29:43 +0530 Subject: [PATCH 13/38] fix: linting issues fixed --- portkey_ai/api_resources/apis/__init__.py | 29 +- portkey_ai/api_resources/apis/audio.py | 151 ++++---- portkey_ai/api_resources/apis/batches.py | 79 ++-- portkey_ai/api_resources/apis/complete.py | 4 +- portkey_ai/api_resources/apis/fine_tuning.py | 14 +- portkey_ai/api_resources/apis/moderations.py | 38 +- portkey_ai/api_resources/apis/threads.py | 364 +++++++++++------- .../api_resources/apis/vector_stores.py | 232 ++++++----- .../api_resources/types/assistant_type.py | 3 - portkey_ai/api_resources/types/audio_types.py | 6 +- .../api_resources/types/batches_type.py | 3 +- .../api_resources/types/fine_tuning_type.py | 25 +- .../api_resources/types/moderations_type.py | 1 + .../types/thread_message_type.py | 3 +- .../api_resources/types/vector_stores_type.py | 27 +- 15 files changed, 551 insertions(+), 428 deletions(-) diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py index 90841bf0..04ff6c8b 100644 --- a/portkey_ai/api_resources/apis/__init__.py +++ b/portkey_ai/api_resources/apis/__init__.py @@ -20,10 +20,33 @@ from .main_files import MainFiles, AsyncMainFiles from .models import Models, AsyncModels from .moderations import Moderations, AsyncModerations -from .audio import Audio, Transcriptions, Translations, Speech, AsyncAudio, AsyncTranscriptions, AsyncTranslations, AsyncSpeech +from .audio import ( + Audio, + Transcriptions, + Translations, + Speech, + AsyncAudio, + AsyncTranscriptions, + AsyncTranslations, + AsyncSpeech, +) from .batches import Batches, AsyncBatches -from .fine_tuning import FineTuning, Jobs, Checkpoints, AsyncFineTuning, AsyncJobs, AsyncCheckpoints -from .vector_stores import VectorStores, VectorFiles, VectorFileBatches, AsyncVectorStores, AsyncVectorFiles, AsyncVectorFileBatches +from .fine_tuning import ( + FineTuning, + Jobs, + Checkpoints, + AsyncFineTuning, + AsyncJobs, + AsyncCheckpoints, +) +from .vector_stores import ( + VectorStores, + VectorFiles, + VectorFileBatches, + AsyncVectorStores, + AsyncVectorFiles, + AsyncVectorFileBatches, +) __all__ = [ diff --git a/portkey_ai/api_resources/apis/audio.py b/portkey_ai/api_resources/apis/audio.py index f87c7611..7bdf29a3 100644 --- a/portkey_ai/api_resources/apis/audio.py +++ b/portkey_ai/api_resources/apis/audio.py @@ -1,7 +1,6 @@ import json from typing import Any, List, Union from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource -from portkey_ai.api_resources.base_client import APIClient from openai._types import NotGiven, NOT_GIVEN, FileTypes from portkey_ai.api_resources.client import AsyncPortkey, Portkey import typing @@ -18,6 +17,7 @@ def __init__(self, client: Portkey) -> None: self.translations = Translations(client) self.speech = Speech(client) + class Transcriptions(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) @@ -25,17 +25,17 @@ def __init__(self, client: Portkey) -> None: @typing.no_type_check def create( - self, - *, - 
file:FileTypes, - model: str, - language: Union[str, NotGiven] = NOT_GIVEN, - prompt: Union[str, NotGiven] = NOT_GIVEN, - response_format: Union[str, NotGiven] = NOT_GIVEN, - temperature: Union[float, NotGiven] = NOT_GIVEN, - timestamp_granularities: Union[List[str], NotGiven] = NOT_GIVEN, - **kwargs) -> Transcription: - + self, + *, + file: FileTypes, + model: str, + language: Union[str, NotGiven] = NOT_GIVEN, + prompt: Union[str, NotGiven] = NOT_GIVEN, + response_format: Union[str, NotGiven] = NOT_GIVEN, + temperature: Union[float, NotGiven] = NOT_GIVEN, + timestamp_granularities: Union[List[str], NotGiven] = NOT_GIVEN, + **kwargs + ) -> Transcription: response = self.openai_client.with_raw_response.audio.transcriptions.create( file=file, model=model, @@ -50,22 +50,23 @@ def create( data._headers = response.headers return data - + + class Translations(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client def create( - self, - *, - file:FileTypes, - model: str, - prompt: Union[str, NotGiven] = NOT_GIVEN, - response_format: Union[str, NotGiven] = NOT_GIVEN, - temperature: Union[float, NotGiven] = NOT_GIVEN, - **kwargs) -> Translation: - + self, + *, + file: FileTypes, + model: str, + prompt: Union[str, NotGiven] = NOT_GIVEN, + response_format: Union[str, NotGiven] = NOT_GIVEN, + temperature: Union[float, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Translation: response = self.openai_client.with_raw_response.audio.translations.create( file=file, model=model, @@ -78,7 +79,8 @@ def create( data._headers = response.headers return data - + + class Speech(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) @@ -86,15 +88,15 @@ def __init__(self, client: Portkey) -> None: @typing.no_type_check def create( - self, - *, - input: str, - model: str, - voice: str, - response_format: Union[str, NotGiven] = NOT_GIVEN, - speed: Union[float, NotGiven] = NOT_GIVEN, - **kwargs) -> Any: - + self, + *, + input: str, + model: str, + voice: str, + response_format: Union[str, NotGiven] = NOT_GIVEN, + speed: Union[float, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Any: response = self.openai_client.with_raw_response.audio.speech.create( input=input, model=model, @@ -118,6 +120,7 @@ def __init__(self, client: AsyncPortkey) -> None: self.translations = AsyncTranslations(client) self.speech = AsyncSpeech(client) + class AsyncTranscriptions(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) @@ -125,47 +128,50 @@ def __init__(self, client: AsyncPortkey) -> None: @typing.no_type_check async def create( - self, - *, - file:FileTypes, - model: str, - language: Union[str, NotGiven] = NOT_GIVEN, - prompt: Union[str, NotGiven] = NOT_GIVEN, - response_format: Union[str, NotGiven] = NOT_GIVEN, - temperature: Union[float, NotGiven] = NOT_GIVEN, - timestamp_granularities: Union[List[str], NotGiven] = NOT_GIVEN, - **kwargs) -> Transcription: - - response = await self.openai_client.with_raw_response.audio.transcriptions.create( - file=file, - model=model, - language=language, - prompt=prompt, - response_format=response_format, - temperature=temperature, - timestamp_granularities=timestamp_granularities, - **kwargs + self, + *, + file: FileTypes, + model: str, + language: Union[str, NotGiven] = NOT_GIVEN, + prompt: Union[str, NotGiven] = NOT_GIVEN, + response_format: Union[str, NotGiven] = NOT_GIVEN, + temperature: Union[float, NotGiven] = NOT_GIVEN, + timestamp_granularities: Union[List[str], 
NotGiven] = NOT_GIVEN, + **kwargs + ) -> Transcription: + response = ( + await self.openai_client.with_raw_response.audio.transcriptions.create( + file=file, + model=model, + language=language, + prompt=prompt, + response_format=response_format, + temperature=temperature, + timestamp_granularities=timestamp_granularities, + **kwargs + ) ) data = Transcription(**json.loads(response.text)) data._headers = response.headers return data + class AsyncTranslations(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client async def create( - self, - *, - file:FileTypes, - model: str, - prompt: Union[str, NotGiven] = NOT_GIVEN, - response_format: Union[str, NotGiven] = NOT_GIVEN, - temperature: Union[float, NotGiven] = NOT_GIVEN, - **kwargs) -> Translation: - + self, + *, + file: FileTypes, + model: str, + prompt: Union[str, NotGiven] = NOT_GIVEN, + response_format: Union[str, NotGiven] = NOT_GIVEN, + temperature: Union[float, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Translation: response = await self.openai_client.with_raw_response.audio.translations.create( file=file, model=model, @@ -178,7 +184,8 @@ async def create( data._headers = response.headers return data - + + class AsyncSpeech(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) @@ -186,15 +193,15 @@ def __init__(self, client: AsyncPortkey) -> None: @typing.no_type_check async def create( - self, - *, - input: str, - model: str, - voice: str, - response_format: Union[str, NotGiven] = NOT_GIVEN, - speed: Union[float, NotGiven] = NOT_GIVEN, - **kwargs) -> Any: - + self, + *, + input: str, + model: str, + voice: str, + response_format: Union[str, NotGiven] = NOT_GIVEN, + speed: Union[float, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Any: response = await self.openai_client.with_raw_response.audio.speech.create( input=input, model=model, @@ -206,4 +213,4 @@ async def create( data = response - return data \ No newline at end of file + return data diff --git a/portkey_ai/api_resources/apis/batches.py b/portkey_ai/api_resources/apis/batches.py index 6eaeff14..8fd789a3 100644 --- a/portkey_ai/api_resources/apis/batches.py +++ b/portkey_ai/api_resources/apis/batches.py @@ -22,8 +22,7 @@ def create( input_file_id: str, metadata: Union[Optional[Dict[str, str]], NotGiven] = NOT_GIVEN, **kwargs - ) -> Batch: - + ) -> Batch: response = self.openai_client.with_raw_response.batches.create( completion_window=completion_window, endpoint=endpoint, @@ -35,11 +34,8 @@ def create( data._headers = response.headers return data - - def retrieve( - self, - batch_id, - **kwargs) -> Batch: + + def retrieve(self, batch_id, **kwargs) -> Batch: response = self.openai_client.with_raw_response.batches.retrieve( batch_id=batch_id, **kwargs ) @@ -47,37 +43,31 @@ def retrieve( data._headers = response.headers return data - + def list( - self, - *, - after: Union[str, NotGiven] = NOT_GIVEN, - limit: Union[int, NotGiven] = NOT_GIVEN, - **kwargs) -> BatchList: + self, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + **kwargs + ) -> BatchList: response = self.openai_client.with_raw_response.batches.list( - after=after, - limit=limit, - **kwargs + after=after, limit=limit, **kwargs ) data = BatchList(**json.loads(response.text)) data._headers = response.headers return data - - def cancel( - self, - batch_id: str, - **kwargs - )-> Batch: + + def cancel(self, batch_id: str, **kwargs) -> Batch: response = 
self.openai_client.with_raw_response.batches.cancel( - batch_id=batch_id, - **kwargs + batch_id=batch_id, **kwargs ) data = Batch(**json.loads(response.text)) data._headers = response.headers return data - + class AsyncBatches(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: @@ -93,8 +83,7 @@ async def create( input_file_id: str, metadata: Union[Optional[Dict[str, str]], NotGiven] = NOT_GIVEN, **kwargs - ) -> Batch: - + ) -> Batch: response = await self.openai_client.with_raw_response.batches.create( completion_window=completion_window, endpoint=endpoint, @@ -106,11 +95,8 @@ async def create( data._headers = response.headers return data - - async def retrieve( - self, - batch_id, - **kwargs) -> Batch: + + async def retrieve(self, batch_id, **kwargs) -> Batch: response = await self.openai_client.with_raw_response.batches.retrieve( batch_id=batch_id, **kwargs ) @@ -118,34 +104,27 @@ async def retrieve( data._headers = response.headers return data - + async def list( - self, - *, - after: Union[str, NotGiven] = NOT_GIVEN, - limit: Union[int, NotGiven] = NOT_GIVEN, - **kwargs) -> BatchList: + self, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + **kwargs + ) -> BatchList: response = await self.openai_client.with_raw_response.batches.list( - after=after, - limit=limit, - **kwargs + after=after, limit=limit, **kwargs ) data = BatchList(**json.loads(response.text)) data._headers = response.headers return data - - async def cancel( - self, - batch_id: str, - **kwargs - )-> Batch: + + async def cancel(self, batch_id: str, **kwargs) -> Batch: response = await self.openai_client.with_raw_response.batches.cancel( - batch_id=batch_id, - **kwargs + batch_id=batch_id, **kwargs ) data = Batch(**json.loads(response.text)) data._headers = response.headers return data - diff --git a/portkey_ai/api_resources/apis/complete.py b/portkey_ai/api_resources/apis/complete.py index 778a4316..9f4150fc 100644 --- a/portkey_ai/api_resources/apis/complete.py +++ b/portkey_ai/api_resources/apis/complete.py @@ -17,7 +17,7 @@ def __init__(self, client: Portkey) -> None: self.openai_client = client.openai_client self.client = client - def stream_create( # type: ignore[return] + def stream_create( # type: ignore[return] self, model, prompt, stream, temperature, max_tokens, top_p, **kwargs ) -> Union[TextCompletion, Iterator[TextCompletionChunk]]: with self.openai_client.with_streaming_response.completions.create( @@ -97,7 +97,7 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def stream_create( + async def stream_create( self, model, prompt, stream, temperature, max_tokens, top_p, **kwargs ) -> Union[TextCompletion, AsyncIterator[TextCompletionChunk]]: async with self.openai_client.with_streaming_response.completions.create( diff --git a/portkey_ai/api_resources/apis/fine_tuning.py b/portkey_ai/api_resources/apis/fine_tuning.py index 6d185607..d1be95cc 100644 --- a/portkey_ai/api_resources/apis/fine_tuning.py +++ b/portkey_ai/api_resources/apis/fine_tuning.py @@ -243,16 +243,14 @@ async def list( limit: Union[int, NotGiven] = NOT_GIVEN, **kwargs, ) -> FineTuningJobCheckpointList: - response = ( - await self.openai_client.with_raw_response.fine_tuning.jobs.checkpoints.list( - fine_tuning_job_id=fine_tuning_job_id, - after=after, - limit=limit, - **kwargs, - ) + response = await self.openai_client.with_raw_response.fine_tuning.jobs.checkpoints.list( # noqa: E501 + 
fine_tuning_job_id=fine_tuning_job_id, + after=after, + limit=limit, + **kwargs, ) data = FineTuningJobCheckpointList(**json.loads(response.text)) data._headers = response.headers - return data \ No newline at end of file + return data diff --git a/portkey_ai/api_resources/apis/moderations.py b/portkey_ai/api_resources/apis/moderations.py index 65e858fd..8b6f55dc 100644 --- a/portkey_ai/api_resources/apis/moderations.py +++ b/portkey_ai/api_resources/apis/moderations.py @@ -1,5 +1,5 @@ import json -from typing import List, Union, Any +from typing import List, Union from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey from openai._types import NotGiven, NOT_GIVEN @@ -12,22 +12,20 @@ def __init__(self, client: Portkey) -> None: self.openai_client = client.openai_client def create( - self, - *, - input: Union[str, List[str]], - model: Union[str, NotGiven]= NOT_GIVEN, - **kwargs - )-> ModerationCreateResponse: + self, + *, + input: Union[str, List[str]], + model: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> ModerationCreateResponse: response = self.openai_client.with_raw_response.moderations.create( - input=input, - model=model, - **kwargs + input=input, model=model, **kwargs ) data = ModerationCreateResponse(**json.loads(response.text)) data._headers = response.headers return data - + class AsyncModerations(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: @@ -35,18 +33,16 @@ def __init__(self, client: AsyncPortkey) -> None: self.openai_client = client.openai_client async def create( - self, - *, - input: Union[str, List[str]], - model: Union[str, NotGiven]= NOT_GIVEN, - **kwargs - )-> ModerationCreateResponse: + self, + *, + input: Union[str, List[str]], + model: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> ModerationCreateResponse: response = await self.openai_client.with_raw_response.moderations.create( - input=input, - model=model, - **kwargs + input=input, model=model, **kwargs ) data = ModerationCreateResponse(**json.loads(response.text)) data._headers = response.headers - return data \ No newline at end of file + return data diff --git a/portkey_ai/api_resources/apis/threads.py b/portkey_ai/api_resources/apis/threads.py index 7e7a1a72..a1e1c9c8 100644 --- a/portkey_ai/api_resources/apis/threads.py +++ b/portkey_ai/api_resources/apis/threads.py @@ -18,8 +18,12 @@ from portkey_ai.api_resources.types.thread_type import Thread, ThreadDeleted from openai._types import NotGiven, NOT_GIVEN from openai.types.beta import thread_create_and_run_params -from openai.types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam -from openai.types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam +from openai.types.beta.assistant_response_format_option_param import ( + AssistantResponseFormatOptionParam, +) +from openai.types.beta.assistant_tool_choice_option_param import ( + AssistantToolChoiceOptionParam, +) from openai.lib.streaming import ( AssistantEventHandler, AssistantEventHandlerT, @@ -34,6 +38,7 @@ ) from openai.types.beta.assistant_tool_param import AssistantToolParam + class Threads(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) @@ -97,18 +102,27 @@ def create_and_run_poll( max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, - response_format: 
Union[Optional[AssistantResponseFormatOptionParam] , NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, thread: Union[thread_create_and_run_params.Thread, NotGiven] = NOT_GIVEN, - tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, - tool_resources: Union[Optional[thread_create_and_run_params.ToolResources], NotGiven] = NOT_GIVEN, - tools: Union[Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, + tool_resources: Union[ + Optional[thread_create_and_run_params.ToolResources], NotGiven + ] = NOT_GIVEN, + tools: Union[ + Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven + ] = NOT_GIVEN, top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, - truncation_strategy: Union[Optional[thread_create_and_run_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[thread_create_and_run_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, **kwargs, - ) -> Run: - + ) -> Run: response = self.openai_client.beta.threads.create_and_run_poll( assistant_id=assistant_id, instructions=instructions, @@ -125,11 +139,11 @@ def create_and_run_poll( top_p=top_p, truncation_strategy=truncation_strategy, poll_interval_ms=poll_interval_ms, - **kwargs + **kwargs, ) data = response - return data # type: ignore[return-value] + return data # type: ignore[return-value] @typing.no_type_check def create_and_run_stream( @@ -141,18 +155,30 @@ def create_and_run_stream( max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, - response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, thread: Union[thread_create_and_run_params.Thread, NotGiven] = NOT_GIVEN, - tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, - tool_resources: Union[Optional[thread_create_and_run_params.ToolResources], NotGiven] = NOT_GIVEN, - tools: Union[Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, + tool_resources: Union[ + Optional[thread_create_and_run_params.ToolResources], NotGiven + ] = NOT_GIVEN, + tools: Union[ + Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven + ] = NOT_GIVEN, top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, - truncation_strategy: Union[Optional[thread_create_and_run_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[thread_create_and_run_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, event_handler: Union[AssistantEventHandlerT, None] = None, **kwargs, - ) -> Union[AssistantStreamManager[AssistantEventHandler], AssistantStreamManager[AssistantEventHandlerT]]: - + ) -> Union[ + AssistantStreamManager[AssistantEventHandler], + AssistantStreamManager[AssistantEventHandlerT], + ]: response = self.openai_client.beta.threads.create_and_run_stream( assistant_id=assistant_id, instructions=instructions, @@ -169,7 +195,7 @@ def create_and_run_stream( 
top_p=top_p, truncation_strategy=truncation_strategy, event_handler=event_handler, - **kwargs + **kwargs, ) data = response return data @@ -214,16 +240,10 @@ def update(self, thread_id, message_id, **kwargs) -> ThreadMessage: return data def delete( - self, - message_id: str, - *, - thread_id: str, - **kwargs + self, message_id: str, *, thread_id: str, **kwargs ) -> ThreadMessageDeleted: response = self.openai_client.with_raw_response.beta.threads.messages.delete( - message_id=message_id, - thread_id=thread_id, - **kwargs + message_id=message_id, thread_id=thread_id, **kwargs ) data = ThreadMessageDeleted(**json.loads(response.text)) data._headers = response.headers @@ -298,22 +318,30 @@ def create_and_poll( *, assistant_id: str, additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, - additional_messages: Union[Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven] = NOT_GIVEN, + additional_messages: Union[ + Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven + ] = NOT_GIVEN, instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, - response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, - tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, - truncation_strategy: Union[Optional[run_create_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[run_create_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, thread_id: str, **kwargs, - )-> Run: + ) -> Run: response = self.openai_client.beta.threads.runs.create_and_poll( assistant_id=assistant_id, additional_instructions=additional_instructions, @@ -331,34 +359,45 @@ def create_and_poll( truncation_strategy=truncation_strategy, poll_interval_ms=poll_interval_ms, thread_id=thread_id, - **kwargs + **kwargs, ) data = response - return data # type: ignore[return-value] + return data # type: ignore[return-value] @typing.no_type_check def create_and_stream( - self, + self, *, assistant_id: str, additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, - additional_messages: Union[Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven] = NOT_GIVEN, + additional_messages: Union[ + Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven + ] = NOT_GIVEN, instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, - response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, - tool_choice: 
Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, - truncation_strategy: Union[Optional[run_create_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[run_create_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, thread_id: str, event_handler: Union[AssistantEventHandlerT, None] = None, **kwargs, - )-> Union[AssistantStreamManager[AssistantEventHandler], AssistantStreamManager[AssistantEventHandlerT]]: + ) -> Union[ + AssistantStreamManager[AssistantEventHandler], + AssistantStreamManager[AssistantEventHandlerT], + ]: response = self.openai_client.beta.threads.runs.create_and_stream( assistant_id=assistant_id, additional_instructions=additional_instructions, @@ -376,26 +415,24 @@ def create_and_stream( truncation_strategy=truncation_strategy, thread_id=thread_id, event_handler=event_handler, - **kwargs + **kwargs, ) data = response return data - + def poll( - self, - *, + self, + *, run_id: str, thread_id: str, **kwargs, ) -> Run: response = self.openai_client.beta.threads.runs.poll( - run_id=run_id, - thread_id=thread_id, - **kwargs + run_id=run_id, thread_id=thread_id, **kwargs ) data = response - return data # type: ignore[return-value] + return data # type: ignore[return-value] @typing.no_type_check def stream( @@ -403,23 +440,33 @@ def stream( *, assistant_id: str, additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, - additional_messages: Union[Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven] = NOT_GIVEN, + additional_messages: Union[ + Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven + ] = NOT_GIVEN, instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, - response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, - tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, - truncation_strategy: Union[Optional[run_create_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[run_create_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, thread_id: str, event_handler: Union[AssistantEventHandlerT, None] = None, **kwargs, - )-> Union[AssistantStreamManager[AssistantEventHandler], AssistantStreamManager[AssistantEventHandlerT]]: - + ) -> Union[ + AssistantStreamManager[AssistantEventHandler], + AssistantStreamManager[AssistantEventHandlerT], + ]: response = self.openai_client.beta.threads.runs.stream( assistant_id=assistant_id, additional_instructions=additional_instructions, @@ -437,7 +484,7 @@ def stream( truncation_strategy=truncation_strategy, thread_id=thread_id, event_handler=event_handler, - **kwargs + **kwargs, ) data = response return data @@ 
-448,17 +495,17 @@ def submit_tool_outputs_and_poll( tool_outputs: Union[Iterable[run_submit_tool_outputs_params.ToolOutput]], run_id: str, thread_id: str, - poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, - )-> Run: + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + ) -> Run: response = self.openai_client.beta.threads.runs.submit_tool_outputs_and_poll( tool_outputs=tool_outputs, run_id=run_id, thread_id=thread_id, - poll_interval_ms=poll_interval_ms + poll_interval_ms=poll_interval_ms, ) data = response - return data # type: ignore[return-value] + return data # type: ignore[return-value] def submit_tool_outputs_stream( self, @@ -466,17 +513,21 @@ def submit_tool_outputs_stream( tool_outputs: Union[Iterable[run_submit_tool_outputs_params.ToolOutput]], run_id: str, thread_id: str, - event_handler: Union[AssistantEventHandlerT, None] = None, - ) -> Union[AssistantStreamManager[AssistantEventHandler], AssistantStreamManager[AssistantEventHandlerT]]: - response = self.openai_client.beta.threads.runs.submit_tool_outputs_stream( + event_handler: Union[AssistantEventHandlerT, None] = None, + ) -> Union[ + AssistantStreamManager[AssistantEventHandler], + AssistantStreamManager[AssistantEventHandlerT], + ]: + response = self.openai_client.beta.threads.runs.submit_tool_outputs_stream( # type: ignore[type-var] tool_outputs=tool_outputs, run_id=run_id, thread_id=thread_id, - event_handler=event_handler + event_handler=event_handler, ) data = response - return data + return data # type: ignore[return-value] + class Steps(APIResource): def __init__(self, client: Portkey) -> None: @@ -569,18 +620,27 @@ async def create_and_run_poll( max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, - response_format: Union[Optional[AssistantResponseFormatOptionParam] , NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, thread: Union[thread_create_and_run_params.Thread, NotGiven] = NOT_GIVEN, - tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, - tool_resources: Union[Optional[thread_create_and_run_params.ToolResources], NotGiven] = NOT_GIVEN, - tools: Union[Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, + tool_resources: Union[ + Optional[thread_create_and_run_params.ToolResources], NotGiven + ] = NOT_GIVEN, + tools: Union[ + Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven + ] = NOT_GIVEN, top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, - truncation_strategy: Union[Optional[thread_create_and_run_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[thread_create_and_run_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, **kwargs, - ) -> Run: - + ) -> Run: response = await self.openai_client.beta.threads.create_and_run_poll( assistant_id=assistant_id, instructions=instructions, @@ -597,12 +657,12 @@ async def create_and_run_poll( top_p=top_p, truncation_strategy=truncation_strategy, poll_interval_ms=poll_interval_ms, - **kwargs + **kwargs, ) data = response - return data # type: ignore[return-value] - + return data # type: ignore[return-value] + @typing.no_type_check async def create_and_run_stream( self, 
@@ -613,21 +673,32 @@ async def create_and_run_stream( max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, - response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, thread: Union[thread_create_and_run_params.Thread, NotGiven] = NOT_GIVEN, - tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, - tool_resources: Union[Optional[thread_create_and_run_params.ToolResources], NotGiven] = NOT_GIVEN, - tools: Union[Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, + tool_resources: Union[ + Optional[thread_create_and_run_params.ToolResources], NotGiven + ] = NOT_GIVEN, + tools: Union[ + Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven + ] = NOT_GIVEN, top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, - truncation_strategy: Union[Optional[thread_create_and_run_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[thread_create_and_run_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, event_handler: Union[AsyncAssistantEventHandlerT, None] = None, **kwargs, ) -> ( - Union[AsyncAssistantStreamManager[AsyncAssistantEventHandler], - AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]] + Union[ + AsyncAssistantStreamManager[AsyncAssistantEventHandler], + AsyncAssistantStreamManager[AsyncAssistantEventHandlerT], + ] ): - response = await self.openai_client.beta.threads.create_and_run_stream( assistant_id=assistant_id, instructions=instructions, @@ -644,7 +715,7 @@ async def create_and_run_stream( top_p=top_p, truncation_strategy=truncation_strategy, event_handler=event_handler, - **kwargs + **kwargs, ) data = response return data @@ -697,17 +768,11 @@ async def update(self, thread_id, message_id, **kwargs) -> ThreadMessage: return data async def delete( - self, - message_id: str, - *, - thread_id: str, - **kwargs + self, message_id: str, *, thread_id: str, **kwargs ) -> ThreadMessageDeleted: response = ( await self.openai_client.with_raw_response.beta.threads.messages.delete( - message_id=message_id, - thread_id=thread_id, - **kwargs + message_id=message_id, thread_id=thread_id, **kwargs ) ) data = ThreadMessageDeleted(**json.loads(response.text)) @@ -795,22 +860,30 @@ async def create_and_poll( *, assistant_id: str, additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, - additional_messages: Union[Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven] = NOT_GIVEN, + additional_messages: Union[ + Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven + ] = NOT_GIVEN, instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, - response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, - tool_choice: 
Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, - truncation_strategy: Union[Optional[run_create_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[run_create_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, thread_id: str, **kwargs, - )-> Run: + ) -> Run: response = await self.openai_client.beta.threads.runs.create_and_poll( assistant_id=assistant_id, additional_instructions=additional_instructions, @@ -828,11 +901,11 @@ async def create_and_poll( truncation_strategy=truncation_strategy, poll_interval_ms=poll_interval_ms, thread_id=thread_id, - **kwargs + **kwargs, ) data = response - return data # type: ignore[return-value] + return data # type: ignore[return-value] @typing.no_type_check async def create_and_stream( @@ -840,26 +913,35 @@ async def create_and_stream( *, assistant_id: str, additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, - additional_messages: Union[Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven] = NOT_GIVEN, + additional_messages: Union[ + Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven + ] = NOT_GIVEN, instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, - response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, - tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, - truncation_strategy: Union[Optional[run_create_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[run_create_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, thread_id: str, event_handler: Union[AsyncAssistantEventHandlerT, None] = None, **kwargs, - )-> ( - Union[AsyncAssistantStreamManager[AsyncAssistantEventHandler], - AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]] + ) -> ( + Union[ + AsyncAssistantStreamManager[AsyncAssistantEventHandler], + AsyncAssistantStreamManager[AsyncAssistantEventHandlerT], + ] ): - response = await self.openai_client.beta.threads.runs.create_and_stream( assistant_id=assistant_id, additional_instructions=additional_instructions, @@ -877,7 +959,7 @@ async def create_and_stream( truncation_strategy=truncation_strategy, thread_id=thread_id, event_handler=event_handler, - **kwargs + **kwargs, ) data = response return data @@ -890,38 +972,46 @@ async def poll( **kwargs, ) -> Run: response = await self.openai_client.beta.threads.runs.poll( - run_id=run_id, - thread_id=thread_id, - **kwargs + run_id=run_id, thread_id=thread_id, **kwargs ) data = response - return data # type: ignore[return-value] - + return data # type: ignore[return-value] + @typing.no_type_check 
async def stream( self, *, assistant_id: str, additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, - additional_messages: Union[Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven] = NOT_GIVEN, + additional_messages: Union[ + Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven + ] = NOT_GIVEN, instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, - response_format: Union[Optional[AssistantResponseFormatOptionParam], NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, - tool_choice: Union[Optional[AssistantToolChoiceOptionParam], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, - truncation_strategy: Union[Optional[run_create_params.TruncationStrategy], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[run_create_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, thread_id: str, event_handler: Union[AsyncAssistantEventHandlerT, None] = None, **kwargs, ) -> ( - Union[AsyncAssistantStreamManager[AsyncAssistantEventHandler], - AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]] + Union[ + AsyncAssistantStreamManager[AsyncAssistantEventHandler], + AsyncAssistantStreamManager[AsyncAssistantEventHandlerT], + ] ): response = await self.openai_client.beta.threads.runs.stream( assistant_id=assistant_id, @@ -940,49 +1030,51 @@ async def stream( truncation_strategy=truncation_strategy, thread_id=thread_id, event_handler=event_handler, - **kwargs + **kwargs, ) data = response return data - + async def submit_tool_outputs_and_poll( self, *, tool_outputs: Union[Iterable[run_submit_tool_outputs_params.ToolOutput]], run_id: str, thread_id: str, - poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, ) -> Run: - response = await self.openai_client.beta.threads.runs.submit_tool_outputs_and_poll( - tool_outputs=tool_outputs, - run_id=run_id, - thread_id=thread_id, - poll_interval_ms=poll_interval_ms + response = ( + await self.openai_client.beta.threads.runs.submit_tool_outputs_and_poll( + tool_outputs=tool_outputs, + run_id=run_id, + thread_id=thread_id, + poll_interval_ms=poll_interval_ms, + ) ) data = response - return data # type: ignore[return-value] - + return data # type: ignore[return-value] + def submit_tool_outputs_stream( self, *, tool_outputs: Union[Iterable[run_submit_tool_outputs_params.ToolOutput]], run_id: str, thread_id: str, - event_handler: Union[AsyncAssistantEventHandlerT, None] = None, - ) -> ( - Union[AsyncAssistantStreamManager[AsyncAssistantEventHandler], - AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]] - ): - response = self.openai_client.beta.threads.runs.submit_tool_outputs_stream( + event_handler: Union[AsyncAssistantEventHandlerT, None] = None, + ) -> Union[ + AsyncAssistantStreamManager[AsyncAssistantEventHandler], + AsyncAssistantStreamManager[AsyncAssistantEventHandlerT], + ]: + response = self.openai_client.beta.threads.runs.submit_tool_outputs_stream( # type: ignore[type-var] 
tool_outputs=tool_outputs, run_id=run_id, thread_id=thread_id, - event_handler=event_handler + event_handler=event_handler, ) data = response - return data + return data # type: ignore[return-value] class AsyncSteps(AsyncAPIResource): diff --git a/portkey_ai/api_resources/apis/vector_stores.py b/portkey_ai/api_resources/apis/vector_stores.py index fa5eee2a..4d9029ae 100644 --- a/portkey_ai/api_resources/apis/vector_stores.py +++ b/portkey_ai/api_resources/apis/vector_stores.py @@ -220,7 +220,7 @@ def create_and_poll( ) data = response - return data #type: ignore[return-value] + return data # type: ignore[return-value] def poll( self, @@ -238,7 +238,7 @@ def poll( ) data = response - return data #type: ignore[return-value] + return data # type: ignore[return-value] def upload( self, @@ -253,8 +253,8 @@ def upload( **kwargs, ) data = response - return data #type: ignore[return-value] - + return data # type: ignore[return-value] + def upload_and_poll( self, *, @@ -270,7 +270,7 @@ def upload_and_poll( **kwargs, ) data = response - return data #type: ignore[return-value] + return data # type: ignore[return-value] class VectorFileBatches(APIResource): @@ -285,24 +285,22 @@ def create( file_ids: List[str], **kwargs, ) -> VectorStoreFileBatch: - response = self.openai_client.with_raw_response.beta.vector_stores.file_batches.create( - vector_store_id=vector_store_id, - file_ids=file_ids, - **kwargs, + response = ( + self.openai_client.with_raw_response.beta.vector_stores.file_batches.create( + vector_store_id=vector_store_id, + file_ids=file_ids, + **kwargs, + ) ) data = VectorStoreFileBatch(**json.loads(response.text)) data._headers = response.headers return data - + def retrieve( - self, - batch_id:str, - *, - vector_store_id: str, - **kwargs + self, batch_id: str, *, vector_store_id: str, **kwargs ) -> VectorStoreFileBatch: - response = self.openai_client.with_raw_response.beta.vector_stores.file_batches.retrieve( + response = self.openai_client.with_raw_response.beta.vector_stores.file_batches.retrieve( # noqa: E501 batch_id=batch_id, vector_store_id=vector_store_id, **kwargs, @@ -310,33 +308,31 @@ def retrieve( data = VectorStoreFileBatch(**json.loads(response.text)) data._headers = response.headers - return data - + return data + def cancel( - self, - batch_id:str, - *, - vector_store_id: str, - **kwargs + self, batch_id: str, *, vector_store_id: str, **kwargs ) -> VectorStoreFileBatch: - response = self.openai_client.with_raw_response.beta.vector_stores.file_batches.cancel( - batch_id=batch_id, - vector_store_id=vector_store_id, - **kwargs, + response = ( + self.openai_client.with_raw_response.beta.vector_stores.file_batches.cancel( + batch_id=batch_id, + vector_store_id=vector_store_id, + **kwargs, + ) ) data = VectorStoreFileBatch(**json.loads(response.text)) data._headers = response.headers return data - + def create_and_poll( - self, - vector_store_id: str, - *, - file_ids: List[str], - poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, - **kwargs, - )-> VectorStoreFileBatch: + self, + vector_store_id: str, + *, + file_ids: List[str], + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFileBatch: response = self.openai_client.beta.vector_stores.file_batches.create_and_poll( vector_store_id=vector_store_id, file_ids=file_ids, @@ -344,8 +340,8 @@ def create_and_poll( **kwargs, ) data = response - return data #type: ignore[return-value] - + return data # type: ignore[return-value] + @typing.no_type_check def list_files( self, @@ -359,7 +355,7 @@ def 
list_files( order: Union[str, NotGiven] = NOT_GIVEN, **kwargs, ) -> VectorStoreFileList: - response = self.openai_client.with_raw_response.beta.vector_stores.file_batches.list_files( + response = self.openai_client.with_raw_response.beta.vector_stores.file_batches.list_files( # noqa: E501 batch_id=batch_id, vector_store_id=vector_store_id, after=after, @@ -373,7 +369,7 @@ def list_files( data._headers = response.headers return data - + def poll( self, batch_id: str, @@ -390,8 +386,8 @@ def poll( ) data = response - return data # type: ignore[return-value] - + return data # type: ignore[return-value] + def upload_and_poll( self, vector_store_id: str, @@ -400,7 +396,7 @@ def upload_and_poll( max_concurrency: int = 5, file_ids: List[str] = [], poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, - **kwargs + **kwargs, ) -> VectorStoreFileBatch: response = self.openai_client.beta.vector_stores.file_batches.upload_and_poll( vector_store_id=vector_store_id, @@ -412,7 +408,7 @@ def upload_and_poll( ) data = response - return data # type: ignore[return-value] + return data # type: ignore[return-value] class AsyncVectorStores(AsyncAPIResource): @@ -501,7 +497,7 @@ async def list( data._headers = response.headers return data - + async def delete( self, vector_store_id: str, @@ -529,10 +525,12 @@ async def create( file_id: str, **kwargs, ) -> VectorStoreFile: - response = await self.openai_client.with_raw_response.beta.vector_stores.files.create( - vector_store_id=vector_store_id, - file_id=file_id, - **kwargs, + response = ( + await self.openai_client.with_raw_response.beta.vector_stores.files.create( + vector_store_id=vector_store_id, + file_id=file_id, + **kwargs, + ) ) data = VectorStoreFile(**json.loads(response.text)) data._headers = response.headers @@ -546,12 +544,10 @@ async def retrieve( vector_store_id: str, **kwargs, ) -> VectorStoreFile: - response = ( - await self.openai_client.with_raw_response.beta.vector_stores.files.retrieve( - file_id=file_id, - vector_store_id=vector_store_id, - **kwargs, - ) + response = await self.openai_client.with_raw_response.beta.vector_stores.files.retrieve( # noqa: E501 + file_id=file_id, + vector_store_id=vector_store_id, + **kwargs, ) data = VectorStoreFile(**json.loads(response.text)) data._headers = response.headers @@ -570,14 +566,16 @@ async def list( order: Union[str, NotGiven] = NOT_GIVEN, **kwargs, ) -> VectorStoreFileList: - response = await self.openai_client.with_raw_response.beta.vector_stores.files.list( - vector_store_id=vector_store_id, - after=after, - before=before, - filter=filter, - limit=limit, - order=order, - **kwargs, + response = ( + await self.openai_client.with_raw_response.beta.vector_stores.files.list( + vector_store_id=vector_store_id, + after=after, + before=before, + filter=filter, + limit=limit, + order=order, + **kwargs, + ) ) data = VectorStoreFileList(**json.loads(response.text)) data._headers = response.headers @@ -591,10 +589,12 @@ async def delete( vector_store_id: str, **kwargs, ) -> VectorStoreFileDeleted: - response = await self.openai_client.with_raw_response.beta.vector_stores.files.delete( - file_id=file_id, - vector_store_id=vector_store_id, - **kwargs, + response = ( + await self.openai_client.with_raw_response.beta.vector_stores.files.delete( + file_id=file_id, + vector_store_id=vector_store_id, + **kwargs, + ) ) data = VectorStoreFileDeleted(**json.loads(response.text)) data._headers = response.headers @@ -617,8 +617,8 @@ async def create_and_poll( ) data = response - return data # type: ignore[return-value] 
- + return data # type: ignore[return-value] + async def poll( self, file_id: str, @@ -635,8 +635,8 @@ async def poll( ) data = response - return data # type: ignore[return-value] - + return data # type: ignore[return-value] + async def upload( self, *, @@ -650,8 +650,8 @@ async def upload( **kwargs, ) data = response - return data # type: ignore[return-value] - + return data # type: ignore[return-value] + async def upload_and_poll( self, *, @@ -667,8 +667,8 @@ async def upload_and_poll( **kwargs, ) data = response - return data # type: ignore[return-value] - + return data # type: ignore[return-value] + class AsyncVectorFileBatches(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: @@ -682,7 +682,7 @@ async def create( file_ids: List[str], **kwargs, ) -> VectorStoreFileBatch: - response = await self.openai_client.with_raw_response.beta.vector_stores.file_batches.create( + response = await self.openai_client.with_raw_response.beta.vector_stores.file_batches.create( # noqa: E501 vector_store_id=vector_store_id, file_ids=file_ids, **kwargs, @@ -691,15 +691,11 @@ async def create( data._headers = response.headers return data - + async def retrieve( - self, - batch_id:str, - *, - vector_store_id: str, - **kwargs + self, batch_id: str, *, vector_store_id: str, **kwargs ) -> VectorStoreFileBatch: - response = await self.openai_client.with_raw_response.beta.vector_stores.file_batches.retrieve( + response = await self.openai_client.with_raw_response.beta.vector_stores.file_batches.retrieve( # noqa: E501 batch_id=batch_id, vector_store_id=vector_store_id, **kwargs, @@ -707,16 +703,12 @@ async def retrieve( data = VectorStoreFileBatch(**json.loads(response.text)) data._headers = response.headers - return data - + return data + async def cancel( - self, - batch_id:str, - *, - vector_store_id: str, - **kwargs + self, batch_id: str, *, vector_store_id: str, **kwargs ) -> VectorStoreFileBatch: - response = await self.openai_client.with_raw_response.beta.vector_stores.file_batches.cancel( + response = await self.openai_client.with_raw_response.beta.vector_stores.file_batches.cancel( # noqa: E501 batch_id=batch_id, vector_store_id=vector_store_id, **kwargs, @@ -725,25 +717,27 @@ async def cancel( data._headers = response.headers return data - + async def create_and_poll( - self, - vector_store_id: str, - *, - file_ids: List[str], - poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, - **kwargs, - )-> VectorStoreFileBatch: - response = await self.openai_client.beta.vector_stores.file_batches.create_and_poll( - vector_store_id=vector_store_id, - file_ids=file_ids, - poll_interval_ms=poll_interval_ms, - **kwargs, + self, + vector_store_id: str, + *, + file_ids: List[str], + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFileBatch: + response = ( + await self.openai_client.beta.vector_stores.file_batches.create_and_poll( + vector_store_id=vector_store_id, + file_ids=file_ids, + poll_interval_ms=poll_interval_ms, + **kwargs, + ) ) data = response - return data # type: ignore[return-value] - + return data # type: ignore[return-value] + @typing.no_type_check async def list_files( self, @@ -757,7 +751,7 @@ async def list_files( order: Union[str, NotGiven] = NOT_GIVEN, **kwargs, ) -> VectorStoreFileList: - response = await self.openai_client.beta.with_raw_response.vector_stores.file_batches.list_files( + response = await self.openai_client.beta.with_raw_response.vector_stores.file_batches.list_files( # noqa: E501 batch_id=batch_id, 
vector_store_id=vector_store_id, after=after, @@ -771,7 +765,7 @@ async def list_files( data._headers = response.headers return data - + async def poll( self, batch_id: str, @@ -788,8 +782,8 @@ async def poll( ) data = response - return data # type: ignore[return-value] - + return data # type: ignore[return-value] + async def upload_and_poll( self, vector_store_id: str, @@ -798,16 +792,18 @@ async def upload_and_poll( max_concurrency: int = 5, file_ids: List[str] = [], poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, - **kwargs + **kwargs, ) -> VectorStoreFileBatch: - response = await self.openai_client.beta.vector_stores.file_batches.upload_and_poll( - vector_store_id=vector_store_id, - files=files, - max_concurrency=max_concurrency, - file_ids=file_ids, - poll_interval_ms=poll_interval_ms, - **kwargs, + response = ( + await self.openai_client.beta.vector_stores.file_batches.upload_and_poll( + vector_store_id=vector_store_id, + files=files, + max_concurrency=max_concurrency, + file_ids=file_ids, + poll_interval_ms=poll_interval_ms, + **kwargs, + ) ) data = response - return data # type: ignore[return-value] \ No newline at end of file + return data # type: ignore[return-value] diff --git a/portkey_ai/api_resources/types/assistant_type.py b/portkey_ai/api_resources/types/assistant_type.py index ddb62a14..28ec2f37 100644 --- a/portkey_ai/api_resources/types/assistant_type.py +++ b/portkey_ai/api_resources/types/assistant_type.py @@ -9,9 +9,6 @@ "Assistant", "AssistantList", "AssistantDeleted", - "AssistantFile", - "AssistantFileList", - "AssistantFileDeleted", "ToolCodeInterpreter", "ToolRetrieval", "ToolFunction", diff --git a/portkey_ai/api_resources/types/audio_types.py b/portkey_ai/api_resources/types/audio_types.py index ba677db1..49891986 100644 --- a/portkey_ai/api_resources/types/audio_types.py +++ b/portkey_ai/api_resources/types/audio_types.py @@ -2,11 +2,12 @@ from typing import Dict, Optional import httpx from .utils import parse_headers -from typing import List, Any +from typing import Any from pydantic import BaseModel, PrivateAttr __all__ = ["Transcription", "Translation"] + class Transcription(BaseModel): text: str _headers: Optional[httpx.Headers] = PrivateAttr() @@ -23,7 +24,7 @@ def get(self, key: str, default: Optional[Any] = None): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) - + class Translation(BaseModel): text: str @@ -41,4 +42,3 @@ def get(self, key: str, default: Optional[Any] = None): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) - diff --git a/portkey_ai/api_resources/types/batches_type.py b/portkey_ai/api_resources/types/batches_type.py index ba036689..4837d8ff 100644 --- a/portkey_ai/api_resources/types/batches_type.py +++ b/portkey_ai/api_resources/types/batches_type.py @@ -10,6 +10,7 @@ __all__ = ["Batch", "BatchList", "Errors"] + class Errors(BaseModel): data: Optional[List[BatchError]] = None @@ -70,4 +71,4 @@ def get(self, key: str, default: Optional[Any] = None): return getattr(self, key, None) or default def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) \ No newline at end of file + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/fine_tuning_type.py b/portkey_ai/api_resources/types/fine_tuning_type.py index 81c11dae..59316b14 100644 --- a/portkey_ai/api_resources/types/fine_tuning_type.py +++ b/portkey_ai/api_resources/types/fine_tuning_type.py @@ -6,16 +6,29 @@ from pydantic import BaseModel, 
PrivateAttr from openai.types.fine_tuning import FineTuningJobWandbIntegrationObject -__all__ = ["Error", "Hyperparameters", "FineTuningJob", "FineTuningJobList", "FineTuningJobEvent", "FineTuningJobEventList", "Metrics", "FineTuningJobCheckpoint", "FineTuningJobCheckpointList"] +__all__ = [ + "Error", + "Hyperparameters", + "FineTuningJob", + "FineTuningJobList", + "FineTuningJobEvent", + "FineTuningJobEventList", + "Metrics", + "FineTuningJobCheckpoint", + "FineTuningJobCheckpointList", +] + class Error(BaseModel): code: str message: str param: Optional[str] = None + class Hyperparameters(BaseModel): n_epochs: Union[str, int] + class FineTuningJob(BaseModel): id: str created_at: int @@ -48,7 +61,7 @@ def get(self, key: str, default: Optional[Any] = None): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) - + class FineTuningJobList(BaseModel): object: Optional[str] = None @@ -67,7 +80,6 @@ def get(self, key: str, default: Optional[Any] = None): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) - class FineTuningJobEvent(BaseModel): @@ -109,7 +121,7 @@ def get(self, key: str, default: Optional[Any] = None): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) - + class Metrics(BaseModel): full_valid_loss: Optional[float] = None @@ -120,6 +132,7 @@ class Metrics(BaseModel): valid_loss: Optional[float] = None valid_mean_token_accuracy: Optional[float] = None + class FineTuningJobCheckpoint(BaseModel): id: str created_at: int @@ -142,7 +155,7 @@ def get(self, key: str, default: Optional[Any] = None): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) - + class FineTuningJobCheckpointList(BaseModel): object: Optional[str] = None @@ -160,4 +173,4 @@ def get(self, key: str, default: Optional[Any] = None): return getattr(self, key, None) or default def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) \ No newline at end of file + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/moderations_type.py b/portkey_ai/api_resources/types/moderations_type.py index a62d7a0b..c85547ae 100644 --- a/portkey_ai/api_resources/types/moderations_type.py +++ b/portkey_ai/api_resources/types/moderations_type.py @@ -9,6 +9,7 @@ __all__ = ["ModerationCreateResponse"] + class ModerationCreateResponse(BaseModel): id: str model: str diff --git a/portkey_ai/api_resources/types/thread_message_type.py b/portkey_ai/api_resources/types/thread_message_type.py index 59df0c4e..0ceea6f2 100644 --- a/portkey_ai/api_resources/types/thread_message_type.py +++ b/portkey_ai/api_resources/types/thread_message_type.py @@ -121,6 +121,7 @@ def get_headers(self) -> Optional[Dict[str, str]]: # def get_headers(self) -> Optional[Dict[str, str]]: # return parse_headers(self._headers) + class ThreadMessageDeleted(BaseModel): id: str deleted: bool @@ -132,4 +133,4 @@ def __str__(self): return json.dumps(self.dict(), indent=4) def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) \ No newline at end of file + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/vector_stores_type.py b/portkey_ai/api_resources/types/vector_stores_type.py index dbf1c06c..188a8ea1 100644 --- a/portkey_ai/api_resources/types/vector_stores_type.py +++ b/portkey_ai/api_resources/types/vector_stores_type.py @@ -4,7 +4,19 @@ from .utils import parse_headers from pydantic import BaseModel, PrivateAttr 
-__all__=["LastError", "ExpiresAfter" ,"VectorStore", "VectorStoreList", "VectorStoreDeleted", "VectorStoreFile", "VectorStoreFileList", "VectorStoreFileDeleted", "FileCounts", "VectorStoreFileBatch"] +__all__ = [ + "LastError", + "ExpiresAfter", + "VectorStore", + "VectorStoreList", + "VectorStoreDeleted", + "VectorStoreFile", + "VectorStoreFileList", + "VectorStoreFileDeleted", + "FileCounts", + "VectorStoreFileBatch", +] + class FileCounts(BaseModel): cancelled: int @@ -13,10 +25,12 @@ class FileCounts(BaseModel): in_progress: int total: int + class ExpiresAfter(BaseModel): anchor: str days: int + class VectorStore(BaseModel): id: str created_at: int @@ -38,6 +52,7 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) + class VectorStoreList(BaseModel): data: List[VectorStore] object: str @@ -63,11 +78,13 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) - + + class LastError(BaseModel): code: str message: str + class VectorStoreFile(BaseModel): id: str created_at: int @@ -84,7 +101,8 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) - + + class VectorStoreFileList(BaseModel): data: List[VectorStoreFile] object: str @@ -101,7 +119,7 @@ def get_headers(self) -> Optional[Dict[str, str]]: class VectorStoreFileDeleted(BaseModel): id: str deleted: bool - object:str + object: str _headers: Optional[httpx.Headers] = PrivateAttr() def __str__(self): @@ -111,6 +129,7 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) + class VectorStoreFileBatch(BaseModel): id: str created_at: int From 52e5f463e95f1f28da4f798951df30e63dec4f6c Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Tue, 14 May 2024 06:52:41 -0400 Subject: [PATCH 14/38] feat: test cases for moderations --- tests/test_async_moderations.py | 144 ++++++++++++++++++++++++++++++++ tests/test_moderations.py | 141 +++++++++++++++++++++++++++++++ 2 files changed, 285 insertions(+) create mode 100644 tests/test_async_moderations.py create mode 100644 tests/test_moderations.py diff --git a/tests/test_async_moderations.py b/tests/test_async_moderations.py new file mode 100644 index 00000000..253fe0f0 --- /dev/null +++ b/tests/test_async_moderations.py @@ -0,0 +1,144 @@ +from __future__ import annotations +import inspect + +import os +from os import walk +from typing import Any, Dict, List +import pytest +from uuid import uuid4 +from portkey_ai import Portkey +from time import sleep +from dotenv import load_dotenv +from .utils import read_json_file + + +load_dotenv(override=True) + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/moderations" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestModerations: + client = Portkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # -------------------------- + # Test-1 + t1_params = [] + t = [] + for k, v in 
models.items(): + for i in v["chat"]: + t.append((client, k, os.environ.get(v["env_variable"]), i)) + + t1_params.extend(t) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, provider, auth, model", t1_params) + async def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"{auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + + moderations = await portkey.mocerations.create( + input="I want to kill them.", model="text-moderation-stable" + ) + + assert isinstance(moderations.id, str) is True + + # -------------------------- + # Test-2 + t2_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t2_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t2_params) + async def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + + moderations = await portkey.mocerations.create( + input="I want to kill them.", model="text-moderation-stable" + ) + + assert isinstance(moderations.id, str) is True + + # -------------------------- + # Test-3 + t3_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"): + t3_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t3_params) + async def test_method_single_provider_with_vk_retry_cache( + self, client: Any, config: Dict + ) -> None: + # 1. Make a new cache entry. + # 2. Make a cache hit and see if the response contains the data. + random_id = str(uuid4()) + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + moderations = await portkey.mocerations.create( + input="I want to kill them.", model="text-moderation-stable" + ) + + assert isinstance(moderations.id, str) is True + # Sleeping for the cache to reflect across the workers. The cache is + # eventually consistent, not immediately consistent.
+ sleep(20) + portkey_2 = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + cached_moderations = await portkey_2.mocerations.create( + input="I want to kill them.", model="text-moderation-stable" + ) + + + assert isinstance(cached_moderations.id, str) is True diff --git a/tests/test_moderations.py b/tests/test_moderations.py new file mode 100644 index 00000000..c57cb951 --- /dev/null +++ b/tests/test_moderations.py @@ -0,0 +1,141 @@ +from __future__ import annotations +import inspect + +import os +from os import walk +from typing import Any, Dict, List +import pytest +from uuid import uuid4 +from portkey_ai import Portkey +from time import sleep +from dotenv import load_dotenv +from .utils import read_json_file + + +load_dotenv(override=True) + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/moderations" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestModerations: + client = Portkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # -------------------------- + # Test-1 + t1_params = [] + t = [] + for k, v in models.items(): + for i in v["chat"]: + t.append((client, k, os.environ.get(v["env_variable"]), i)) + + t1_params.extend(t) + + @pytest.mark.parametrize("client, provider, auth, model", t1_params) + def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"{auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + + moderations = portkey.mocerations.create( + input="I want to kill them.", model="text-moderation-stable" + ) + + assert isinstance(moderations.id, str) is True + + # -------------------------- + # Test-2 + t2_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t2_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t2_params) + def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + + moderations = portkey.mocerations.create( + input="I want to kill them.", model="text-moderation-stable" + ) + + assert isinstance(moderations.id, str) is True + + # -------------------------- + # Test-3 + t3_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"): + t3_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t3_params) + def test_method_single_provider_with_vk_retry_cache( + self, client: Any, config: Dict + ) -> None: + # 1. Make a new cache entry. + # 2. Make a cache hit and see if the response contains the data.
+ random_id = str(uuid4()) + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + moderations = portkey.mocerations.create( + input="I want to kill them.", model="text-moderation-stable" + ) + + assert isinstance(moderations.id, str) is True + # Sleeping for the cache to reflect across the workers. The cache is + # eventually consistent, not immediately consistent. + sleep(20) + portkey_2 = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + cached_moderations = portkey_2.mocerations.create( + input="I want to kill them.", model="text-moderation-stable" + ) + + + assert isinstance(cached_moderations.id, str) is True From 1cc9885f15d911bc249a74b100e20b97d4564433 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Tue, 14 May 2024 06:59:32 -0400 Subject: [PATCH 15/38] fix: type of metadata --- portkey_ai/api_resources/client.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index d927906f..5c75effa 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -37,7 +37,7 @@ def __init__( config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = None, trace_id: Optional[str] = None, - metadata: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, **kwargs, ) -> None: super().__init__( @@ -77,7 +77,7 @@ def copy( config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = None, trace_id: Optional[str] = None, - metadata: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, **kwargs, ) -> Portkey: return self.__class__( @@ -125,7 +125,7 @@ def __init__( config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = None, trace_id: Optional[str] = None, - metadata: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, **kwargs, ) -> None: super().__init__( @@ -165,7 +165,7 @@ def copy( config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = None, trace_id: Optional[str] = None, - metadata: Optional[str] = None, + metadata: Optional[dict[str, str]] = None, **kwargs, ) -> AsyncPortkey: return self.__class__( From 95473390b7949cf789c9d426690c7afabf635407 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Tue, 14 May 2024 07:05:50 -0400 Subject: [PATCH 16/38] fix: metadata type in base_client --- portkey_ai/api_resources/base_client.py | 4 ++-- portkey_ai/api_resources/client.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/portkey_ai/api_resources/base_client.py b/portkey_ai/api_resources/base_client.py index b40b4c98..0f6ef6f5 100644 --- a/portkey_ai/api_resources/base_client.py +++ b/portkey_ai/api_resources/base_client.py @@ -53,7 +53,7 @@ def __init__( config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = None, trace_id: Optional[str] = None, - metadata: Optional[str] = None, + metadata: Union[Optional[dict[str, str]], str] = None, **kwargs, ) -> None: self.api_key = api_key or default_api_key() @@ -402,7 +402,7 @@ def __init__( config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = None, trace_id: Optional[str] = None, - metadata: Optional[str] = None, + metadata: Union[Optional[dict[str, str]], str] = None, **kwargs, ) -> None: self.api_key = api_key or default_api_key() diff --git
a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index 5c75effa..9c0bba16 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -37,7 +37,7 @@ def __init__( config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = None, trace_id: Optional[str] = None, - metadata: Optional[dict[str, str]] = None, + metadata: Union[Optional[dict[str, str]], str] = None, **kwargs, ) -> None: super().__init__( @@ -77,7 +77,7 @@ def copy( config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = None, trace_id: Optional[str] = None, - metadata: Optional[dict[str, str]] = None, + metadata: Union[Optional[dict[str, str]], str] = None, **kwargs, ) -> Portkey: return self.__class__( @@ -125,7 +125,7 @@ def __init__( config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = None, trace_id: Optional[str] = None, - metadata: Optional[dict[str, str]] = None, + metadata: Union[Optional[dict[str, str]], str] = None, **kwargs, ) -> None: super().__init__( @@ -165,7 +165,7 @@ def copy( config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = None, trace_id: Optional[str] = None, - metadata: Optional[dict[str, str]] = None, + metadata: Union[Optional[dict[str, str]], str] = None, **kwargs, ) -> AsyncPortkey: return self.__class__( From aaef3380730850143884d49b5d06763cbb15ec8e Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Tue, 14 May 2024 08:23:28 -0400 Subject: [PATCH 17/38] feat: added support for cache_namespace header --- portkey_ai/api_resources/base_client.py | 6 ++++++ portkey_ai/api_resources/client.py | 8 ++++++++ portkey_ai/api_resources/utils.py | 1 + 3 files changed, 15 insertions(+) diff --git a/portkey_ai/api_resources/base_client.py b/portkey_ai/api_resources/base_client.py index b40b4c98..465de7da 100644 --- a/portkey_ai/api_resources/base_client.py +++ b/portkey_ai/api_resources/base_client.py @@ -54,6 +54,7 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + cache_namespace: Optional[str] = None, **kwargs, ) -> None: self.api_key = api_key or default_api_key() @@ -63,6 +64,7 @@ def __init__( self.provider = provider self.trace_id = trace_id self.metadata = metadata + self.cache_namespace = cache_namespace self.kwargs = kwargs self.custom_headers = createHeaders( @@ -72,6 +74,7 @@ def __init__( provider=provider, trace_id=trace_id, metadata=metadata, + cache_namespace=cache_namespace, **kwargs, ) @@ -403,6 +406,7 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + cache_namespace: Optional[str] = None, **kwargs, ) -> None: self.api_key = api_key or default_api_key() @@ -412,6 +416,7 @@ def __init__( self.provider = provider self.trace_id = trace_id self.metadata = metadata + self.cache_namespace = cache_namespace self.kwargs = kwargs self.custom_headers = createHeaders( @@ -421,6 +426,7 @@ def __init__( provider=provider, trace_id=trace_id, metadata=metadata, + cache_namespace=cache_namespace, **kwargs, ) diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index d927906f..81ca40be 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -38,6 +38,7 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + cache_namespace: Optional[str] = None, **kwargs, ) -> None: super().__init__( @@ -48,6 +49,7 @@ def __init__( config=config, provider=provider,
trace_id=trace_id, metadata=metadata, + cache_namespace=cache_namespace, **kwargs, ) @@ -78,6 +80,7 @@ def copy( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + cache_namespace: Optional[str] = None, **kwargs, ) -> Portkey: return self.__class__( @@ -88,6 +91,7 @@ def copy( provider=provider or self.provider, trace_id=trace_id or self.trace_id, metadata=metadata or self.metadata, + cache_namespace=cache_namespace or self.cache_namespace, **self.kwargs, **kwargs, ) @@ -126,6 +130,7 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + cache_namespace: Optional[str] = None, **kwargs, ) -> None: super().__init__( @@ -136,6 +141,7 @@ def __init__( provider=provider, trace_id=trace_id, metadata=metadata, + cache_namespace=cache_namespace, **kwargs, ) @@ -166,6 +172,7 @@ def copy( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + cache_namespace: Optional[str] = None, **kwargs, ) -> AsyncPortkey: return self.__class__( @@ -176,6 +183,7 @@ def copy( provider=provider or self.provider, trace_id=trace_id or self.trace_id, metadata=metadata or self.metadata, + cache_namespace=cache_namespace or self.cache_namespace, **self.kwargs, **kwargs, ) diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index 860952e4..cf5d2998 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -248,6 +248,7 @@ class Constructs(BaseModel): deployment_id: Optional[str] = None resource_name: Optional[str] = None api_version: Optional[str] = None + cache_namespace: Optional[str] = None class LLMOptions(Constructs, ConversationInput, ModelParams): From ed0cab9526d459dd61933a1ad2c3189c961a8fa7 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Fri, 17 May 2024 14:09:44 -0400 Subject: [PATCH 18/38] fix: assistant response type --- portkey_ai/api_resources/types/assistant_type.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/portkey_ai/api_resources/types/assistant_type.py b/portkey_ai/api_resources/types/assistant_type.py index 28ec2f37..f5967195 100644 --- a/portkey_ai/api_resources/types/assistant_type.py +++ b/portkey_ai/api_resources/types/assistant_type.py @@ -35,7 +35,7 @@ class Assistant(BaseModel): id: Optional[str] created_at: Optional[int] description: Optional[str] = None - file_ids: Optional[List[str]] + file_ids: Optional[List[str]] = None instructions: Optional[str] = None metadata: Optional[object] = None model: Optional[str] From 87bf8f3723eca8ae76cf47cf2298604d69c023a5 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 18 May 2024 11:55:42 -0400 Subject: [PATCH 19/38] fix: fixed test cases --- tests/test_assistants.py | 1 - tests/test_async_moderations.py | 12 ++++++------ tests/test_moderations.py | 8 ++++---- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/tests/test_assistants.py b/tests/test_assistants.py index 5622be49..15c21c2a 100644 --- a/tests/test_assistants.py +++ b/tests/test_assistants.py @@ -112,7 +112,6 @@ def test_method_all_params(self, client: Any, virtual_key: str) -> None: assistant = portkey.beta.assistants.create( model=model, description="string", - file_ids=["file-m9QiEaDT9Le28LydiUTsUwDv"], instructions="You are a personal math tutor." 
+ "Write and run code to answer math questions.", metadata=metadata, diff --git a/tests/test_async_moderations.py b/tests/test_async_moderations.py index 253fe0f0..c522c75e 100644 --- a/tests/test_async_moderations.py +++ b/tests/test_async_moderations.py @@ -6,7 +6,7 @@ from typing import Any, Dict, List import pytest from uuid import uuid4 -from portkey_ai import Portkey +from portkey_ai import AsyncPortkey from time import sleep from dotenv import load_dotenv from .utils import read_json_file @@ -31,7 +31,7 @@ def get_configs(folder_path) -> List[Dict[str, Any]]: class TestModerations: - client = Portkey + client = AsyncPortkey parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) models = read_json_file("./tests/models.json") @@ -66,7 +66,7 @@ async def test_method_single_with_vk_and_provider( metadata=self.get_metadata(), ) - moderations = await portkey.mocerations.create( + moderations = await portkey.moderations.create( input="I want to kill them.", model="text-moderation-stable" ) @@ -89,7 +89,7 @@ async def test_method_single_with_basic_config(self, client: Any, config: Dict) config=config, ) - moderations = await portkey.mocerations.create( + moderations = await portkey.moderations.create( input="I want to kill them.", model="text-moderation-stable" ) @@ -119,7 +119,7 @@ async def test_method_single_provider_with_vk_retry_cache( config=config, ) - moderations = await portkey.mocerations.create( + moderations = await portkey.moderations.create( input="I want to kill them.", model="text-moderation-stable" ) @@ -136,7 +136,7 @@ async def test_method_single_provider_with_vk_retry_cache( config=config, ) - cached_moderations = await portkey_2.mocerations.create( + cached_moderations = await portkey_2.moderations.create( input="I want to kill them.", model="text-moderation-stable" ) diff --git a/tests/test_moderations.py b/tests/test_moderations.py index c57cb951..3a961906 100644 --- a/tests/test_moderations.py +++ b/tests/test_moderations.py @@ -65,7 +65,7 @@ def test_method_single_with_vk_and_provider( metadata=self.get_metadata(), ) - moderations = portkey.mocerations.create( + moderations = portkey.moderations.create( input="I want to kill them.", model="text-moderation-stable" ) @@ -87,7 +87,7 @@ def test_method_single_with_basic_config(self, client: Any, config: Dict) -> Non config=config, ) - moderations = portkey.mocerations.create( + moderations = portkey.moderations.create( input="I want to kill them.", model="text-moderation-stable" ) @@ -116,7 +116,7 @@ def test_method_single_provider_with_vk_retry_cache( config=config, ) - moderations = portkey.mocerations.create( + moderations = portkey.moderations.create( input="I want to kill them.", model="text-moderation-stable" ) @@ -133,7 +133,7 @@ def test_method_single_provider_with_vk_retry_cache( config=config, ) - cached_moderations = portkey_2.mocerations.create( + cached_moderations = portkey_2.moderations.create( input="I want to kill them.", model="text-moderation-stable" ) From ed3cf0159a630f3bf75330955ac959819ac545a0 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 18 May 2024 16:00:08 -0400 Subject: [PATCH 20/38] feat: audio test cases added --- portkey_ai/api_resources/apis/audio.py | 11 +- tests/test_async_audio_speech.py | 151 ++++++++++++++++++++++++ tests/test_async_audio_transcript.py | 153 +++++++++++++++++++++++++ tests/test_async_audio_translation.py | 153 +++++++++++++++++++++++++ tests/test_audio_speech.py | 148 ++++++++++++++++++++++++ tests/test_audio_transcript.py | 150 
++++++++++++++++++++++++ tests/test_audio_translation.py | 150 ++++++++++++++++++++++++ 7 files changed, 909 insertions(+), 7 deletions(-) create mode 100644 tests/test_async_audio_speech.py create mode 100644 tests/test_async_audio_transcript.py create mode 100644 tests/test_async_audio_translation.py create mode 100644 tests/test_audio_speech.py create mode 100644 tests/test_audio_transcript.py create mode 100644 tests/test_audio_translation.py diff --git a/portkey_ai/api_resources/apis/audio.py b/portkey_ai/api_resources/apis/audio.py index 7bdf29a3..9b062db7 100644 --- a/portkey_ai/api_resources/apis/audio.py +++ b/portkey_ai/api_resources/apis/audio.py @@ -97,19 +97,16 @@ def create( speed: Union[float, NotGiven] = NOT_GIVEN, **kwargs ) -> Any: - response = self.openai_client.with_raw_response.audio.speech.create( + response = self.openai_client.audio.speech.create( input=input, model=model, voice=voice, response_format=response_format, speed=speed, **kwargs - ) - - data = GenericResponse(**json.loads(response.text)) - data._headers = response.headers + ) - return data + return response class AsyncAudio(AsyncAPIResource): @@ -202,7 +199,7 @@ async def create( speed: Union[float, NotGiven] = NOT_GIVEN, **kwargs ) -> Any: - response = await self.openai_client.with_raw_response.audio.speech.create( + response = await self.openai_client.audio.speech.create( input=input, model=model, voice=voice, diff --git a/tests/test_async_audio_speech.py b/tests/test_async_audio_speech.py new file mode 100644 index 00000000..06a5ee72 --- /dev/null +++ b/tests/test_async_audio_speech.py @@ -0,0 +1,151 @@ +from __future__ import annotations +import inspect + +import os +from os import walk +from typing import Any, Dict, List +import pytest +from uuid import uuid4 +from portkey_ai import AsyncPortkey +from time import sleep +from dotenv import load_dotenv +from .utils import read_json_file + + +load_dotenv(override=True) + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/audio" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestAudioSpeech: + client = AsyncPortkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # -------------------------- + # Test-1 + t1_params = [] + t = [] + for k, v in models.items(): + for i in v["chat"]: + t.append((client, k, os.environ.get(v["env_variable"]), i)) + + t1_params.extend(t) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, provider, auth, model", t1_params) + async def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"{auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + + audio = await portkey.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog." 
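# Note on the audio.py change above: with with_raw_response dropped,
# speech.create() returns the OpenAI SDK's binary response object as-is, so
# audio.content holds the raw MP3 bytes. A typical caller-side sketch
# (output filename assumed):
#
#     audio = await portkey.audio.speech.create(
#         model="tts-1", voice="alloy", input="..."
#     )
#     with open("speech_out.mp3", "wb") as f:
#         f.write(audio.content)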
+ ) + + assert isinstance(audio.content, bytes) is True + + # -------------------------- + # Test -2 + t2_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t2_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t2_params) + async def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + + audio = await portkey.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog." + ) + + assert isinstance(audio.content, bytes) is True + + # -------------------------- + # Test-3 + t3_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"): + t3_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t3_params) + async def test_method_single_provider_with_vk_retry_cache( + self, client: Any, config: Dict + ) -> None: + # 1. Make a new cache the cache + # 2. Make a cache hit and see if the response contains the data. + random_id = str(uuid4()) + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + audio = await portkey.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog." + ) + + assert isinstance(audio.content, bytes) is True + # Sleeping for the cache to reflect across the workers. The cache has an + # eventual consistency and not immediate consistency. + sleep(20) + portkey_2 = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + cached_audio = await portkey_2.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog." 
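# A stricter check than the isinstance() assertions in this test would be to
# compare the cached bytes against the first response directly, e.g. (sketch;
# assumes the semantic cache returns the identical payload on a hit):
#
#     assert cached_audio.content == audio.content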
+ ) + + assert isinstance(cached_audio.content, bytes) is True diff --git a/tests/test_async_audio_transcript.py b/tests/test_async_audio_transcript.py new file mode 100644 index 00000000..d058126c --- /dev/null +++ b/tests/test_async_audio_transcript.py @@ -0,0 +1,153 @@ +from __future__ import annotations +import inspect + +import os +from os import walk +from typing import Any, Dict, List +import pytest +from uuid import uuid4 +from portkey_ai import AsyncPortkey +from time import sleep +from dotenv import load_dotenv +from .utils import read_json_file + + +load_dotenv(override=True) + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/audio" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestAudioTranscript: + client = AsyncPortkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # -------------------------- + # Test-4 + t4_params = [] + t4 = [] + for k, v in models.items(): + for i in v["chat"]: + t4.append((client, k, os.environ.get(v["env_variable"]), i)) + + t4_params.extend(t4) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, provider, auth, model", t4_params) + async def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"{auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + + audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + + transcript = await portkey.audio.transcriptions.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(transcript.text, str) is True + + # -------------------------- + # Test -5 + t5_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t5_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t5_params) + async def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + + transcript = await portkey.audio.transcriptions.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(transcript.text, str) is True + + # -------------------------- + # Test-6 + t6_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"): + t6_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t6_params) + async def test_method_single_provider_with_vk_retry_cache( + self, client: Any, config: Dict + ) -> None: + # 1. Make a new cache the cache + # 2. Make a cache hit and see if the response contains the data. 
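# The transcription tests below open the fixture via a machine-specific
# absolute path and never close the handle. A portable sketch of the same
# call (relative path from the repo root assumed):
#
#     with open("./tests/configs/audio/speech.mp3", "rb") as audio_file:
#         transcript = await portkey.audio.transcriptions.create(
#             model="whisper-1", file=audio_file
#         )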
+ random_id = str(uuid4()) + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + + transcript = await portkey.audio.transcriptions.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(transcript.text, str) is True + # Sleeping for the cache to reflect across the workers. The cache has an + # eventual consistency and not immediate consistency. + sleep(20) + portkey_2 = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + cached_transcript = await portkey_2.audio.transcriptions.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(cached_transcript.text, str) is True + diff --git a/tests/test_async_audio_translation.py b/tests/test_async_audio_translation.py new file mode 100644 index 00000000..b2c4e43f --- /dev/null +++ b/tests/test_async_audio_translation.py @@ -0,0 +1,153 @@ +from __future__ import annotations +import inspect + +import os +from os import walk +from typing import Any, Dict, List +import pytest +from uuid import uuid4 +from portkey_ai import AsyncPortkey +from time import sleep +from dotenv import load_dotenv +from .utils import read_json_file + + +load_dotenv(override=True) + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/audio" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestAudioTranslations: + client = AsyncPortkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # -------------------------- + # Test-4 + t4_params = [] + t4 = [] + for k, v in models.items(): + for i in v["chat"]: + t4.append((client, k, os.environ.get(v["env_variable"]), i)) + + t4_params.extend(t4) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, provider, auth, model", t4_params) + async def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"{auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + + audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + + translations = await portkey.audio.translations.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(translations.text, str) is True + + # -------------------------- + # Test -5 + t5_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t5_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t5_params) + async def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + portkey = 
client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + + translations = await portkey.audio.translations.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(translations.text, str) is True + + # -------------------------- + # Test-6 + t6_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"): + t6_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t6_params) + async def test_method_single_provider_with_vk_retry_cache( + self, client: Any, config: Dict + ) -> None: + # 1. Make a new cache the cache + # 2. Make a cache hit and see if the response contains the data. + random_id = str(uuid4()) + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + + translations = await portkey.audio.translations.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(translations.text, str) is True + # Sleeping for the cache to reflect across the workers. The cache has an + # eventual consistency and not immediate consistency. + sleep(20) + portkey_2 = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + cached_translations = await portkey_2.audio.translations.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(cached_translations.text, str) is True + diff --git a/tests/test_audio_speech.py b/tests/test_audio_speech.py new file mode 100644 index 00000000..b429036e --- /dev/null +++ b/tests/test_audio_speech.py @@ -0,0 +1,148 @@ +from __future__ import annotations +import inspect + +import os +from os import walk +from typing import Any, Dict, List +import pytest +from uuid import uuid4 +from portkey_ai import Portkey +from time import sleep +from dotenv import load_dotenv +from .utils import read_json_file + + +load_dotenv(override=True) + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/audio" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestAudioSpeech: + client = Portkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # -------------------------- + # Test-1 + t1_params = [] + t = [] + for k, v in models.items(): + for i in v["chat"]: + t.append((client, k, os.environ.get(v["env_variable"]), i)) + + t1_params.extend(t) + + @pytest.mark.parametrize("client, provider, auth, model", t1_params) + def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = 
client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"{auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + + audio = portkey.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog." + ) + + assert isinstance(audio.content, bytes) is True + + # -------------------------- + # Test -2 + t2_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t2_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t2_params) + def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + + audio = portkey.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog." + ) + + assert isinstance(audio.content, bytes) is True + + # -------------------------- + # Test-3 + t3_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"): + t3_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t3_params) + def test_method_single_provider_with_vk_retry_cache( + self, client: Any, config: Dict + ) -> None: + # 1. Make a new cache the cache + # 2. Make a cache hit and see if the response contains the data. + random_id = str(uuid4()) + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + audio = portkey.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog." + ) + + assert isinstance(audio.content, bytes) is True + # Sleeping for the cache to reflect across the workers. The cache has an + # eventual consistency and not immediate consistency. + sleep(20) + portkey_2 = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + cached_audio = portkey_2.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog." 
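# The create() signature patched earlier in this series also threads
# response_format and speed through to OpenAI, so a caller could request a
# different container or playback rate, e.g. (sketch, values assumed):
#
#     portkey.audio.speech.create(
#         model="tts-1", voice="alloy", input="...",
#         response_format="mp3", speed=1.0,
#     )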
+ ) + + assert isinstance(cached_audio.content, bytes) is True diff --git a/tests/test_audio_transcript.py b/tests/test_audio_transcript.py new file mode 100644 index 00000000..206361c1 --- /dev/null +++ b/tests/test_audio_transcript.py @@ -0,0 +1,150 @@ +from __future__ import annotations +import inspect + +import os +from os import walk +from typing import Any, Dict, List +import pytest +from uuid import uuid4 +from portkey_ai import Portkey +from time import sleep +from dotenv import load_dotenv +from .utils import read_json_file + + +load_dotenv(override=True) + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/audio" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestAudioTranscript: + client = Portkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # -------------------------- + # Test-4 + t4_params = [] + t4 = [] + for k, v in models.items(): + for i in v["chat"]: + t4.append((client, k, os.environ.get(v["env_variable"]), i)) + + t4_params.extend(t4) + + @pytest.mark.parametrize("client, provider, auth, model", t4_params) + def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"{auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + + audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + + transcript = portkey.audio.transcriptions.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(transcript.text, str) is True + + # -------------------------- + # Test -5 + t5_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t5_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t5_params) + def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + + transcript = portkey.audio.transcriptions.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(transcript.text, str) is True + + # -------------------------- + # Test-6 + t6_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"): + t6_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t6_params) + def test_method_single_provider_with_vk_retry_cache( + self, client: Any, config: Dict + ) -> None: + # 1. Make a new cache the cache + # 2. Make a cache hit and see if the response contains the data. 
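# Caveat for the cache test below: the single audio_file handle is consumed
# by the first transcriptions.create() upload, so the second request may read
# from EOF. Rewinding between the two calls avoids that (sketch):
#
#     audio_file.seek(0)
#     cached_transcript = portkey_2.audio.transcriptions.create(
#         model="whisper-1", file=audio_file
#     )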
+ random_id = str(uuid4()) + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + + transcript = portkey.audio.transcriptions.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(transcript.text, str) is True + # Sleeping for the cache to reflect across the workers. The cache has an + # eventual consistency and not immediate consistency. + sleep(20) + portkey_2 = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + cached_transcript = portkey_2.audio.transcriptions.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(cached_transcript.text, str) is True + diff --git a/tests/test_audio_translation.py b/tests/test_audio_translation.py new file mode 100644 index 00000000..1812ab71 --- /dev/null +++ b/tests/test_audio_translation.py @@ -0,0 +1,150 @@ +from __future__ import annotations +import inspect + +import os +from os import walk +from typing import Any, Dict, List +import pytest +from uuid import uuid4 +from portkey_ai import Portkey +from time import sleep +from dotenv import load_dotenv +from .utils import read_json_file + + +load_dotenv(override=True) + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/audio" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestAudioTranslations: + client = Portkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # -------------------------- + # Test-1 + t4_params = [] + t4 = [] + for k, v in models.items(): + for i in v["chat"]: + t4.append((client, k, os.environ.get(v["env_variable"]), i)) + + t4_params.extend(t4) + + @pytest.mark.parametrize("client, provider, auth, model", t4_params) + def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"{auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + + audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + + translations = portkey.audio.translations.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(translations.text, str) is True + + # -------------------------- + # Test -2 + t5_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t5_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t5_params) + def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + 
config=config, + ) + audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + + translations = portkey.audio.translations.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(translations.text, str) is True + + # -------------------------- + # Test-3 + t6_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"): + t6_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t6_params) + def test_method_single_provider_with_vk_retry_cache( + self, client: Any, config: Dict + ) -> None: + # 1. Make a new cache the cache + # 2. Make a cache hit and see if the response contains the data. + random_id = str(uuid4()) + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + + translations = portkey.audio.translations.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(translations.text, str) is True + # Sleeping for the cache to reflect across the workers. The cache has an + # eventual consistency and not immediate consistency. + sleep(20) + portkey_2 = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + cached_translations = portkey_2.audio.translations.create( + model="whisper-1", + file=audio_file + ) + + assert isinstance(cached_translations.text, str) is True + From 1a3fbf61f69c57313c3db6da763d674521622d05 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 18 May 2024 16:09:35 -0400 Subject: [PATCH 21/38] feat: added config files for audio and moderations --- .../audio/single_provider/single_provider.json | 3 +++ .../single_provider_with_vk_retry_cache.json | 13 +++++++++++++ .../single_with_basic_config.json | 3 +++ tests/configs/audio/speech.mp3 | Bin 0 -> 58080 bytes .../single_provider/single_provider.json | 3 +++ .../single_provider_with_vk_retry_cache.json | 13 +++++++++++++ .../single_with_basic_config.json | 3 +++ 7 files changed, 38 insertions(+) create mode 100644 tests/configs/audio/single_provider/single_provider.json create mode 100644 tests/configs/audio/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json create mode 100644 tests/configs/audio/single_with_basic_config/single_with_basic_config.json create mode 100644 tests/configs/audio/speech.mp3 create mode 100644 tests/configs/moderations/single_provider/single_provider.json create mode 100644 tests/configs/moderations/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json create mode 100644 tests/configs/moderations/single_with_basic_config/single_with_basic_config.json diff --git a/tests/configs/audio/single_provider/single_provider.json b/tests/configs/audio/single_provider/single_provider.json new file mode 100644 index 00000000..9471258a --- /dev/null +++ b/tests/configs/audio/single_provider/single_provider.json @@ -0,0 +1,3 @@ +{ + "virtual_key": "openai-virtual-key" +} \ No newline at end of file diff --git a/tests/configs/audio/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json b/tests/configs/audio/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json new file mode 
100644
index 00000000..52281ce7
--- /dev/null
+++ b/tests/configs/audio/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json
@@ -0,0 +1,13 @@
+{
+    "virtual_key": "openai-virtual-key",
+    "cache": {
+        "mode": "semantic",
+        "max_age": 60
+    },
+    "retry": {
+        "attempts": 5,
+        "on_status_codes": [
+            429
+        ]
+    }
+}
\ No newline at end of file
diff --git a/tests/configs/audio/single_with_basic_config/single_with_basic_config.json b/tests/configs/audio/single_with_basic_config/single_with_basic_config.json
new file mode 100644
index 00000000..9471258a
--- /dev/null
+++ b/tests/configs/audio/single_with_basic_config/single_with_basic_config.json
@@ -0,0 +1,3 @@
+{
+    "virtual_key": "openai-virtual-key"
+}
\ No newline at end of file
diff --git a/tests/configs/audio/speech.mp3 b/tests/configs/audio/speech.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..ea039efb7a1386809e710cc984cb096b8f3baf7d
GIT binary patch
literal 58080
[base85-encoded binary payload for tests/configs/audio/speech.mp3 omitted]
z#~1tpMM(#e)qX);Nc@G{*UWYxZcM-K_r$M-eJgQ46A=tKToHvVSiX8#JKQtt^u#gN z^d61aSyd);qTB^qOq@zBWrQ*eU*cF`Rho1=E>-Bj=gAO4r+m0@KEeBY2_Z~y(9`T) zEhrSi$Xed1Zhykn8HP+`R&A4VCTwVvQf+c^M#ivQ!$ACQMDAl0$2OUJ-&SZ3@VgIK=n@rANCk-N_eU)7rJ;tN7Pj^g@T4M+AmJfDXe5=_2Icj8jkZ5oK(!anM!%O&T*=bnvOFwH z{(V0u+E|lZB>0-pL=d=Mc^TW)%=N&s9e)|WI${|y>;*wB?N2<5Mn$CZDM;Jls$9%1 z=7z|`Cy;m#oO>>VSNyuRk|7)vFj=3euf}rzq2twI!Y6|;kA%%$zTpN> z23z&;m>{`@^4^(m<%WAp#Un06uCAi)$5+SbkI>7IGzd0T#U3rz65xUpvKqHq>U#M+ zQZI5FRRIkctaG))PRh3fN}*&GZck=^;L`*YMm5`52}7GP&k)!`9b|LQMWp;J&3D!~ z#?K*!5ijOA1Ls>h-}-c4qjMq&06UV?-c=@t$Yf7Z2##kVk`NsNmYtmT?k|B8=Mjql z5op1aIa6{iA|ZAYpmL}UGX7+48?Vg5q*>$J>d!556A6>%3C)+JQAi^{G8}v$H5jycrs^UL9l0_*k3M?=*a{N$dkSmd!r(~Ht&o1PgN4}$hu zkE&RiC>g9a07p$JUO0LAc-&F|e8`qKihU%ldBHV?6Gsg92$(sV0Ea6R1$AhHP;9F( z&4YS%bJQg`73YRDmJ61NI5?SCDq#snFj`OWC+TR=9_Sp%U<~gCdSnK|oHbThTs?l^ zZ+ySTFIig>KX+`u9(qVtLX)_gu8D4o1;+%z(op&GS=~!c_MvUVs-%H>bx~1Ot+Om6 z6sARs%!}=)k2OU>PzSwa9+@~8<70*6JsT%fUzZh~Gp&ggq#GE41q{>WbpAp26w{Y^ z(wX&n4CFN+jq6rw<6eKOhs5&Y;OXok{nNZrciFP$JWU4YOMNRgwy&zt%S{P?%bczE zBCA};I!Ajp8QQ{OK!kt;lTcriueA~o9sqa9B*~;;pi!s+s`t@Cf!!X4E#U7y&dOKd z4rM|G1Lxb9K{qL(#Fj}Gs<&hRV}(k2^dG)xB0;5rY|UI&vSDwEbGpAAXXN&cf}JqX zaQEMSzuDujFIQsRG&W%IGGluZUVcauPBk0e=}-O=cRzhU`EQF4s;&2u?gkVheWe|X z0Y+gF#iCt&zWI`B&h3PVjibrUgSV89^&Z(uQBrpI^G4x<*B?7ZR1A2z6w-)yzE87( z`rbbWgs)nMBwoxUp@<#;CRGeLic?r3Hy1V^SP}v5Ho@t2dI0mLHW(M|t7MU6%aciD z!g+M{aIsSa`SNg}hX-kLQiKZM6A!_g*CZgRRK<0Q43?+ntyR}A=!A6G3)|MX0)*a;*zdlmT86;~42H znbkz*rs@eo+^AECEMHv~3P+iyW}td zE{B$@ZfeKQ5GxdFk%uxsH@=&qA?=z|&o?&Nrs9?~mF1KwD???|U7pCOH%WplNJ$h_ zMzS9y7p>7A4A?)%tjwy2l&*Tn8Y{Yaxv_l>6BSLF#V zZ3RWgmWz33(p44b`TS3_M+oMmAtm{d1Cc4eD!{JAsK~XLkub{HkSLe$4Ncag-k)to zqX)eYeh!b{yz!{UvGHG`3D>mN0Bva}Z(ZM7{X?|iAf<$s;0O9)^P zj~s-(gHt^IDWv9yN;DMbF7DpwRv^ubMG688T{{JX1R|%JWvQu>3{PeW30zytRTmst z$d=5LHmbk2glie@yQCG=WXUHDyTLhgouF%8HS`@siZ))mj}`m)4}q#JOQmepyHHlp zD&0~bi&f8#>!_*(n8TBTO9mI;Ws@uJ2R4|NS&bFyt68z@)z5SW&gKfn7s~QlT0f|U zn^Ad)xGtAnw0wDZ)SAIR2ftd!?A1)Q&pn?~T4fur*siSrG-9=RE%=|Y%PxzCeVF!P z29ePz)!$_h%NuXBP7{hs+|4OvR@3iBAk)=xzw!ML7ix_KKEJDAYwKvbD3@)ak?qW| zH?`wsvdoUAIfuvs`%RAq2;pU#1i(M9{4Am+V(k~vwn|lnWf≦S*&Z8q3fLE`nu7 zK_*e`YM6>BTG_qKs$-Fdy)yAAKRir&nxxLK*q79tGlY@td8H$`dzRR-JbM#VYKqpmOqti8#RO9b&4mqw1^Bi30Mtvqk50`1;F$AWPl7|Laq>7O(+(iWb26ahqfoslesvg z!fCX(bV;8)g&U+QU8A|to1=-3KyYcYU7bHt+?;iPJOP-Oi`rrCnw8!Cza5adoA> z2GPxHmB080a>4N2ys1lTX}8kqjNG<=y4oBLy@B!VkT~sxwtNgP-s{g(X@6#WShTV; zEah&9&u)*Xk*Y0^x40IJ(+M&*UqHgpG0k1gRGvK`#3au|-pcO)UnWz<-lg=dJi=zUm~yXZXo4l)xPK`W(BUsOPGN{yE(26&FT9}TDlIL^(fC=*5isLU2Khx zf#Mvi2VFn1ya|RzkPD(N?sOShC6Cy^9iH9lp9S99Ykisk1q4JWz;+;;5gW`?Z&FeN z)fM#+HAJ*OGSeq&!Hr3yGFG?h)W$xOw^^Fg)-eY0 z2+y8*5(I3*fJ93Qfdmo+rY`POn{!qbcPo2g_TDNB^Fu5g?8>25f8I%#vFEq2c4&R& zTgNwwC3K^jJpIeR@8^?#RzD5St^)dWMMZw19ABDnXUV4MgTqP3tr**~{+GdDZut(W z)>hW*n`ZR6&edFQEwBhB)<-f@LPuAapkJgdSqUyyg`%}FaQn!D^N|%b>Mdrnhf6mZ z#+|Freg#~|;v3gr8P@;Wb*wgj^plH)#Z$*%Q}QCdy^`v>}tsueIG+@otfwCoKhczo)4v=BD&msOp+~2feYCwSK*^+`^g$3DUP(X_ zZeOds7A1)i{EhF@3HR>~I+e>183-MKQJM4ir5U!wo3-om=zL&snI>W}D`xC{p-^b` z6KM|J0m6U+4H>B~_E6zmrJUZu<}A&t@+zo|keR@ADLs-tT587Ormc=@7Tt6c#nNHe zeo&nSU4}tfm7!0;hyVsOa0;M$$uMaPschrTKfXSk>>5||c^RXB2@8>$keHYVs`x@i z?K2HTjf5~|J@>kjfh7X%{_W?(?t@s5iCy>;C8P!xBwD3z=+J@YsOIv{{`-ggqaNLV z4;RJSbvD?Avslm8m8At_$?a+&msM3&pvvN!+&k6$CdFG(CF*X$R9A6Ye9ap=^l!sR zU&NpeApUidlXIN85WgU`z zmU)eBh_uMSqQ-epsqHAp*VoB|S5ifY4imH2po44(OsIo1u7L=f!CrU=7AOSefmykC zW?(rSh60wrd9<0iWiGMLQd<15$2WR1r>ke{bimIzv(K+@!FsJ~lAdf{|5M%L@ki6b z)lUib`KN`7uwW7$8%Bjz48mc0#o~HZ3Zd#Id|fhF9Ce{Y7>$i8Leg(-Q#5Z)ne$wa zg!0cOn)d{a+W*FPIEiYFhdfvY%*-UZ7+(mKM$^(>ovIi(uL$SbR>gv&5oAUcmJuXk 
zDtJBe0@Y1m8fbzs_^3!olh{L!(SU)>^z1roBpSirPXQx}Mzo(80k&|UH$~!kOiUWl zGG#=j+5>!;y_v-G8f>x-(F}NE`QTlk45bAF2L8AmXo7)@JsjYooD>DrfaELu!s;<} zMz8{m>xQgXed_dq=7^v{MGf#Q)!TKETDgE+9p2?gI$)pS2p$vOh7k42qMSR2$Lku~ za*edfy6={tvVxvR|Eek9jz`;+4O+BUjAcg z#MZd{?Btf6JCCDs8q~di#3(3_KJ9vOXtm+KW8D{;;ja&VZ^z%Iy4=rjuNu_x@I;>O zyNn2`oZr_IzYk6o#CH^Lj>b0aUbDGtut1RHmELq}OkJ z+n7d@{O->SgfK0;$Ppt_#89|uOz3msd6*1TbDxih@QSVpC~sg*K>o7MN2|)R*}x#u z4k=E;U$>{H-XsF8^|C4Z`-70T@RyK6+}%^s2W7*e`yO1=t1#HR`sId6Wr4k8dajR> z*t1OKNPjo9e%O2V)75Xs2_x3z&AZMv2c{0sjIVc^b(pdTmP|f;ivK4*#q`wV?c_$g zYxhqcQVz(mGJG-TbfT<0<}Y!gb+xELbZk}!_r90yZ>Ep9=y`gPw61sEf7EY%_wekI ztD7z~@0p{<)yMUm+cW$hkp9=6O7;%3TR+_9iFIsa6;gFTJ=9Nm#YWO#uB(M_1MV=& zW$M(lOVBVKB+d=|OIlH8`jKe`RtF)J0y7zk8p{gEa;sd+Z3G4!k};Jj$;lFN+EfXZ zY0|u8r!q7uzn@~o|7FXuv#q3m%cnJDLfiO{$Jomhj$L({aGGrk61XlfCGH1?SlIf= zct|UWX57IAN|2?mpyzoqEE*Oy5KFUzVR0-}fzUW{;nHC71$h9&-d4PbpNjm6aRLaPa&|ISCh^$SGI{#Aen z;Z{LyS+tQimXIBP^}kMf@&kDt)KOwrpqilKfKC1HdO=wKdZNmoA!cm`esx!0Tdwc$ zu*rjly+y?WuZfs3e`jr#_A&Bu=FzRBQ$}`1DJ?wxaBtT{ieXKOYhC%?zhC;NIjPNS z#^YCdTIL9MLR%fWq0Nd#!wkz>&ucH1_LEf)@Xca;(UbEvUE`f!-r9CfXB|pt@w4t{ zj1>JEIv9U5{#9v}+UXjLd&b#eTD4Udj~~8WRl4IH=IdO^iSWIc5 zzm8Yi2$U5cC~I73R2|PKItT#auPdTSbS9L3$&PW<8cT@5{-_xn`TCMYztowe3F|vkek*^4*esUI6 zBwCSb&%~8!kQ#SyXmDV+i0QpC;w>f_2Tr$2K_C%Io_z&M6*JabD$_L>`=zwX&AwQZ zhv!Lk)R)(ypXm$Y@5a5j-*?dVQ3`4D_W0$dZk56VBjvCga%04$hnV3SaA@1r*}#b> z&5ObQFz08JPuSSfaXn&^e_)^L~21u_z6nG=ss9le6f)}cE11a znxzN&Le)^K7vC^*Xl(5=`Xq)rVL3~G3>z8|ZxoMC2Fw=wqvO$pd&i#8>*ace$zI~m zJ5ggt-RMeqm$^6`eLv6Zc8)o{5mqq{mk7njp5E2+qbG2Ugc*ind131l_!qhxPcTT8 z7h~*bx84c)b{xM!tQ_A!^i)KZ=Sv9#h*CZT6pRO}I?G_eMr3Kb7~0aTJa14Lt^->( z1SvoP4XFyiAi@oxF}wr9@PNU+Ik6~#JT!tVD-pG>xfqIl2Hp*!Wa!x6{rSO(C2PFw z*>`}jmyT~04oO1>W*AEdk#JNF=w@IbgAk&`^#&-SXhI4h?~IUaiEjs5iS<#`FU3~? zP-F;MlR<|$L15P3yzk=^GciQhn zo_n5U`12}t=K11o173uI@p!`}k3)vC0r64yeyB7=ML{m)_?N_t#M*E--)*+vf8AeK z87%P}H;H-b8zt%N(%kwkLQ7Y5H5eiwa`JPh+yuVaWO5i9k?u7(q#)jdh!zFF?AYg- zopN;=%S1*5phvE5JP0~Fxr%X&jHr;)t4#CBsu=M8=L5Y^t%MXnQtfabw*` z6H#`68VgV ztCd&R)LzW>nz?)1XgEdx63ukLGx8ql@{MqCC#WKd^K;WRt_0KQf*Ro02r}8^Z9;gf2$mh;nym+Q`{MpB!x9=VO z{NlsFmYo|)?_IpVeJ$Xc>*Sw@#&vL+VV-+0b{Tc-#Y{1`?RgvXEaph*+Jm~b_1O)t zeD`-O987+2%m4rV{rw;5BHi><{FuX;9A6GN(4R%OU?<%qz|a6^b%?Q5tTEg4)kUjPXtT^wp=B;VNvd=B>0R;sIC@A(LFcmZgSEb+Sqx$>+Ss0&Q9$1T zK@TUT(A{yN15_FM*^vlg0E%u;LrwG1ejn8nHyZ^*N93TP^|own_aPqle4P_n-4p6U zX&Yx+GaOFHc*KVg2(ASRDhl7yi_g59PInM|Pj2ojjh#WdUDOL8g~7wF zngW9`1ts}CqMLOxG6WPFcJO8DZ+sUI9knLFKR?7{Ro&nYo+unGKtO?rV>+$i{uA8UktF0%UEZr$H?=T(T#{4NGwXPdoK8XVK z!WmgPN7tX^E0cwz(rr~Xa;vHx()CRPCoc7y`hY2^1ccK zeXOPa#;p~z;=?k>m=l7=PHV04@zDpzH)VJ)9q>?AIs{*c-Et?W7}c9MPre=1*Ku*r zRN<#rQFSsGx^*s=g{LbRmdA2ccH8tdWd%&`K*z^4SLWn5|DW(hku}i@FP_dPVZ@YM z@Nv4Kc*o$>=#U*6WW^2#y|ssuCg(#lXxIVtO+HNMGBLJ=Ug}N0_bW;KLmoZR1>>6{ z(ZZ%P{AgSBh%k9(;ZYi4wrJ|%XQKFJ#oG(9j|Qr~`$fz`E52CBwz)hAN1T#MR#k#R5E%oIsyaPwDpValr({bVfH=b?=eyQ&w{tH1s~>zA z{aZiyC2`b zdaCeE^RCW&)*dG`f4q6Uxgn^KH9_ljB@uBvPQP`ka)uAdJY8jeNW zu6XTO|H-GM{MQSNE5BY0-Z}xR$uY0(G=`t|n&KEX64Y(Rzf0>QwJwfJQG4%kk2 z=n~LhHN^rY8=>S>XgCClgAPC&p@q1#4`N^TTnPC*lVeqyUZ;%Yv-osxiqJ?%gw6;^ zhWos$e%J3m$KX~!0f|3wTl%ElPPMW&Dr10^pONS0Mc7ID{^x-&Q0Pg<7tOf9hyGt* z(`~&I|4^(*3GPxdYaeGkzg^K)zkUB|o%I|48ArsQe%mT$m+a>r8dq3;prTbisdxQ4 zwfpL6Y=BfZaV=z<276(j?CR6&YwhaYOX18>ZG9_k7MSUH!Czg~TDrW$H2KAs+^g9> zn+)?7^X&6>3{)Sh@mvC)_I1D5SKmTG&0XAiTk+erBH5kF$InG1e?D1;-w*!W*>W&5 z-Ts~VeD*J&E#&lr+TxYH@=qF$?!eqyJo}#l{x3S=fBhsrU{F|3nVLkO$WKrn6NN2j zLg3bnMMx@>2wV@gmaSu&z?K*tS+Pvtv_8gm>9t@iXN?gIHDqK0_?X!^mfebx#7D&? 
zl+uT7=vK-8adTQHxj#$5cp5lCfL=bugn?9;^=D`qz zRF^PMm3IT|Q4WN=$NeP$9YS-2udHP#jiv)R92(l z7XH)ET7C|m*6$6l$Ku=f%8gfLTK68+3sei}t^f*n72d7a6N?6c0;i*P`vw2Lubvzp zvigYrhnhEe)v#pgc-RrD-dUC72j=~9eujDS)>e^UYze^?bVgGg({y>W_iMhpk;`w(|OM*wArhKokfYubPzUXlUws zR99@;&3fZ_KeKFwt@Xd*W$+k1oW?M7(*7e9RXxUOiB#vNj9#!m!inh&-NVp?1~5E1 zAVUXolo2A4+HdCAFse8`jBPA8h7wD_Fk{&roWlWp3CfP6%S2F6~tB7~61Cr9kE zn{odkv4P(m8x;t)xR#jbRKkNG_JFX(b0M3XdApt<()k`Rj12qkedRja|9EkmefEx_ZnmSjGf5aVZ!EKkHyhBy#B8E^}2M~47{R#~5bfSQrQ%&Bvd%?9)Ic}#o#QfsX4 z@`2;c;l{gXAVk~1T{K{1PyIBJKW@=KE{qrsMGZk{1H>M2{!mpVrDDyDs!UO@1uLv6 ztz=VbJH=tYEN_Jjaf&F>^DYeE&Lb{2L8S!Ueh|3592X`Ha7|?Fb<>3)=MeO3u(wZm z+{z)m=OO+o6F@UEu7?PyA@8iCgMUV_nt~%rnDr*;vZEKuQ~Cv*NEJ=GN}q>n^nOC9 zk3bJYu#l5%N(3$H0?!QsPZ>d;62V5o`a!E*vpL_udP#Z-6oCSKv5*89G!H;AW7Qkn zA22Od2~yQPC;}XkR)LZc%R*}oIbm5(P50S}RJV6L$FVmvgrIWSGAm<%@o#@l?zD#Z zOZ-LaCAVj$sMEk|R;54yhX_=m1U&=+RF&*O2zIgr92^DQme-bLiv%o8Ks*r*z-V;< zAp6O~J!43v;l#2Y@4g_R=msARr{j70tM<-Mw;xP@QS*ZBQQRfj09o^JaV4zc+OL9M@u+@uQ^c;i`4n zQt|b|nG;hF!=}?U>Rb%g20zHOjT&yy&HPXg4?rQXa$+l@sNwa3YZAy)g(f)4vAsWM>LLCVu2GZ!eB8V8Q<-_ov4=`^ES zC~r}Br|{{PS~KV{kCSM${6@ax23l2Qi38T}85Ld)r1504!tIsyVDM*Mrv-W;Wjur$ zsYOk>l3-*kSj7nz1uDW`0bGg?yvh^BgR7TZR%;pK9WHmCRt*d3A7uGc)fpn9gD7uw zOks3Jfi~tv9+*-W=BXHn$L0A}TJ%gD-2zRwmlXlbwDWn(;i1eJMvvt#nQVwIi!7uG zM9Tp#8*s4w#MmGsq;@%0fRo~|DFjV4Koc6~u_`%dqhb-4+3|I*)SH)YL3LV!G!2=t1D8H=qyxsp60xAI}Yd9O`3PlO+| z-rDXy`<|o$I!Nis&=sY{`cMa!pC{;o(o^88%JTIfsX#257!d14{LZDpqp&Qb6g#6< z8zL0SO8gIpByrQIA4aBZlUkoY>O#CeW%P^9!Kd42%D#T&1^gPY{HG^>!>?c2v*Me& zr=LPavyYw}d6?weEAQ(YMf$b#PPTLSH_TQq^WBTc459aymA$)0j%|99`1;kgoBa>J z>}~kpT^;-%#wl{Wbk&Xyn2fiaHMN15V|IpX28xDsAdV2ta+fI}WzRA~op3!^mudGz zeuG6#@D;O^6JQlXlJA~a90XAJ44*260Bc^?!i}-#?6r{UNTQcnxZ-vtOFOf!i(BUH z;RwxUkY*+g_Y<*n8)R5|K8JT)lDWv3fo-EfHmKU?v(n0lWjEQmN@cFr#ren#(Y**n zZ^vf0+>xlt?1=lmbXG>J02cI?DgejU$h>T7DzbwAOu`hXVy zd^`B9UvBGNLfiqD;BtzyYz2pWKyd2tr+wnyXT6ZcV%OJJpzKv6UiF*(r@|CtF})5Q z`qv9eZrSu5AXJpB+zyGq^zPwRuaD9cmv7IoZN^0*YP{##Ewy&H@r~gnpS^w|oGpGb z7o*pGj`xA{_ir*eSH0`S+pHhi#O^Kg$#r#9J+7uyKUe(g8u<^V4BUm+A0(xl>J+}c zj(J+DMm2n=Qe7XWP7#On)6@frBQdFUYSMbg2#;`&vK=6mB-^qJ?{l~Yxkd)5lUSoq zu-zUJF{UhR0L~vu5U&MOOy|H1!BCY*>jm4TZah@A8ax>Lt>O5fll>t|?&gh@Hx+B1 z9dB&uf77-!F~;?M4^ru?3ZPeQk%b^@&J9q8BLOjLlf=wuQWu7i@yNz*DQC>TsnM;M zQy#UfC6Bi~*~`84CSb@91327xF>tTuRKU#dP73D8wUl8nwwDrN`je5{j%Z{+G&f&p=`ix?alNU^q#;JB!#1#5}p1JDuc(Gg>W;BR)p1i5rGRF>KH z;Sh|JJr1U*ri?zX3#}MKkF*{xk4p{z)X;WYA3TsPgGO!C(k3ihFlMT=vJascTA4i9 zBV^(JTRGM~)F=J-Cua9*W90DEMzS~*8Zn<&Ap}NV+IWCurH7!UW#p8pU;oG&^EiN!8W7NCz_}EF-Wse`0nXoDMueizuU|+5mB>sgEI#Dcg~AS#$W@d zO`~7douJx1v*rCTUJdBr2b)$~LhKH@enuG_-g{KapeW0)di6KHFOP&;t)kz86^|b` zMqmR?sw)^X+agt>^J-xM)X}gPNyP?1AG%VnCht9;OFQ&ULh+|oTb(qKIocm`cTrq0aHkyxZ@Ta9u4v_Cq})jh=lD^aS6RE}-(}lU~lW6#royj4}9w z;#YTYqWZ+ey&lRisZ;y6D4TYkr)&(Be)D)oXW>};?Z1D?j9*fdWJG2hR5!9#b1SLPAqgJ8D4V)fR$d@@qtyCDMbV11 zzO&E@_737zA6Q#i_y7^>1QLSnBePs^oi}M@@9wT$a}eCL65L_tq48X?Nfs`@(aTVX zP2)@yXT&+aF~jiG7Sjhw(Jux92O8*xG2*C=#)DK(60)KFZpy|}L%Yx0!%&VEG`o((?Mx5UW zd+AwX?XL1-RPW-8OeC(Lu2MxDbHK^n?ay=Gh)p-P~`~K z;PU7x>j5=Rw-3|S2e8tWjRydsIGUn})vo73$nN&o$1F~cFB`(dThQc{OM(!HH!Toxw zFaiKdgun4!iSe_VdilBCD+Bwau*nYrHT8f%XEXp}_P3jxeO90a8O=3+-z`|WvgVg+ zo>=VS#p+(UEPD{9?AUZAG5nXi>LJn*MriiV)I86;$0AZ!K6cZyW;2T{lA&l*_cBRV z__gC>A}+FMF#09w?6gX|m+ZjM-uCz{jR7ZI;z|vudMP|_$AjorrOtvMr@hPLnNE6 zVj<#Od?~i%Q?^yQHM`|_ay=-4V^R17=!5FT;nnPw)%{MS@m95A_2i;tQY-t{_baZ8|d%b`(}XkC>o}qJi+bY?TSpR$Kh+tfHnorp~**wu-`T# z4VJduXpS8VuWl5UJA;S0QRfw`n!${(8qjsP=1^gKrpHH%l2rm&-9 z2l`&b+@S`3w7heX^~3=WTPs4Eq!X1Oi+Elh6T+E5 z0;)G3Z(g4wZ{D~<^Po~Rb8LOShXr;@v2h*$nBB7eaSdIjIhT3-7UF9Roks1m-jh_U 
zgW$)I(bz+|ShVD;C9f3pV7=38s3um{rtQP<-zf~|3l~QH`k;VMs?{1|I3R`=JH59X z4^!`I`@(lQOWC0JS8^pw=T6YT8n^zBI4x+w{Y_kOA7LSX)J{A)@E59l+J6g zfY_BF@2z8clx;1MCN}!G_H^ntt@aM#l7lDPW@d4XV^`g~N=3<`_#IL-=%#LOnk4g# zT~<`6WWvr^o-d(2M-Pq!wCX0)gP6Ho8EvYg^3|msF11TRx#9r%pnm7s=aC3}4E2Gs zi~_<c*)J$lXy<@8E`*8?emZYcsoc zKw7!ry+6&zq3Ve`SE0OCu!(t_%1JL*UlCH!FLq@ zZ)`ZZW0MfSI`2~Plv%MxND*^pL%Is0jsdtyUJC>N#7!Bc9%L3Dx~|V3-x{~{N!T2I9U z*FveM(IrjXUsfG!7o*`4>?p!JeingKA!%H^c@AKOQ7a~~&yBgII2yK2Rh6R3C0Dy3 z8vbkbenD)rkeqzh$&$Y@DN&MlGw>u?usaRafE_-%8leOaOKdM|2AI3CI&A(tr6wr% zR`Vb7@Jx9aCjOeT5yWtbTD=UZacq}5{L@y1siI&GPb#!Sh@4GJSd|@?Iin8aZ7QOE zL$eUYg&mu`b5#!us0nd;DYamjGi}?o)+O)`r>YNPY>VH>aINl;$=YhQ7w*i0m?{e& zlLt?=rnb6bhIA5OUoH+_#UgmCAwXFZ1-Q&O;)a)n5SLR55V(yR;XSLAJ;%gzmCj;T zqAgvk8bkvz%4Gz~qLd>nwif9K_?D!3N*c5LFa7@SnT_6~E+)Q_f^$J7L9OqXPd!g$ z?)gYhbdjqLk|aZL;ACOoMQmJW0yA);#Q;SVRPr|7tqEF#KcZ)ie?OF)!b;v5^;WuW zU^0*Wv4hQxxNEMPxVG~dXYM?><2wEP4e37kPm%b4tZ5_NJC2RXTE@Z@D2o&F$zY$e zi{vTC@AU}M2-gB^0l~K%8icwj)SiY3J7jCOKHIh3F0r--sd~n02wR=fGzu?)16-dZpphU!^PvWXo&R+GEX7OOwrg7n?bp60!4hFZO@KaUFeDcJGW;Q~dv91x>@sF=U49n48Jiyci1(&r1;+}3`)aw0YJlRkW|#((LV6ot_T!OS}^T%BI^ zRCX|Ijkaz)DEZmWO37>#n*gKC)_+Z(&p#K}v$}#)zT542P22Go5Bq1V{zme!i$$yy zcCpVqbed}Gply=8s5^2-b6Ffw8CsHO5&o~s!JWuZ_3$%~l;r@i&*vjlv_XFzeYyv$}* z(yGv6;of?b7Mn?{7)+K!hEQB9!3fT2z65lLzq|y6^|0b#0+dDIJQz$(Cke=>vnXWWszxK3vD5@@ByeL{ozrJOLmjyJE?UDUd}b&GauCF zZnak0tGaSjQl7kce_%RX@ngn2hl&dgO5fyhp9mF|z=in^_*GiUsn>rgQpqpW~?a;UI zf-%sCL|(P3vST9k6|LcwREKnSQmer9C>Y<`bU%|J&BCc9vDMoorPmHdP62C( zm6KHD<@G=IC%@cx!BCrBN3T6ON>omibF_yAS{9-xd-|ee7_~*dTgS=5W}vh(&COCCsBCCLqe)m&SF z+JX+hBHcbAmbc{Nt$-0ky2)FhfUs|vUc&!VsoU&9s)3GF$?LgH+@RdZs!NGV_n%1l zxZ2gz`Fcgl)hoQYhW=N6m%Qavk?$BhMmbAv(nyC?~(e4m@SkQ zVNcMytBySgi>YDN(Tc|IAX21Nuw9&AV$*W)KFHZH;_ue^1vt%>LFvZyt=rZo$-p0c zIOyYQ6buu8JA3OoWU;S1*)@^T)bFH~8}ss{H{-tAB?K4^eK&I8-{Mr&ys>3!bMDe;mhcOIp`U1$yd47ot1X!WJszG;hss8iIaUiG|-wt_lZ3S~w! 
zv!s%#{e^lg!?`LkrK*lNga}$+YR7=Kx9##`G+cF&awaeRXjH!O#&7t~@3V<3p&=(` z?evMO?wX-#x6SUHO@^#JGAt6k%D@YUPk<o`I)bKk3SRN_~v9Uw41b zBso{r8v}E>Rj?$gfL9u?mJ;?*Y~Q3}o)wD<9wy7{gVW?$+*fU2Yn5B(O5mshR7Yvg ze3c6St|Wdmj!dW3O@*pNpS3+VB9Ixrt30@@9Ul2saLQ`~Sq0iaYdO?ifc3fpr3D5Z z$RO7_KEPHGiH@Ah;>Y9d<8I0rG1ZdW1+1Oz08#VY(6Q3r-h^P<@v9Ee z;=cBO2nq%?I=6cTEy5>Kp}h>Aa6tH_e&1x(dXKVSb{;06HXd(@#=FZ_WB z+pRtKzMW}Hw!B`KOpep^O(IWd-oV>xOx{=!1(yGb;mq7^Ta15VGSxd)=r|q16r=Ak z?RF2Db)B64W-N175sgczS%U(8O?YE(POIqrhL(tuoOMI%8*(5xi`bDr)&+k$H4I%1 zNxtShV<;nEd>UOPVGNs@DVtHfO`@}9HjzWhN-rrGU;clezuyTPP+`-|YzNX5;2Bl} zB|#2a!B|;jtvb-tV?iM9=j$lM*&?$^@9$4jXbzXF(30!pw}s!0BtU(Fm4M+IV*-o~IhU;{Z8jx6yexjMCKuw9-@3^~Y8Hqf$8Z0Q zHiz+g=!*R?&N5=iXRS1=l`C5;oq}g3UGUv`01osuBW0lblJ|Wy#h6)s;rsJWmG=bu zHyE1qcaWf=_k@kuNdRd(iC_$^nw>mnq`0Uu$LZLC{ zdv|oCC9WA&^1-KuU_n-`o-mh38}v{S${;pq%utYYjQOR3-K+ByiqbewN{-$#VudFj z>=-mnAb1hOWMVtVp1|2YsW`jlbw@?}7?F%InLsd0ONe!iWQ<$P0vDN?39tWy71q>L z=IV?-RT_}7T||k%D273nk*W1~32-1Ar~_6kbI&Nocmqpmo4zhcn8J_M0?J%3imsVG z9+WbU6%%+%Hsz*1jW?(Txf^dZ-U=i7Ca!asO z!gFIMQ-u0lH;x8D+MC&gI8ct~t7ufY1ZFMK~A#C*B`cE<0~ zSOWn}`WRUussxWS1x~t~#Aq^KCdIrJCU^y~j}y7s?j7z2fuXS~Xzn=w?3rU7TrhoU zn+%|btcwx9Ve31o)HqX&DU%F15oW}yjR`11D`A#pCT8DN;aDPwMw(J^#v71x63fl}tojfkrO;3xVN`wnTKamHv6bk>l80DpJlhP!b&mD}ODnEhx7y1I|Cxz1O{Qf3o>H&B*3kq@I z8j*?~Y9p-3`rQt2kUquNzgoz_gBP3|9W00#qb;+snw{Z9@On>oPF5r97Pr?wD?`T9 z7a%mM;F*S~eaAFdWShP(A1J z7|;M1pz{tvHDw6x#C%mhY5*w~R2t>Uz*^){C3f%170gLiDI7^u{853fw&j)7!@X@4 zaeYXwuF z5l`tB#RceYd?4K8+>qShr_uX*p&+5{tY|TK9s*Y+CGjBc1izKe_zmm^OEj8Q3Nv@Y zhq4};lyxE0xr>fxj3?B8z7ws@==}w)Q05p1)9#R>rG}ikbdSV`4P;0^k_4Eq2+Yhp z^fy2!l3a45r947n_wb!k;i>3MAM*CZ0Mcd*1?E!aER6?-d4Jm^JZU{iDnFEeWAU@U zVE?f-1;d@oE1{J1L$~j*$~-I;l?PFKwYPSv#Z$@B=o%(MM(?5Bi=?KVf`>xuF?+bYuJs`zh;E?NIZPz=hfM zzDd8|@jf1*2x^q^V9kuPHG&R{d-zqO$H~1WjtcIjl zV2jX|f3PRK_T2yk>0M){^Fo0FQJkap2$}drdS6zTMS8OS`LhCAx;DS1s#HI1zF!{7 zxgZA_bNlWHNWr+^nXr(C^^Z&%L0&7Vd$lVZnGt(<_kMe-==scRZS=r~M`}f~LRFnx z(epVvDlR-bQsfZgG%>C1#LN_Z;d|b+KiLAgKMGv3@GRP6mAzZ!AkvvfVwSBwVD~cL z@Z7`phKc2+Gs85DP@r7FvGrWVL3}^;E`F>&xa!(4m>V=qUU(R5pgE9|2+&8)=bM$u z9+8&BX|J+R=c&J2$E=8(-MzWViiLvN82}m)s5|{8>)!V#tqOjE0Cj;{KYu?#3hf;r z@<{ctg|P2AzLtIc^mTcgZ_Ld4ok3P+c`;=W-I<}l`_fyeYpTX9CFF2p)+@ae?@5`H z^zpN_p?+63zmbl$#O@_VfcQ~4gmHayvdfN-)IIw>2$30b!-VX51M&NMcJdUUm7-0N zell=gs!3GEKG`{6qPtSUPq9yhR^-O!wvH}7`#R+}CdBP}dt3t?>0(a46-emZT%|Z` zNJCt8zEyqxR?*WRc}GOS+COfr(7xxcWi00WIJ|Q(q1A5IHC>AoR7-);ynzjHDNLLp zf?^KNg)DMJmUTDFGPekNaAIhC1lUXq|IOwt9ao0c_GsLWp@Vo56nyA6tX!N-qY(9~ zBq9#PF6IOtD%8BxfT4lI%u&gheeeVmug~Rg6TFSxMY*1B|I3s0?x9f6>rLFUCQn^L zBoStrT6mWi{)b;=Y08h?qusMRzP9J@@APrjt0=n*1UA)ZU)*NQc5&GO-nf zrt4dSz2Z2cZRYB7hRX&DXCFdQ4)@_3MgZl$Od)Oky5+q*HCiv|j6~NmsI!$>vUxAn zgwO%9VO>tjZr8C(7gNVy`RpF}ysCtMVs4*d%hcVtpci#*jJ6Ep-VVgvG=*^MlK0$p z2CAyPN!K}t+txG=6y9UQ!N%vC9ed@@>#0>ihl=`{R>*Dq-^w-Qp>nJy^Z)Lo|F@~u I|Mz$Q4z5%P-T(jq literal 0 HcmV?d00001 diff --git a/tests/configs/moderations/single_provider/single_provider.json b/tests/configs/moderations/single_provider/single_provider.json new file mode 100644 index 00000000..9471258a --- /dev/null +++ b/tests/configs/moderations/single_provider/single_provider.json @@ -0,0 +1,3 @@ +{ + "virtual_key": "openai-virtual-key" +} \ No newline at end of file diff --git a/tests/configs/moderations/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json b/tests/configs/moderations/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json new file mode 100644 index 00000000..52281ce7 --- /dev/null +++ 
b/tests/configs/moderations/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json @@ -0,0 +1,13 @@ +{ + "virtual_key": "openai-virtual-key", + "cache": { + "mode": "semantic", + "max_age": 60 + }, + "retry": { + "attempts": 5, + "on_status_codes": [ + 429 + ] + } +} \ No newline at end of file diff --git a/tests/configs/moderations/single_with_basic_config/single_with_basic_config.json b/tests/configs/moderations/single_with_basic_config/single_with_basic_config.json new file mode 100644 index 00000000..9471258a --- /dev/null +++ b/tests/configs/moderations/single_with_basic_config/single_with_basic_config.json @@ -0,0 +1,3 @@ +{ + "virtual_key": "openai-virtual-key" +} \ No newline at end of file From fd2e53fa595a636fb67d31cf752cb183b6465ac4 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 18 May 2024 16:12:22 -0400 Subject: [PATCH 22/38] fix: linting issues --- portkey_ai/api_resources/apis/audio.py | 5 ++-- tests/test_async_audio_speech.py | 12 ++++++---- tests/test_async_audio_transcript.py | 32 +++++++++++++++----------- tests/test_async_audio_translation.py | 32 +++++++++++++++----------- tests/test_async_moderations.py | 5 ++-- tests/test_audio_speech.py | 8 +++---- tests/test_audio_transcript.py | 28 ++++++++++++---------- tests/test_audio_translation.py | 28 ++++++++++++---------- tests/test_moderations.py | 1 - 9 files changed, 86 insertions(+), 65 deletions(-) diff --git a/portkey_ai/api_resources/apis/audio.py b/portkey_ai/api_resources/apis/audio.py index 9b062db7..e0367b79 100644 --- a/portkey_ai/api_resources/apis/audio.py +++ b/portkey_ai/api_resources/apis/audio.py @@ -6,7 +6,6 @@ import typing from portkey_ai.api_resources.types.audio_types import Transcription, Translation -from portkey_ai.api_resources.utils import GenericResponse class Audio(APIResource): @@ -104,9 +103,9 @@ def create( response_format=response_format, speed=speed, **kwargs - ) + ) - return response + return response class AsyncAudio(AsyncAPIResource): diff --git a/tests/test_async_audio_speech.py b/tests/test_async_audio_speech.py index 06a5ee72..953dd5d9 100644 --- a/tests/test_async_audio_speech.py +++ b/tests/test_async_audio_speech.py @@ -69,7 +69,7 @@ async def test_method_single_with_vk_and_provider( audio = await portkey.audio.speech.create( model="tts-1", voice="alloy", - input="The quick brown fox jumped over the lazy dog." + input="The quick brown fox jumped over the lazy dog.", ) assert isinstance(audio.content, bytes) is True @@ -82,7 +82,9 @@ async def test_method_single_with_vk_and_provider( @pytest.mark.asyncio @pytest.mark.parametrize("client, config", t2_params) - async def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + async def test_method_single_with_basic_config( + self, client: Any, config: Dict + ) -> None: portkey = client( base_url=base_url, api_key=api_key, @@ -94,7 +96,7 @@ async def test_method_single_with_basic_config(self, client: Any, config: Dict) audio = await portkey.audio.speech.create( model="tts-1", voice="alloy", - input="The quick brown fox jumped over the lazy dog." + input="The quick brown fox jumped over the lazy dog.", ) assert isinstance(audio.content, bytes) is True @@ -126,7 +128,7 @@ async def test_method_single_provider_with_vk_retry_cache( audio = await portkey.audio.speech.create( model="tts-1", voice="alloy", - input="The quick brown fox jumped over the lazy dog." 
+ input="The quick brown fox jumped over the lazy dog.", ) assert isinstance(audio.content, bytes) is True @@ -145,7 +147,7 @@ async def test_method_single_provider_with_vk_retry_cache( cached_audio = await portkey_2.audio.speech.create( model="tts-1", voice="alloy", - input="The quick brown fox jumped over the lazy dog." + input="The quick brown fox jumped over the lazy dog.", ) assert isinstance(cached_audio.content, bytes) is True diff --git a/tests/test_async_audio_transcript.py b/tests/test_async_audio_transcript.py index d058126c..6fd7462a 100644 --- a/tests/test_async_audio_transcript.py +++ b/tests/test_async_audio_transcript.py @@ -66,11 +66,13 @@ async def test_method_single_with_vk_and_provider( metadata=self.get_metadata(), ) - audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + audio_file = open( + "speech.mp3", + "rb", + ) transcript = await portkey.audio.transcriptions.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(transcript.text, str) is True @@ -83,7 +85,9 @@ async def test_method_single_with_vk_and_provider( @pytest.mark.asyncio @pytest.mark.parametrize("client, config", t5_params) - async def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + async def test_method_single_with_basic_config( + self, client: Any, config: Dict + ) -> None: portkey = client( base_url=base_url, api_key=api_key, @@ -91,11 +95,13 @@ async def test_method_single_with_basic_config(self, client: Any, config: Dict) metadata=self.get_metadata(), config=config, ) - audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + audio_file = open( + "speech.mp3", + "rb", + ) transcript = await portkey.audio.transcriptions.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(transcript.text, str) is True @@ -124,11 +130,13 @@ async def test_method_single_provider_with_vk_retry_cache( config=config, ) - audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + audio_file = open( + "speech.mp3", + "rb", + ) transcript = await portkey.audio.transcriptions.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(transcript.text, str) is True @@ -145,9 +153,7 @@ async def test_method_single_provider_with_vk_retry_cache( ) cached_transcript = await portkey_2.audio.transcriptions.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(cached_transcript.text, str) is True - diff --git a/tests/test_async_audio_translation.py b/tests/test_async_audio_translation.py index b2c4e43f..6640f81e 100644 --- a/tests/test_async_audio_translation.py +++ b/tests/test_async_audio_translation.py @@ -66,11 +66,13 @@ async def test_method_single_with_vk_and_provider( metadata=self.get_metadata(), ) - audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + audio_file = open( + "speech.mp3", + "rb", + ) translations = await portkey.audio.translations.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(translations.text, str) is True @@ -83,7 +85,9 @@ async def 
test_method_single_with_vk_and_provider( @pytest.mark.asyncio @pytest.mark.parametrize("client, config", t5_params) - async def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + async def test_method_single_with_basic_config( + self, client: Any, config: Dict + ) -> None: portkey = client( base_url=base_url, api_key=api_key, @@ -91,11 +95,13 @@ async def test_method_single_with_basic_config(self, client: Any, config: Dict) metadata=self.get_metadata(), config=config, ) - audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + audio_file = open( + "speech.mp3", + "rb", + ) translations = await portkey.audio.translations.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(translations.text, str) is True @@ -124,11 +130,13 @@ async def test_method_single_provider_with_vk_retry_cache( config=config, ) - audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + audio_file = open( + "speech.mp3", + "rb", + ) translations = await portkey.audio.translations.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(translations.text, str) is True @@ -145,9 +153,7 @@ async def test_method_single_provider_with_vk_retry_cache( ) cached_translations = await portkey_2.audio.translations.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(cached_translations.text, str) is True - diff --git a/tests/test_async_moderations.py b/tests/test_async_moderations.py index c522c75e..02a1f2da 100644 --- a/tests/test_async_moderations.py +++ b/tests/test_async_moderations.py @@ -80,7 +80,9 @@ async def test_method_single_with_vk_and_provider( @pytest.mark.asyncio @pytest.mark.parametrize("client, config", t2_params) - async def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + async def test_method_single_with_basic_config( + self, client: Any, config: Dict + ) -> None: portkey = client( base_url=base_url, api_key=api_key, @@ -140,5 +142,4 @@ async def test_method_single_provider_with_vk_retry_cache( input="I want to kill them.", model="text-moderation-stable" ) - assert isinstance(cached_moderations.id, str) is True diff --git a/tests/test_audio_speech.py b/tests/test_audio_speech.py index b429036e..ebc406ce 100644 --- a/tests/test_audio_speech.py +++ b/tests/test_audio_speech.py @@ -68,7 +68,7 @@ def test_method_single_with_vk_and_provider( audio = portkey.audio.speech.create( model="tts-1", voice="alloy", - input="The quick brown fox jumped over the lazy dog." + input="The quick brown fox jumped over the lazy dog.", ) assert isinstance(audio.content, bytes) is True @@ -92,7 +92,7 @@ def test_method_single_with_basic_config(self, client: Any, config: Dict) -> Non audio = portkey.audio.speech.create( model="tts-1", voice="alloy", - input="The quick brown fox jumped over the lazy dog." + input="The quick brown fox jumped over the lazy dog.", ) assert isinstance(audio.content, bytes) is True @@ -123,7 +123,7 @@ def test_method_single_provider_with_vk_retry_cache( audio = portkey.audio.speech.create( model="tts-1", voice="alloy", - input="The quick brown fox jumped over the lazy dog." 
+ input="The quick brown fox jumped over the lazy dog.", ) assert isinstance(audio.content, bytes) is True @@ -142,7 +142,7 @@ def test_method_single_provider_with_vk_retry_cache( cached_audio = portkey_2.audio.speech.create( model="tts-1", voice="alloy", - input="The quick brown fox jumped over the lazy dog." + input="The quick brown fox jumped over the lazy dog.", ) assert isinstance(cached_audio.content, bytes) is True diff --git a/tests/test_audio_transcript.py b/tests/test_audio_transcript.py index 206361c1..060798e9 100644 --- a/tests/test_audio_transcript.py +++ b/tests/test_audio_transcript.py @@ -65,11 +65,13 @@ def test_method_single_with_vk_and_provider( metadata=self.get_metadata(), ) - audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + audio_file = open( + "speech.mp3", + "rb", + ) transcript = portkey.audio.transcriptions.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(transcript.text, str) is True @@ -89,11 +91,13 @@ def test_method_single_with_basic_config(self, client: Any, config: Dict) -> Non metadata=self.get_metadata(), config=config, ) - audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + audio_file = open( + "speech.mp3", + "rb", + ) transcript = portkey.audio.transcriptions.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(transcript.text, str) is True @@ -121,11 +125,13 @@ def test_method_single_provider_with_vk_retry_cache( config=config, ) - audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + audio_file = open( + "speech.mp3", + "rb", + ) transcript = portkey.audio.transcriptions.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(transcript.text, str) is True @@ -142,9 +148,7 @@ def test_method_single_provider_with_vk_retry_cache( ) cached_transcript = portkey_2.audio.transcriptions.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(cached_transcript.text, str) is True - diff --git a/tests/test_audio_translation.py b/tests/test_audio_translation.py index 1812ab71..955cf224 100644 --- a/tests/test_audio_translation.py +++ b/tests/test_audio_translation.py @@ -65,11 +65,13 @@ def test_method_single_with_vk_and_provider( metadata=self.get_metadata(), ) - audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + audio_file = open( + "speech.mp3", + "rb", + ) translations = portkey.audio.translations.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(translations.text, str) is True @@ -89,11 +91,13 @@ def test_method_single_with_basic_config(self, client: Any, config: Dict) -> Non metadata=self.get_metadata(), config=config, ) - audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + audio_file = open( + "speech.mp3", + "rb", + ) translations = portkey.audio.translations.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(translations.text, str) is True @@ -121,11 +125,13 @@ def 
test_method_single_provider_with_vk_retry_cache( config=config, ) - audio_file = open("/Users/chandeep/Documents/Workspace/Portkey/SDK/python latest version/portkey-python-sdk/tests/configs/audio/speech.mp3", "rb") + audio_file = open( + "speech.mp3", + "rb", + ) translations = portkey.audio.translations.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(translations.text, str) is True @@ -142,9 +148,7 @@ def test_method_single_provider_with_vk_retry_cache( ) cached_translations = portkey_2.audio.translations.create( - model="whisper-1", - file=audio_file + model="whisper-1", file=audio_file ) assert isinstance(cached_translations.text, str) is True - diff --git a/tests/test_moderations.py b/tests/test_moderations.py index 3a961906..56ef453d 100644 --- a/tests/test_moderations.py +++ b/tests/test_moderations.py @@ -137,5 +137,4 @@ def test_method_single_provider_with_vk_retry_cache( input="I want to kill them.", model="text-moderation-stable" ) - assert isinstance(cached_moderations.id, str) is True From 5f71180bde8c79133cab3ee089931af1e99d65a7 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Tue, 21 May 2024 01:01:08 -0400 Subject: [PATCH 23/38] feat: openai-project openai-organization headers added --- portkey_ai/api_resources/base_client.py | 12 ++++++++++++ portkey_ai/api_resources/client.py | 16 ++++++++++++++++ portkey_ai/api_resources/utils.py | 2 ++ 3 files changed, 30 insertions(+) diff --git a/portkey_ai/api_resources/base_client.py b/portkey_ai/api_resources/base_client.py index b40b4c98..e623779f 100644 --- a/portkey_ai/api_resources/base_client.py +++ b/portkey_ai/api_resources/base_client.py @@ -54,6 +54,8 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + openai_project: Optional[str] = None, + openai_organization: Optional[str] = None, **kwargs, ) -> None: self.api_key = api_key or default_api_key() @@ -63,6 +65,8 @@ def __init__( self.provider = provider self.trace_id = trace_id self.metadata = metadata + self.openai_project = openai_project + self.openai_organization = openai_organization self.kwargs = kwargs self.custom_headers = createHeaders( @@ -72,6 +76,8 @@ def __init__( provider=provider, trace_id=trace_id, metadata=metadata, + openai_project=openai_project, + openai_organization=openai_organization, **kwargs, ) @@ -403,6 +409,8 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + openai_project: Optional[str] = None, + openai_organization: Optional[str] = None, **kwargs, ) -> None: self.api_key = api_key or default_api_key() @@ -412,6 +420,8 @@ def __init__( self.provider = provider self.trace_id = trace_id self.metadata = metadata + self.openai_project = openai_project + self.openai_organization = openai_organization self.kwargs = kwargs self.custom_headers = createHeaders( @@ -421,6 +431,8 @@ def __init__( provider=provider, trace_id=trace_id, metadata=metadata, + openai_project=openai_project, + openai_organization=openai_organization, **kwargs, ) diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index d927906f..f840916c 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -38,6 +38,8 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + openai_project: Optional[str] = None, + openai_organization: Optional[str] = None, **kwargs, ) -> None: 
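# A minimal usage sketch for the two new keyword arguments, with placeholder
# credentials; illustrative only, not part of this diff. Both values are
# simply forwarded to createHeaders along with the rest of the Portkey headers.
from portkey_ai import Portkey

portkey = Portkey(
    api_key="PORTKEY_API_KEY",  # placeholder Portkey API key
    virtual_key="openai-virtual-key",  # placeholder OpenAI virtual key
    openai_project="proj_xxxxxxxx",  # assumed OpenAI project id format
    openai_organization="org_xxxxxxxx",  # assumed OpenAI organization id format
)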
super().__init__( @@ -48,6 +50,8 @@ def __init__( provider=provider, trace_id=trace_id, metadata=metadata, + openai_project=openai_project, + openai_organization=openai_organization, **kwargs, ) @@ -78,6 +82,8 @@ def copy( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + openai_project: Optional[str] = None, + openai_organization: Optional[str] = None, **kwargs, ) -> Portkey: return self.__class__( @@ -88,6 +94,8 @@ def copy( provider=provider or self.provider, trace_id=trace_id or self.trace_id, metadata=metadata or self.metadata, + openai_project=openai_project or self.openai_project, + openai_organization=openai_organization or self.openai_organization, **self.kwargs, **kwargs, ) @@ -126,6 +134,8 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + openai_project: Optional[str] = None, + openai_organization: Optional[str] = None, **kwargs, ) -> None: super().__init__( @@ -136,6 +146,8 @@ def __init__( provider=provider, trace_id=trace_id, metadata=metadata, + openai_project=openai_project, + openai_organization=openai_organization, **kwargs, ) @@ -166,6 +178,8 @@ def copy( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + openai_project: Optional[str] = None, + openai_organization: Optional[str] = None, **kwargs, ) -> AsyncPortkey: return self.__class__( @@ -176,6 +190,8 @@ def copy( provider=provider or self.provider, trace_id=trace_id or self.trace_id, metadata=metadata or self.metadata, + openai_project=openai_project or self.openai_project, + openai_organization=openai_organization or self.openai_organization, **self.kwargs, **kwargs, ) diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index 860952e4..1c4fe294 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -248,6 +248,8 @@ class Constructs(BaseModel): deployment_id: Optional[str] = None resource_name: Optional[str] = None api_version: Optional[str] = None + openai_project: Optional[str] = None, + openai_organization: Optional[str] = None class LLMOptions(Constructs, ConversationInput, ModelParams): From 1abdc24cb8d7a1162c55ba6c3fc8265138213134 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Tue, 21 May 2024 01:01:48 -0400 Subject: [PATCH 24/38] fix: linting issues --- portkey_ai/api_resources/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index 1c4fe294..1fa864bd 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -248,7 +248,7 @@ class Constructs(BaseModel): deployment_id: Optional[str] = None resource_name: Optional[str] = None api_version: Optional[str] = None - openai_project: Optional[str] = None, + openai_project: Optional[str] = None openai_organization: Optional[str] = None From 10e999f455349126d37fd320ff963806dd57c62d Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Thu, 30 May 2024 17:58:15 +0530 Subject: [PATCH 25/38] hotfix: response type for prompt completions --- portkey_ai/api_resources/types/generation_type.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/portkey_ai/api_resources/types/generation_type.py b/portkey_ai/api_resources/types/generation_type.py index 9e7bad9f..fc3fadd1 100644 --- a/portkey_ai/api_resources/types/generation_type.py +++ b/portkey_ai/api_resources/types/generation_type.py @@ -16,11 +16,11 @@ class 
PromptCompletion(BaseModel): - id: Optional[str] - choices: List[Choice] - created: Optional[int] - model: Optional[str] - object: Optional[str] + id: Optional[str] = None + choices: List[Choice] = None + created: Optional[int] = None + model: Optional[str] = None + object: Optional[str] = None system_fingerprint: Optional[str] = None usage: Optional[Usage] = None index: Optional[int] = None @@ -48,7 +48,7 @@ class PromptCompletionChunk(BaseModel): created: Optional[int] = None model: Optional[str] = None provider: Optional[str] = None - choices: Optional[Union[List[TextChoice], List[StreamChoice]]] + choices: Optional[Union[List[TextChoice], List[StreamChoice]]] = None def __str__(self): return json.dumps(self.dict(), indent=4) From fa196ea52349d295a5621de1cbf4fc70de638b71 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Fri, 31 May 2024 14:18:07 +0530 Subject: [PATCH 26/38] fix: audio specific test cases --- tests/models.json | 13 ++++++++++--- tests/test_async_audio_speech.py | 7 ++++--- tests/test_async_audio_transcript.py | 8 +++++--- tests/test_async_audio_translation.py | 7 ++++--- tests/test_audio_speech.py | 7 ++++--- tests/test_audio_transcript.py | 7 ++++--- tests/test_audio_translation.py | 7 ++++--- 7 files changed, 35 insertions(+), 21 deletions(-) diff --git a/tests/models.json b/tests/models.json index b8af6331..29246963 100644 --- a/tests/models.json +++ b/tests/models.json @@ -18,6 +18,10 @@ "image":[ "dall-e-3", "dall-e-2" + ], + "audio":[ + "tts-1", + "whisper-1" ] }, "anyscale": { @@ -36,7 +40,8 @@ "mistralai/Mistral-7B-Instruct-v0.1", "mistralai/Mixtral-8x7B-Instruct-v0.1" ], - "image":[] + "image":[], + "audio":[] }, "anthropic": { "env_variable": "ANTHROPIC_API_KEY", @@ -53,7 +58,8 @@ "claude-2.0", "claude-instant-1.2" ], - "image":[] + "image":[], + "audio":[] }, "cohere": { "env_variable": "COHERE_API_KEY", @@ -69,6 +75,7 @@ "command", "command-nightly" ], - "image":[] + "image":[], + "audio":[] } } \ No newline at end of file diff --git a/tests/test_async_audio_speech.py b/tests/test_async_audio_speech.py index 953dd5d9..03a0a671 100644 --- a/tests/test_async_audio_speech.py +++ b/tests/test_async_audio_speech.py @@ -47,10 +47,11 @@ def get_metadata(self): t1_params = [] t = [] for k, v in models.items(): - for i in v["chat"]: - t.append((client, k, os.environ.get(v["env_variable"]), i)) + if k == "openai": + for i in v["audio"]: + t.append((client, k, os.environ.get(v["env_variable"]), i)) - t1_params.extend(t) + t1_params.extend(t) @pytest.mark.asyncio @pytest.mark.parametrize("client, provider, auth, model", t1_params) diff --git a/tests/test_async_audio_transcript.py b/tests/test_async_audio_transcript.py index 6fd7462a..3a99b34a 100644 --- a/tests/test_async_audio_transcript.py +++ b/tests/test_async_audio_transcript.py @@ -46,11 +46,13 @@ def get_metadata(self): # Test-4 t4_params = [] t4 = [] + for k, v in models.items(): - for i in v["chat"]: - t4.append((client, k, os.environ.get(v["env_variable"]), i)) + if k == "openai": + for i in v["audio"]: + t4.append((client, k, os.environ.get(v["env_variable"]), i)) - t4_params.extend(t4) + t4_params.extend(t4) @pytest.mark.asyncio @pytest.mark.parametrize("client, provider, auth, model", t4_params) diff --git a/tests/test_async_audio_translation.py b/tests/test_async_audio_translation.py index 6640f81e..6c6805da 100644 --- a/tests/test_async_audio_translation.py +++ b/tests/test_async_audio_translation.py @@ -47,10 +47,11 @@ def get_metadata(self): t4_params = [] t4 = [] for k, v in models.items(): - for i in 
v["chat"]: - t4.append((client, k, os.environ.get(v["env_variable"]), i)) + if k == "openai": + for i in v["audio"]: + t4.append((client, k, os.environ.get(v["env_variable"]), i)) - t4_params.extend(t4) + t4_params.extend(t4) @pytest.mark.asyncio @pytest.mark.parametrize("client, provider, auth, model", t4_params) diff --git a/tests/test_audio_speech.py b/tests/test_audio_speech.py index ebc406ce..63aeecee 100644 --- a/tests/test_audio_speech.py +++ b/tests/test_audio_speech.py @@ -47,10 +47,11 @@ def get_metadata(self): t1_params = [] t = [] for k, v in models.items(): - for i in v["chat"]: - t.append((client, k, os.environ.get(v["env_variable"]), i)) + if k == "openai": + for i in v["audio"]: + t.append((client, k, os.environ.get(v["env_variable"]), i)) - t1_params.extend(t) + t1_params.extend(t) @pytest.mark.parametrize("client, provider, auth, model", t1_params) def test_method_single_with_vk_and_provider( diff --git a/tests/test_audio_transcript.py b/tests/test_audio_transcript.py index 060798e9..532e464a 100644 --- a/tests/test_audio_transcript.py +++ b/tests/test_audio_transcript.py @@ -47,10 +47,11 @@ def get_metadata(self): t4_params = [] t4 = [] for k, v in models.items(): - for i in v["chat"]: - t4.append((client, k, os.environ.get(v["env_variable"]), i)) + if k == "openai": + for i in v["audio"]: + t4.append((client, k, os.environ.get(v["env_variable"]), i)) - t4_params.extend(t4) + t4_params.extend(t4) @pytest.mark.parametrize("client, provider, auth, model", t4_params) def test_method_single_with_vk_and_provider( diff --git a/tests/test_audio_translation.py b/tests/test_audio_translation.py index 955cf224..4667f6ed 100644 --- a/tests/test_audio_translation.py +++ b/tests/test_audio_translation.py @@ -47,10 +47,11 @@ def get_metadata(self): t4_params = [] t4 = [] for k, v in models.items(): - for i in v["chat"]: - t4.append((client, k, os.environ.get(v["env_variable"]), i)) + if k == "openai": + for i in v["audio"]: + t4.append((client, k, os.environ.get(v["env_variable"]), i)) - t4_params.extend(t4) + t4_params.extend(t4) @pytest.mark.parametrize("client, provider, auth, model", t4_params) def test_method_single_with_vk_and_provider( From ed5a45307fd80830b2eb7f73445771b37a7292b4 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Fri, 31 May 2024 14:19:34 +0530 Subject: [PATCH 27/38] fix: linting issues --- portkey_ai/api_resources/types/generation_type.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/portkey_ai/api_resources/types/generation_type.py b/portkey_ai/api_resources/types/generation_type.py index fc3fadd1..99fd406d 100644 --- a/portkey_ai/api_resources/types/generation_type.py +++ b/portkey_ai/api_resources/types/generation_type.py @@ -17,7 +17,7 @@ class PromptCompletion(BaseModel): id: Optional[str] = None - choices: List[Choice] = None + choices: Optional[List[Choice]] = None created: Optional[int] = None model: Optional[str] = None object: Optional[str] = None From 0b3fd850b669c0140932ad56e779007ede01a484 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Fri, 31 May 2024 14:57:50 +0530 Subject: [PATCH 28/38] feat: vendor specific headers --- portkey_ai/api_resources/base_client.py | 61 +++++++++++++++++++ portkey_ai/api_resources/client.py | 80 +++++++++++++++++++++++++ portkey_ai/api_resources/utils.py | 10 ++++ 3 files changed, 151 insertions(+) diff --git a/portkey_ai/api_resources/base_client.py b/portkey_ai/api_resources/base_client.py index e623779f..3009498c 100644 --- a/portkey_ai/api_resources/base_client.py +++ 
b/portkey_ai/api_resources/base_client.py @@ -56,6 +56,16 @@ def __init__( metadata: Optional[str] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, + aws_secret_access_key: Optional[str] = None, + aws_access_key_id: Optional[str] = None, + aws_session_token: Optional[str] = None, + aws_region: Optional[str] = None, + vertex_project_id: Optional[str] = None, + vertex_region: Optional[str] = None, + workers_ai_account_id: Optional[str] = None, + azure_resource_name: Optional[str] = None, + azure_deployment_id: Optional[str] = None, + azure_api_version: Optional[str] = None, **kwargs, ) -> None: self.api_key = api_key or default_api_key() @@ -67,6 +77,16 @@ def __init__( self.metadata = metadata self.openai_project = openai_project self.openai_organization = openai_organization + self.aws_secret_access_key=aws_secret_access_key, + self.aws_access_key_id=aws_access_key_id, + self.aws_session_token=aws_session_token, + self.aws_region=aws_region, + self.vertex_project_id=vertex_project_id, + self.vertex_region=vertex_region, + self.workers_ai_account_id=workers_ai_account_id, + self.azure_resource_name=azure_resource_name, + self.azure_deployment_id=azure_deployment_id, + self.azure_api_version=azure_api_version, self.kwargs = kwargs self.custom_headers = createHeaders( @@ -78,6 +98,16 @@ def __init__( metadata=metadata, openai_project=openai_project, openai_organization=openai_organization, + aws_secret_access_key=aws_secret_access_key, + aws_access_key_id=aws_access_key_id, + aws_session_token=aws_session_token, + aws_region=aws_region, + vertex_project_id=vertex_project_id, + vertex_region=vertex_region, + workers_ai_account_id=workers_ai_account_id, + azure_resource_name=azure_resource_name, + azure_deployment_id=azure_deployment_id, + azure_api_version=azure_api_version, **kwargs, ) @@ -411,6 +441,17 @@ def __init__( metadata: Optional[str] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, + aws_secret_access_key: Optional[str] = None, + aws_access_key_id: Optional[str] = None, + aws_session_token: Optional[str] = None, + aws_region: Optional[str] = None, + vertex_project_id: Optional[str] = None, + vertex_region: Optional[str] = None, + workers_ai_account_id: Optional[str] = None, + azure_resource_name: Optional[str] = None, + azure_deployment_id: Optional[str] = None, + azure_api_version: Optional[str] = None, + **kwargs, ) -> None: self.api_key = api_key or default_api_key() @@ -422,6 +463,16 @@ def __init__( self.metadata = metadata self.openai_project = openai_project self.openai_organization = openai_organization + self.aws_secret_access_key=aws_secret_access_key, + self.aws_access_key_id=aws_access_key_id, + self.aws_session_token=aws_session_token, + self.aws_region=aws_region, + self.vertex_project_id=vertex_project_id, + self.vertex_region=vertex_region, + self.workers_ai_account_id=workers_ai_account_id, + self.azure_resource_name=azure_resource_name, + self.azure_deployment_id=azure_deployment_id, + self.azure_api_version=azure_api_version, self.kwargs = kwargs self.custom_headers = createHeaders( @@ -433,6 +484,16 @@ def __init__( metadata=metadata, openai_project=openai_project, openai_organization=openai_organization, + aws_secret_access_key=aws_secret_access_key, + aws_access_key_id=aws_access_key_id, + aws_session_token=aws_session_token, + aws_region=aws_region, + vertex_project_id=vertex_project_id, + vertex_region=vertex_region, + workers_ai_account_id=workers_ai_account_id, + 
azure_resource_name=azure_resource_name, + azure_deployment_id=azure_deployment_id, + azure_api_version=azure_api_version, **kwargs, ) diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index f840916c..c425de6b 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -40,6 +40,16 @@ def __init__( metadata: Optional[str] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, + aws_secret_access_key: Optional[str] = None, + aws_access_key_id: Optional[str] = None, + aws_session_token: Optional[str] = None, + aws_region: Optional[str] = None, + vertex_project_id: Optional[str] = None, + vertex_region: Optional[str] = None, + workers_ai_account_id: Optional[str] = None, + azure_resource_name: Optional[str] = None, + azure_deployment_id: Optional[str] = None, + azure_api_version: Optional[str] = None, **kwargs, ) -> None: super().__init__( @@ -52,6 +62,16 @@ def __init__( metadata=metadata, openai_project=openai_project, openai_organization=openai_organization, + aws_secret_access_key=aws_secret_access_key, + aws_access_key_id=aws_access_key_id, + aws_session_token=aws_session_token, + aws_region=aws_region, + vertex_project_id=vertex_project_id, + vertex_region=vertex_region, + workers_ai_account_id=workers_ai_account_id, + azure_resource_name=azure_resource_name, + azure_deployment_id=azure_deployment_id, + azure_api_version=azure_api_version, **kwargs, ) @@ -84,6 +104,16 @@ def copy( metadata: Optional[str] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, + aws_secret_access_key: Optional[str] = None, + aws_access_key_id: Optional[str] = None, + aws_session_token: Optional[str] = None, + aws_region: Optional[str] = None, + vertex_project_id: Optional[str] = None, + vertex_region: Optional[str] = None, + workers_ai_account_id: Optional[str] = None, + azure_resource_name: Optional[str] = None, + azure_deployment_id: Optional[str] = None, + azure_api_version: Optional[str] = None, **kwargs, ) -> Portkey: return self.__class__( @@ -96,6 +126,16 @@ def copy( metadata=metadata or self.metadata, openai_project=openai_project or self.openai_project, openai_organization=openai_organization or self.openai_organization, + aws_secret_access_key=aws_secret_access_key or self.aws_secret_access_key, + aws_access_key_id=aws_access_key_id or self.aws_access_key_id, + aws_session_token=aws_session_token or self.aws_session_token, + aws_region=aws_region or self.aws_region, + vertex_project_id=vertex_project_id or self.vertex_project_id, + vertex_region=vertex_region or self.vertex_region, + workers_ai_account_id=workers_ai_account_id or self.workers_ai_account_id, + azure_resource_name=azure_resource_name or self.azure_resource_name, + azure_deployment_id=azure_deployment_id or self.azure_deployment_id, + azure_api_version=azure_api_version or self.azure_api_version, **self.kwargs, **kwargs, ) @@ -136,6 +176,16 @@ def __init__( metadata: Optional[str] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, + aws_secret_access_key: Optional[str] = None, + aws_access_key_id: Optional[str] = None, + aws_session_token: Optional[str] = None, + aws_region: Optional[str] = None, + vertex_project_id: Optional[str] = None, + vertex_region: Optional[str] = None, + workers_ai_account_id: Optional[str] = None, + azure_resource_name: Optional[str] = None, + azure_deployment_id: Optional[str] = None, + azure_api_version: Optional[str] = None, 
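# A hedged usage sketch for the vendor-specific arguments this commit adds;
# illustrative only, not part of this diff. "bedrock" is an assumed provider
# slug here, and every credential value below is a placeholder.
from portkey_ai import Portkey

portkey = Portkey(
    api_key="PORTKEY_API_KEY",  # placeholder Portkey API key
    provider="bedrock",  # assumption: routing to AWS Bedrock
    aws_access_key_id="AKIA_PLACEHOLDER",  # placeholder AWS credential
    aws_secret_access_key="AWS_SECRET_PLACEHOLDER",  # placeholder AWS credential
    aws_region="us-east-1",  # example region
)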
**kwargs, ) -> None: super().__init__( @@ -148,6 +198,16 @@ def __init__( metadata=metadata, openai_project=openai_project, openai_organization=openai_organization, + aws_secret_access_key=aws_secret_access_key, + aws_access_key_id=aws_access_key_id, + aws_session_token=aws_session_token, + aws_region=aws_region, + vertex_project_id=vertex_project_id, + vertex_region=vertex_region, + workers_ai_account_id=workers_ai_account_id, + azure_resource_name=azure_resource_name, + azure_deployment_id=azure_deployment_id, + azure_api_version=azure_api_version, **kwargs, ) @@ -180,6 +240,16 @@ def copy( metadata: Optional[str] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, + aws_secret_access_key: Optional[str] = None, + aws_access_key_id: Optional[str] = None, + aws_session_token: Optional[str] = None, + aws_region: Optional[str] = None, + vertex_project_id: Optional[str] = None, + vertex_region: Optional[str] = None, + workers_ai_account_id: Optional[str] = None, + azure_resource_name: Optional[str] = None, + azure_deployment_id: Optional[str] = None, + azure_api_version: Optional[str] = None, **kwargs, ) -> AsyncPortkey: return self.__class__( @@ -192,6 +262,16 @@ def copy( metadata=metadata or self.metadata, openai_project=openai_project or self.openai_project, openai_organization=openai_organization or self.openai_organization, + aws_secret_access_key=aws_secret_access_key or self.aws_secret_access_key, + aws_access_key_id=aws_access_key_id or self.aws_access_key_id, + aws_session_token=aws_session_token or self.aws_session_token, + aws_region=aws_region or self.aws_region, + vertex_project_id=vertex_project_id or self.vertex_project_id, + vertex_region=vertex_region or self.vertex_region, + workers_ai_account_id=workers_ai_account_id or self.workers_ai_account_id, + azure_resource_name=azure_resource_name or self.azure_resource_name, + azure_deployment_id=azure_deployment_id or self.azure_deployment_id, + azure_api_version=azure_api_version or self.azure_api_version, **self.kwargs, **kwargs, ) diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index 1fa864bd..0a8f0d15 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -250,6 +250,16 @@ class Constructs(BaseModel): api_version: Optional[str] = None openai_project: Optional[str] = None openai_organization: Optional[str] = None + aws_secret_access_key: Optional[str] = None, + aws_access_key_id: Optional[str] = None, + aws_session_token: Optional[str] = None, + aws_region: Optional[str] = None, + vertex_project_id: Optional[str] = None, + vertex_region: Optional[str] = None, + workers_ai_account_id: Optional[str] = None, + azure_resource_name: Optional[str] = None, + azure_deployment_id: Optional[str] = None, + azure_api_version: Optional[str] = None, class LLMOptions(Constructs, ConversationInput, ModelParams): From a5224ed992b54c3825dc17c24ae125a196331682 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Fri, 31 May 2024 15:15:54 +0530 Subject: [PATCH 29/38] fix: linting issues --- portkey_ai/api_resources/base_client.py | 41 ++++++++++++------------- portkey_ai/api_resources/utils.py | 20 ++++++------ 2 files changed, 30 insertions(+), 31 deletions(-) diff --git a/portkey_ai/api_resources/base_client.py b/portkey_ai/api_resources/base_client.py index 3009498c..ba08847d 100644 --- a/portkey_ai/api_resources/base_client.py +++ b/portkey_ai/api_resources/base_client.py @@ -77,16 +77,16 @@ def __init__( self.metadata = metadata 
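
The lint fixes in the following hunks remove trailing commas that had silently turned each of these attribute assignments into a one-element tuple; in Python the comma, not the parentheses, makes the tuple:

    class C:
        def __init__(self, region=None):
            self.buggy = region,   # trailing comma binds ('us-east-1',)
            self.fixed = region    # binds 'us-east-1'

    c = C("us-east-1")
    print(c.buggy)  # ('us-east-1',)
    print(c.fixed)  # us-east-1
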
self.openai_project = openai_project self.openai_organization = openai_organization - self.aws_secret_access_key=aws_secret_access_key, - self.aws_access_key_id=aws_access_key_id, - self.aws_session_token=aws_session_token, - self.aws_region=aws_region, - self.vertex_project_id=vertex_project_id, - self.vertex_region=vertex_region, - self.workers_ai_account_id=workers_ai_account_id, - self.azure_resource_name=azure_resource_name, - self.azure_deployment_id=azure_deployment_id, - self.azure_api_version=azure_api_version, + self.aws_secret_access_key=aws_secret_access_key + self.aws_access_key_id=aws_access_key_id + self.aws_session_token=aws_session_token + self.aws_region=aws_region + self.vertex_project_id=vertex_project_id + self.vertex_region=vertex_region + self.workers_ai_account_id=workers_ai_account_id + self.azure_resource_name=azure_resource_name + self.azure_deployment_id=azure_deployment_id + self.azure_api_version=azure_api_version self.kwargs = kwargs self.custom_headers = createHeaders( @@ -451,7 +451,6 @@ def __init__( azure_resource_name: Optional[str] = None, azure_deployment_id: Optional[str] = None, azure_api_version: Optional[str] = None, - **kwargs, ) -> None: self.api_key = api_key or default_api_key() @@ -463,16 +462,16 @@ def __init__( self.metadata = metadata self.openai_project = openai_project self.openai_organization = openai_organization - self.aws_secret_access_key=aws_secret_access_key, - self.aws_access_key_id=aws_access_key_id, - self.aws_session_token=aws_session_token, - self.aws_region=aws_region, - self.vertex_project_id=vertex_project_id, - self.vertex_region=vertex_region, - self.workers_ai_account_id=workers_ai_account_id, - self.azure_resource_name=azure_resource_name, - self.azure_deployment_id=azure_deployment_id, - self.azure_api_version=azure_api_version, + self.aws_secret_access_key=aws_secret_access_key + self.aws_access_key_id=aws_access_key_id + self.aws_session_token=aws_session_token + self.aws_region=aws_region + self.vertex_project_id=vertex_project_id + self.vertex_region=vertex_region + self.workers_ai_account_id=workers_ai_account_id + self.azure_resource_name=azure_resource_name + self.azure_deployment_id=azure_deployment_id + self.azure_api_version=azure_api_version self.kwargs = kwargs self.custom_headers = createHeaders( diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index 0a8f0d15..9dab140a 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -250,16 +250,16 @@ class Constructs(BaseModel): api_version: Optional[str] = None openai_project: Optional[str] = None openai_organization: Optional[str] = None - aws_secret_access_key: Optional[str] = None, - aws_access_key_id: Optional[str] = None, - aws_session_token: Optional[str] = None, - aws_region: Optional[str] = None, - vertex_project_id: Optional[str] = None, - vertex_region: Optional[str] = None, - workers_ai_account_id: Optional[str] = None, - azure_resource_name: Optional[str] = None, - azure_deployment_id: Optional[str] = None, - azure_api_version: Optional[str] = None, + aws_secret_access_key: Optional[str] = None + aws_access_key_id: Optional[str] = None + aws_session_token: Optional[str] = None + aws_region: Optional[str] = None + vertex_project_id: Optional[str] = None + vertex_region: Optional[str] = None + workers_ai_account_id: Optional[str] = None + azure_resource_name: Optional[str] = None + azure_deployment_id: Optional[str] = None + azure_api_version: Optional[str] = None class 
LLMOptions(Constructs, ConversationInput, ModelParams): From f706e7f5a293e0b22beeb9372921139cb6227d17 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Fri, 31 May 2024 15:22:50 +0530 Subject: [PATCH 30/38] feat: debug, cache_force_refresh, custom_host, forward_headers headers added --- portkey_ai/api_resources/base_client.py | 24 +++++++++++++++++++ portkey_ai/api_resources/client.py | 32 +++++++++++++++++++++++++ portkey_ai/api_resources/utils.py | 3 +++ 3 files changed, 59 insertions(+) diff --git a/portkey_ai/api_resources/base_client.py b/portkey_ai/api_resources/base_client.py index ba08847d..a6707997 100644 --- a/portkey_ai/api_resources/base_client.py +++ b/portkey_ai/api_resources/base_client.py @@ -54,6 +54,10 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + debug: Optional[bool] = None, + cache_force_refresh: Optional[bool] = None, + custom_host: Optional[str] = None, + forward_headers: Optional[str] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, aws_secret_access_key: Optional[str] = None, @@ -75,6 +79,10 @@ def __init__( self.provider = provider self.trace_id = trace_id self.metadata = metadata + self.debug = debug + self.cache_force_refresh = cache_force_refresh + self.custom_host = custom_host + self.forward_headers = forward_headers self.openai_project = openai_project self.openai_organization = openai_organization self.aws_secret_access_key=aws_secret_access_key @@ -96,6 +104,10 @@ def __init__( provider=provider, trace_id=trace_id, metadata=metadata, + debug=debug, + cache_force_refresh=cache_force_refresh, + custom_host=custom_host, + forward_headers=forward_headers, openai_project=openai_project, openai_organization=openai_organization, aws_secret_access_key=aws_secret_access_key, @@ -439,6 +451,10 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + debug: Optional[bool] = None, + cache_force_refresh: Optional[bool] = None, + custom_host: Optional[str] = None, + forward_headers: Optional[str] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, aws_secret_access_key: Optional[str] = None, @@ -460,6 +476,10 @@ def __init__( self.provider = provider self.trace_id = trace_id self.metadata = metadata + self.debug = debug + self.cache_force_refresh = cache_force_refresh + self.custom_host = custom_host + self.forward_headers = forward_headers self.openai_project = openai_project self.openai_organization = openai_organization self.aws_secret_access_key=aws_secret_access_key @@ -481,6 +501,10 @@ def __init__( provider=provider, trace_id=trace_id, metadata=metadata, + debug=debug, + cache_force_refresh=cache_force_refresh, + custom_host=custom_host, + forward_headers=forward_headers, openai_project=openai_project, openai_organization=openai_organization, aws_secret_access_key=aws_secret_access_key, diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index c425de6b..fe1c21be 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -38,6 +38,10 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + debug: Optional[bool] = None, + cache_force_refresh: Optional[bool] = None, + custom_host: Optional[str] = None, + forward_headers: Optional[str] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, aws_secret_access_key: 
Optional[str] = None, @@ -60,6 +64,10 @@ def __init__( provider=provider, trace_id=trace_id, metadata=metadata, + debug=debug, + cache_force_refresh=cache_force_refresh, + custom_host=custom_host, + forward_headers=forward_headers, openai_project=openai_project, openai_organization=openai_organization, aws_secret_access_key=aws_secret_access_key, @@ -102,6 +110,10 @@ def copy( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + debug: Optional[bool] = None, + cache_force_refresh: Optional[bool] = None, + custom_host: Optional[str] = None, + forward_headers: Optional[str] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, aws_secret_access_key: Optional[str] = None, @@ -124,6 +136,10 @@ def copy( provider=provider or self.provider, trace_id=trace_id or self.trace_id, metadata=metadata or self.metadata, + debug=debug or self.debug, + cache_force_refresh=cache_force_refresh or self.cache_force_refresh, + custom_host=custom_host or self.custom_host, + forward_headers=forward_headers or self.forward_headers, openai_project=openai_project or self.openai_project, openai_organization=openai_organization or self.openai_organization, aws_secret_access_key=aws_secret_access_key or self.aws_secret_access_key, @@ -174,6 +190,10 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + debug: Optional[bool] = None, + cache_force_refresh: Optional[bool] = None, + custom_host: Optional[str] = None, + forward_headers: Optional[str] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, aws_secret_access_key: Optional[str] = None, @@ -196,6 +216,10 @@ def __init__( provider=provider, trace_id=trace_id, metadata=metadata, + debug=debug, + cache_force_refresh=cache_force_refresh, + custom_host=custom_host, + forward_headers=forward_headers, openai_project=openai_project, openai_organization=openai_organization, aws_secret_access_key=aws_secret_access_key, @@ -238,6 +262,10 @@ def copy( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, + debug: Optional[bool] = None, + cache_force_refresh: Optional[bool] = None, + custom_host: Optional[str] = None, + forward_headers: Optional[str] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, aws_secret_access_key: Optional[str] = None, @@ -260,6 +288,10 @@ def copy( provider=provider or self.provider, trace_id=trace_id or self.trace_id, metadata=metadata or self.metadata, + debug=debug or self.debug, + cache_force_refresh=cache_force_refresh or self.cache_force_refresh, + custom_host=custom_host or self.custom_host, + forward_headers=forward_headers or self.forward_headers, openai_project=openai_project or self.openai_project, openai_organization=openai_organization or self.openai_organization, aws_secret_access_key=aws_secret_access_key or self.aws_secret_access_key, diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index 9dab140a..3a17fe0d 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -243,6 +243,9 @@ class Constructs(BaseModel): cache_force_refresh: Optional[bool] = None trace_id: Optional[str] = None metadata: Optional[Dict[str, Any]] = None + debug: Optional[bool] = None + custom_host: Optional[str] = None + forward_headers:Optional[str] = None weight: Optional[float] = None retry: Optional[RetrySettings] = None deployment_id: 
Optional[str] = None From fb65f693cb151896ce86689a472575138cfc476a Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Fri, 31 May 2024 15:52:19 +0530 Subject: [PATCH 31/38] feat: falsy logic in createHeaders --- portkey_ai/api_resources/apis/create_headers.py | 2 ++ portkey_ai/api_resources/base_client.py | 8 ++++---- portkey_ai/api_resources/client.py | 16 ++++++++-------- portkey_ai/api_resources/utils.py | 4 ++-- 4 files changed, 16 insertions(+), 14 deletions(-) diff --git a/portkey_ai/api_resources/apis/create_headers.py b/portkey_ai/api_resources/apis/create_headers.py index 12f51021..a1136ef6 100644 --- a/portkey_ai/api_resources/apis/create_headers.py +++ b/portkey_ai/api_resources/apis/create_headers.py @@ -12,6 +12,8 @@ def __init__(self, **kwargs) -> None: # type: ignore def json(self) -> Mapping: headers = {} for k, v in self.kwargs.items(): + if type(v) == bool: + v = str(v).lower() if k == "mode" and "proxy" not in v: v = f"proxy {v}" k = "-".join(k.split("_")) diff --git a/portkey_ai/api_resources/base_client.py b/portkey_ai/api_resources/base_client.py index a6707997..ca533879 100644 --- a/portkey_ai/api_resources/base_client.py +++ b/portkey_ai/api_resources/base_client.py @@ -54,8 +54,8 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, - debug: Optional[bool] = None, - cache_force_refresh: Optional[bool] = None, + debug: Optional[bool] = False, + cache_force_refresh: Optional[bool] = False, custom_host: Optional[str] = None, forward_headers: Optional[str] = None, openai_project: Optional[str] = None, @@ -451,8 +451,8 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, - debug: Optional[bool] = None, - cache_force_refresh: Optional[bool] = None, + debug: Optional[bool] = False, + cache_force_refresh: Optional[bool] = False, custom_host: Optional[str] = None, forward_headers: Optional[str] = None, openai_project: Optional[str] = None, diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index fe1c21be..ad3f4a79 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -38,8 +38,8 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, - debug: Optional[bool] = None, - cache_force_refresh: Optional[bool] = None, + debug: Optional[bool] = False, + cache_force_refresh: Optional[bool] = False, custom_host: Optional[str] = None, forward_headers: Optional[str] = None, openai_project: Optional[str] = None, @@ -110,8 +110,8 @@ def copy( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, - debug: Optional[bool] = None, - cache_force_refresh: Optional[bool] = None, + debug: Optional[bool] = False, + cache_force_refresh: Optional[bool] = False, custom_host: Optional[str] = None, forward_headers: Optional[str] = None, openai_project: Optional[str] = None, @@ -190,8 +190,8 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, - debug: Optional[bool] = None, - cache_force_refresh: Optional[bool] = None, + debug: Optional[bool] = False, + cache_force_refresh: Optional[bool] = False, custom_host: Optional[str] = None, forward_headers: Optional[str] = None, openai_project: Optional[str] = None, @@ -262,8 +262,8 @@ def copy( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, - debug: Optional[bool] = None, 
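
The createHeaders change at the top of this patch normalizes booleans before they go on the wire, since str(True) is "True" while header values are conventionally lowercase. In isolation:

    def normalize(v):
        # booleans become "true"/"false"; everything else is stringified
        return str(v).lower() if isinstance(v, bool) else str(v)

    print(normalize(True), normalize(False), normalize(123))
    # true false 123
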
- cache_force_refresh: Optional[bool] = None, + debug: Optional[bool] = False, + cache_force_refresh: Optional[bool] = False, custom_host: Optional[str] = None, forward_headers: Optional[str] = None, openai_project: Optional[str] = None, diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index 3a17fe0d..b3fb57e8 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -240,10 +240,10 @@ class Constructs(BaseModel): cache: Optional[bool] = None cache_age: Optional[int] = None cache_status: Optional[Union[CacheType, CacheLiteral]] = None - cache_force_refresh: Optional[bool] = None + cache_force_refresh: Optional[bool] = False trace_id: Optional[str] = None metadata: Optional[Dict[str, Any]] = None - debug: Optional[bool] = None + debug: Optional[bool] = False custom_host: Optional[str] = None forward_headers:Optional[str] = None weight: Optional[float] = None From c1a00c8f2d3f3c348a2c5f54df7d5f9700217e9b Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Fri, 31 May 2024 16:29:45 +0530 Subject: [PATCH 32/38] fix: list of str for forward headers + logic for list of str to comma separated str --- portkey_ai/api_resources/apis/create_headers.py | 6 ++++++ portkey_ai/api_resources/base_client.py | 5 +++-- portkey_ai/api_resources/client.py | 10 +++++----- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/portkey_ai/api_resources/apis/create_headers.py b/portkey_ai/api_resources/apis/create_headers.py index a1136ef6..50f7f41e 100644 --- a/portkey_ai/api_resources/apis/create_headers.py +++ b/portkey_ai/api_resources/apis/create_headers.py @@ -12,6 +12,8 @@ def __init__(self, **kwargs) -> None: # type: ignore def json(self) -> Mapping: headers = {} for k, v in self.kwargs.items(): + + # logic for boolean type headers if type(v) == bool: v = str(v).lower() if k == "mode" and "proxy" not in v: @@ -24,6 +26,10 @@ def json(self) -> Mapping: headers[get_portkey_header(k)] = str(v) else: headers[k] = str("Bearer " + v) + + # logic for List of str to comma separated string + if k == "forward-headers": + headers[get_portkey_header(k)] = ",".join(v) return headers diff --git a/portkey_ai/api_resources/base_client.py b/portkey_ai/api_resources/base_client.py index ca533879..db169896 100644 --- a/portkey_ai/api_resources/base_client.py +++ b/portkey_ai/api_resources/base_client.py @@ -6,6 +6,7 @@ from typing import ( Dict, Any, + List, Union, Mapping, cast, @@ -57,7 +58,7 @@ def __init__( debug: Optional[bool] = False, cache_force_refresh: Optional[bool] = False, custom_host: Optional[str] = None, - forward_headers: Optional[str] = None, + forward_headers: Optional[List[str]] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, aws_secret_access_key: Optional[str] = None, @@ -454,7 +455,7 @@ def __init__( debug: Optional[bool] = False, cache_force_refresh: Optional[bool] = False, custom_host: Optional[str] = None, - forward_headers: Optional[str] = None, + forward_headers: Optional[List[str]] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, aws_secret_access_key: Optional[str] = None, diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index ad3f4a79..678f5746 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Mapping, Optional, Union +from typing import List, Mapping, Optional, Union from portkey_ai.api_resources 
import apis from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient @@ -41,7 +41,7 @@ def __init__( debug: Optional[bool] = False, cache_force_refresh: Optional[bool] = False, custom_host: Optional[str] = None, - forward_headers: Optional[str] = None, + forward_headers: Optional[List[str]] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, aws_secret_access_key: Optional[str] = None, @@ -113,7 +113,7 @@ def copy( debug: Optional[bool] = False, cache_force_refresh: Optional[bool] = False, custom_host: Optional[str] = None, - forward_headers: Optional[str] = None, + forward_headers: Optional[List[str]] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, aws_secret_access_key: Optional[str] = None, @@ -193,7 +193,7 @@ def __init__( debug: Optional[bool] = False, cache_force_refresh: Optional[bool] = False, custom_host: Optional[str] = None, - forward_headers: Optional[str] = None, + forward_headers: Optional[List[str]] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, aws_secret_access_key: Optional[str] = None, @@ -265,7 +265,7 @@ def copy( debug: Optional[bool] = False, cache_force_refresh: Optional[bool] = False, custom_host: Optional[str] = None, - forward_headers: Optional[str] = None, + forward_headers: Optional[List[str]] = None, openai_project: Optional[str] = None, openai_organization: Optional[str] = None, aws_secret_access_key: Optional[str] = None, From f96595e0c5dd9abc757814c6e7fac7621ceb45cb Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Fri, 31 May 2024 17:27:43 +0530 Subject: [PATCH 33/38] fix: none type for debug and cache force refresh --- portkey_ai/api_resources/apis/create_headers.py | 1 + portkey_ai/api_resources/base_client.py | 8 ++++---- portkey_ai/api_resources/client.py | 16 ++++++++-------- portkey_ai/api_resources/utils.py | 4 ++-- 4 files changed, 15 insertions(+), 14 deletions(-) diff --git a/portkey_ai/api_resources/apis/create_headers.py b/portkey_ai/api_resources/apis/create_headers.py index 50f7f41e..8aad9796 100644 --- a/portkey_ai/api_resources/apis/create_headers.py +++ b/portkey_ai/api_resources/apis/create_headers.py @@ -30,6 +30,7 @@ def json(self) -> Mapping: # logic for List of str to comma separated string if k == "forward-headers": headers[get_portkey_header(k)] = ",".join(v) + print(headers) return headers diff --git a/portkey_ai/api_resources/base_client.py b/portkey_ai/api_resources/base_client.py index db169896..6de50efe 100644 --- a/portkey_ai/api_resources/base_client.py +++ b/portkey_ai/api_resources/base_client.py @@ -55,8 +55,8 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, - debug: Optional[bool] = False, - cache_force_refresh: Optional[bool] = False, + debug: Optional[bool] = None, + cache_force_refresh: Optional[bool] = None, custom_host: Optional[str] = None, forward_headers: Optional[List[str]] = None, openai_project: Optional[str] = None, @@ -452,8 +452,8 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, - debug: Optional[bool] = False, - cache_force_refresh: Optional[bool] = False, + debug: Optional[bool] = None, + cache_force_refresh: Optional[bool] = None, custom_host: Optional[str] = None, forward_headers: Optional[List[str]] = None, openai_project: Optional[str] = None, diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index 
678f5746..6c68b482 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -38,8 +38,8 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, - debug: Optional[bool] = False, - cache_force_refresh: Optional[bool] = False, + debug: Optional[bool] = None, + cache_force_refresh: Optional[bool] = None, custom_host: Optional[str] = None, forward_headers: Optional[List[str]] = None, openai_project: Optional[str] = None, @@ -110,8 +110,8 @@ def copy( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, - debug: Optional[bool] = False, - cache_force_refresh: Optional[bool] = False, + debug: Optional[bool] = None, + cache_force_refresh: Optional[bool] = None, custom_host: Optional[str] = None, forward_headers: Optional[List[str]] = None, openai_project: Optional[str] = None, @@ -190,8 +190,8 @@ def __init__( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, - debug: Optional[bool] = False, - cache_force_refresh: Optional[bool] = False, + debug: Optional[bool] = None, + cache_force_refresh: Optional[bool] = None, custom_host: Optional[str] = None, forward_headers: Optional[List[str]] = None, openai_project: Optional[str] = None, @@ -262,8 +262,8 @@ def copy( provider: Optional[str] = None, trace_id: Optional[str] = None, metadata: Optional[str] = None, - debug: Optional[bool] = False, - cache_force_refresh: Optional[bool] = False, + debug: Optional[bool] = None, + cache_force_refresh: Optional[bool] = None, custom_host: Optional[str] = None, forward_headers: Optional[List[str]] = None, openai_project: Optional[str] = None, diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index b3fb57e8..3a17fe0d 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -240,10 +240,10 @@ class Constructs(BaseModel): cache: Optional[bool] = None cache_age: Optional[int] = None cache_status: Optional[Union[CacheType, CacheLiteral]] = None - cache_force_refresh: Optional[bool] = False + cache_force_refresh: Optional[bool] = None trace_id: Optional[str] = None metadata: Optional[Dict[str, Any]] = None - debug: Optional[bool] = False + debug: Optional[bool] = None custom_host: Optional[str] = None forward_headers:Optional[str] = None weight: Optional[float] = None From afb2a0eed4acb73f0d4310622759e28deb64603d Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Fri, 31 May 2024 18:55:17 +0530 Subject: [PATCH 34/38] fix: issues fixed for isinstance() --- portkey_ai/api_resources/apis/create_headers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/portkey_ai/api_resources/apis/create_headers.py b/portkey_ai/api_resources/apis/create_headers.py index 8aad9796..8e34f5f0 100644 --- a/portkey_ai/api_resources/apis/create_headers.py +++ b/portkey_ai/api_resources/apis/create_headers.py @@ -14,7 +14,7 @@ def json(self) -> Mapping: for k, v in self.kwargs.items(): # logic for boolean type headers - if type(v) == bool: + if isinstance(v, bool): v = str(v).lower() if k == "mode" and "proxy" not in v: v = f"proxy {v}" From 8007a15d4c95ebfd62437b47cd5fb29bcd10a7d9 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Fri, 31 May 2024 20:03:54 +0530 Subject: [PATCH 35/38] fix: linting issues --- portkey_ai/api_resources/apis/api_resource.py | 2 +- .../api_resources/apis/create_headers.py | 1 - portkey_ai/api_resources/apis/feedback.py | 12 ++--- 
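
Reverting the defaults from False back to None (PATCH 33) means an option the caller never set can be skipped entirely, instead of always emitting a header such as x-portkey-debug: false. A sketch of that behavior, assuming the header builder drops None values (the header names here are illustrative):

    def build_headers(**opts):
        headers = {}
        for k, v in opts.items():
            if v is None:
                continue  # unset option: emit no header at all
            if isinstance(v, bool):  # the check as written after PATCH 34
                v = str(v).lower()
            headers["x-portkey-" + k.replace("_", "-")] = str(v)
        return headers

    print(build_headers(debug=None, cache_force_refresh=True))
    # {'x-portkey-cache-force-refresh': 'true'}
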
portkey_ai/api_resources/base_client.py | 44 +++++++++---------- .../api_resources/types/feedback_type.py | 2 - portkey_ai/api_resources/utils.py | 2 +- 6 files changed, 29 insertions(+), 34 deletions(-) diff --git a/portkey_ai/api_resources/apis/api_resource.py b/portkey_ai/api_resources/apis/api_resource.py index 1dc6daad..b0f0dde9 100644 --- a/portkey_ai/api_resources/apis/api_resource.py +++ b/portkey_ai/api_resources/apis/api_resource.py @@ -35,7 +35,7 @@ def __init__(self, client: AsyncAPIClient) -> None: async def _post(self, *args, **kwargs): return await self._client._post(*args, **kwargs) - + async def _put(self, *args, **kwargs): return await self._client._put(*args, **kwargs) diff --git a/portkey_ai/api_resources/apis/create_headers.py b/portkey_ai/api_resources/apis/create_headers.py index 8e34f5f0..0ada9d5e 100644 --- a/portkey_ai/api_resources/apis/create_headers.py +++ b/portkey_ai/api_resources/apis/create_headers.py @@ -12,7 +12,6 @@ def __init__(self, **kwargs) -> None: # type: ignore def json(self) -> Mapping: headers = {} for k, v in self.kwargs.items(): - # logic for boolean type headers if isinstance(v, bool): v = str(v).lower() diff --git a/portkey_ai/api_resources/apis/feedback.py b/portkey_ai/api_resources/apis/feedback.py index c7374d6f..6db00901 100644 --- a/portkey_ai/api_resources/apis/feedback.py +++ b/portkey_ai/api_resources/apis/feedback.py @@ -16,7 +16,7 @@ def create( trace_id: Optional[str] = None, value: Optional[int] = None, weight: Optional[float] = None, - metadata: Optional[Dict[str, Any]] = None + metadata: Optional[Dict[str, Any]] = None, ) -> FeedbackResponse: body = dict(trace_id=trace_id, value=value, weight=weight, metadata=metadata) return self._post( @@ -40,14 +40,14 @@ def bulk_create(self, *, feedbacks: List[Dict[str, Any]]) -> FeedbackResponse: stream=False, headers={}, ) - + def update( self, *, feedback_id: Optional[str] = None, value: Optional[int] = None, weight: Optional[float] = None, - metadata: Optional[Dict[str, Any]] = None + metadata: Optional[Dict[str, Any]] = None, ) -> FeedbackResponse: body = dict(value=value, weight=weight, metadata=metadata) @@ -72,7 +72,7 @@ async def create( trace_id: Optional[str] = None, value: Optional[int] = None, weight: Optional[float] = None, - metadata: Optional[Dict[str, Any]] = None + metadata: Optional[Dict[str, Any]] = None, ) -> FeedbackResponse: body = dict(trace_id=trace_id, value=value, weight=weight, metadata=metadata) return await self._post( @@ -96,14 +96,14 @@ async def bulk_create(self, *, feedbacks: List[Dict[str, Any]]) -> FeedbackRespo stream=False, headers={}, ) - + async def update( self, *, feedback_id: Optional[str] = None, value: Optional[int] = None, weight: Optional[float] = None, - metadata: Optional[Dict[str, Any]] = None + metadata: Optional[Dict[str, Any]] = None, ) -> FeedbackResponse: body = dict(value=value, weight=weight, metadata=metadata) return await self._put( diff --git a/portkey_ai/api_resources/base_client.py b/portkey_ai/api_resources/base_client.py index e4afbce6..48730d97 100644 --- a/portkey_ai/api_resources/base_client.py +++ b/portkey_ai/api_resources/base_client.py @@ -87,16 +87,16 @@ def __init__( self.forward_headers = forward_headers self.openai_project = openai_project self.openai_organization = openai_organization - self.aws_secret_access_key=aws_secret_access_key - self.aws_access_key_id=aws_access_key_id - self.aws_session_token=aws_session_token - self.aws_region=aws_region - self.vertex_project_id=vertex_project_id - 
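
For the forward-headers support finalized in this series, a list of header names is reduced to a comma-separated string; without the explicit join, str() of the list would serialize with brackets and quotes. A sketch (the x-portkey-forward-headers name is an assumption about get_portkey_header's output):

    names = ["x-api-key", "x-custom-header"]
    headers = {"x-portkey-forward-headers": ",".join(names)}
    print(headers)
    # {'x-portkey-forward-headers': 'x-api-key,x-custom-header'}
    # str(names) would instead give "['x-api-key', 'x-custom-header']"
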
self.vertex_region=vertex_region - self.workers_ai_account_id=workers_ai_account_id - self.azure_resource_name=azure_resource_name - self.azure_deployment_id=azure_deployment_id - self.azure_api_version=azure_api_version + self.aws_secret_access_key = aws_secret_access_key + self.aws_access_key_id = aws_access_key_id + self.aws_session_token = aws_session_token + self.aws_region = aws_region + self.vertex_project_id = vertex_project_id + self.vertex_region = vertex_region + self.workers_ai_account_id = workers_ai_account_id + self.azure_resource_name = azure_resource_name + self.azure_deployment_id = azure_deployment_id + self.azure_api_version = azure_api_version self.cache_namespace = cache_namespace self.kwargs = kwargs @@ -286,7 +286,6 @@ def _put( params: Mapping[str, str], headers: Mapping[str, str], ) -> Union[ResponseT, StreamT]: - opts = self._construct( method="put", url=path, @@ -557,17 +556,17 @@ def __init__( self.forward_headers = forward_headers self.openai_project = openai_project self.openai_organization = openai_organization - self.aws_secret_access_key=aws_secret_access_key - self.aws_access_key_id=aws_access_key_id - self.aws_session_token=aws_session_token - self.aws_region=aws_region - self.vertex_project_id=vertex_project_id - self.vertex_region=vertex_region - self.workers_ai_account_id=workers_ai_account_id - self.azure_resource_name=azure_resource_name - self.azure_deployment_id=azure_deployment_id - self.azure_api_version=azure_api_version - self.cache_namespace=cache_namespace + self.aws_secret_access_key = aws_secret_access_key + self.aws_access_key_id = aws_access_key_id + self.aws_session_token = aws_session_token + self.aws_region = aws_region + self.vertex_project_id = vertex_project_id + self.vertex_region = vertex_region + self.workers_ai_account_id = workers_ai_account_id + self.azure_resource_name = azure_resource_name + self.azure_deployment_id = azure_deployment_id + self.azure_api_version = azure_api_version + self.cache_namespace = cache_namespace self.kwargs = kwargs self.custom_headers = createHeaders( @@ -756,7 +755,6 @@ async def _put( params: Mapping[str, str], headers: Mapping[str, str], ) -> Union[ResponseT, AsyncStreamT]: - opts = await self._construct( method="put", url=path, diff --git a/portkey_ai/api_resources/types/feedback_type.py b/portkey_ai/api_resources/types/feedback_type.py index 511c1585..a3043b59 100644 --- a/portkey_ai/api_resources/types/feedback_type.py +++ b/portkey_ai/api_resources/types/feedback_type.py @@ -1,7 +1,5 @@ -import json from typing import Optional -from .utils import parse_headers from typing import List from pydantic import BaseModel diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index 93946bb2..26a1ae33 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -246,7 +246,7 @@ class Constructs(BaseModel): metadata: Optional[Dict[str, Any]] = None debug: Optional[bool] = None custom_host: Optional[str] = None - forward_headers:Optional[str] = None + forward_headers: Optional[str] = None weight: Optional[float] = None retry: Optional[RetrySettings] = None deployment_id: Optional[str] = None From 6c7d5239769e5b6ae4ae52b08cf3e4dbde0a6a5f Mon Sep 17 00:00:00 2001 From: visargD Date: Fri, 31 May 2024 20:50:56 +0530 Subject: [PATCH 36/38] 1.3.0 --- portkey_ai/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/portkey_ai/version.py b/portkey_ai/version.py index ef7090f3..7b49cf1d 100644 --- a/portkey_ai/version.py +++ 
b/portkey_ai/version.py @@ -1 +1 @@ -VERSION = "1.2.4" +VERSION = "1.3.0"

From 51bd3385a1db9e7b68c42526bc1abe01838e8cf0 Mon Sep 17 00:00:00 2001
From: csgulati09
Date: Fri, 31 May 2024 21:15:22 +0530
Subject: [PATCH 37/38] fix: remove stray debug print (sanity check)

---
 portkey_ai/api_resources/apis/create_headers.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/portkey_ai/api_resources/apis/create_headers.py b/portkey_ai/api_resources/apis/create_headers.py
index 0ada9d5e..aa5d8749 100644
--- a/portkey_ai/api_resources/apis/create_headers.py
+++ b/portkey_ai/api_resources/apis/create_headers.py
@@ -29,7 +29,6 @@ def json(self) -> Mapping:
             # logic for List of str to comma separated string
             if k == "forward-headers":
                 headers[get_portkey_header(k)] = ",".join(v)
-        print(headers)
         return headers

From fc52afccfcd6b203558b9a8e9007eeed65771964 Mon Sep 17 00:00:00 2001
From: visargD
Date: Fri, 31 May 2024 22:09:54 +0530
Subject: [PATCH 38/38] 1.3.1

---
 portkey_ai/version.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/portkey_ai/version.py b/portkey_ai/version.py
index 7b49cf1d..4cf03a8d 100644
--- a/portkey_ai/version.py
+++ b/portkey_ai/version.py
@@ -1 +1 @@
-VERSION = "1.3.0"
+VERSION = "1.3.1"
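
The stray print(headers) dropped in PATCH 37 would have written every outgoing header, credentials included, to stdout on each request. A library-friendly alternative, sketched here as one possible approach rather than what the SDK does, is the standard logging module, which stays silent unless the host application opts in:

    import logging

    logger = logging.getLogger("portkey_ai")

    def log_headers(headers):
        # redact anything resembling a credential before logging
        redacted = {
            k: ("***" if k.lower() == "authorization" else v)
            for k, v in headers.items()
        }
        logger.debug("outgoing headers: %s", redacted)

    log_headers({"Authorization": "Bearer secret", "x-portkey-trace-id": "t-1"})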