From 8ce351e75981e7a45d3544aabe9ddb740b013597 Mon Sep 17 00:00:00 2001
From: csgulati09
Date: Fri, 22 Mar 2024 16:10:02 +0530
Subject: [PATCH] fix: prompt.render.create to prompt.render

---
 portkey_ai/api_resources/apis/generation.py | 142 +++-----------------
 1 file changed, 16 insertions(+), 126 deletions(-)

diff --git a/portkey_ai/api_resources/apis/generation.py b/portkey_ai/api_resources/apis/generation.py
index f7976c01..726ed8f3 100644
--- a/portkey_ai/api_resources/apis/generation.py
+++ b/portkey_ai/api_resources/apis/generation.py
@@ -79,62 +79,12 @@ async def create(
 
 class Prompts(APIResource):
     completions: Completions
-    render: Render
 
     def __init__(self, client: APIClient) -> None:
         super().__init__(client)
         self.completions = Completions(client)
-        self.render = Render(client)
-
 
-class AsyncPrompts(AsyncAPIResource):
-    completions: AsyncCompletions
-    render: AsyncRender
-
-    def __init__(self, client: AsyncAPIClient) -> None:
-        super().__init__(client)
-        self.completions = AsyncCompletions(client)
-        self.render = AsyncRender(client)
-
-
-class Completions(APIResource):
-    def __init__(self, client: APIClient) -> None:
-        super().__init__(client)
-
-    @overload
-    def create(
-        self,
-        *,
-        prompt_id: str,
-        variables: Optional[Mapping[str, Any]] = None,
-        config: Optional[Union[Mapping, str]] = None,
-        stream: Literal[True],
-        temperature: Optional[float] = None,
-        max_tokens: Optional[int] = None,
-        top_k: Optional[int] = None,
-        top_p: Optional[float] = None,
-        **kwargs,
-    ) -> Stream[GenericResponse]:
-        ...
-
-    @overload
-    def create(
-        self,
-        *,
-        prompt_id: str,
-        variables: Optional[Mapping[str, Any]] = None,
-        config: Optional[Union[Mapping, str]] = None,
-        stream: Literal[False] = False,
-        temperature: Optional[float] = None,
-        max_tokens: Optional[int] = None,
-        top_k: Optional[int] = None,
-        top_p: Optional[float] = None,
-        **kwargs,
-    ) -> GenericResponse:
-        ...
-
-    @overload
-    def create(
+    def render(
         self,
         *,
         prompt_id: str,
@@ -147,22 +97,7 @@ def create(
         top_p: Optional[float] = None,
         **kwargs,
     ) -> Union[GenericResponse, Stream[GenericResponse]]:
-        ...
-
-    def create(
-        self,
-        *,
-        prompt_id: str,
-        variables: Optional[Mapping[str, Any]] = None,
-        config: Optional[Union[Mapping, str]] = None,
-        stream: bool = False,
-        temperature: Optional[float] = None,
-        max_tokens: Optional[int] = None,
-        top_k: Optional[int] = None,
-        top_p: Optional[float] = None,
-        **kwargs,
-    ) -> Union[GenericResponse, Stream[GenericResponse]]:
-        """Prompt completions Method"""
+        """Prompt render Method"""
         if config is None:
             config = retrieve_config()
         body = {
@@ -175,7 +110,7 @@ def create(
             **kwargs,
         }
         return self._post(
-            f"/prompts/{prompt_id}/completions",
+            f"/prompts/{prompt_id}/render",
             body=body,
             params=None,
             cast_to=GenericResponse,
@@ -185,59 +120,14 @@ def create(
         )
 
 
-class AsyncCompletions(AsyncAPIResource):
+class AsyncPrompts(AsyncAPIResource):
+    completions: AsyncCompletions
+
     def __init__(self, client: AsyncAPIClient) -> None:
         super().__init__(client)
+        self.completions = AsyncCompletions(client)
 
-    @overload
-    async def create(
-        self,
-        *,
-        prompt_id: str,
-        variables: Optional[Mapping[str, Any]] = None,
-        config: Optional[Union[Mapping, str]] = None,
-        stream: Literal[True],
-        temperature: Optional[float] = None,
-        max_tokens: Optional[int] = None,
-        top_k: Optional[int] = None,
-        top_p: Optional[float] = None,
-        **kwargs,
-    ) -> AsyncStream[GenericResponse]:
-        ...
-
-    @overload
-    async def create(
-        self,
-        *,
-        prompt_id: str,
-        variables: Optional[Mapping[str, Any]] = None,
-        config: Optional[Union[Mapping, str]] = None,
-        stream: Literal[False] = False,
-        temperature: Optional[float] = None,
-        max_tokens: Optional[int] = None,
-        top_k: Optional[int] = None,
-        top_p: Optional[float] = None,
-        **kwargs,
-    ) -> GenericResponse:
-        ...
-
-    @overload
-    async def create(
-        self,
-        *,
-        prompt_id: str,
-        variables: Optional[Mapping[str, Any]] = None,
-        config: Optional[Union[Mapping, str]] = None,
-        stream: bool = False,
-        temperature: Optional[float] = None,
-        max_tokens: Optional[int] = None,
-        top_k: Optional[int] = None,
-        top_p: Optional[float] = None,
-        **kwargs,
-    ) -> Union[GenericResponse, AsyncStream[GenericResponse]]:
-        ...
-
-    async def create(
+    async def render(
         self,
         *,
         prompt_id: str,
@@ -250,7 +140,7 @@ async def create(
         top_p: Optional[float] = None,
         **kwargs,
     ) -> Union[GenericResponse, AsyncStream[GenericResponse]]:
-        """Prompt completions Method"""
+        """Prompt render Method"""
         if config is None:
             config = retrieve_config()
         body = {
@@ -263,7 +153,7 @@ async def create(
             **kwargs,
         }
         return await self._post(
-            f"/prompts/{prompt_id}/completions",
+            f"/prompts/{prompt_id}/render",
             body=body,
             params=None,
             cast_to=GenericResponse,
@@ -273,7 +163,7 @@ async def create(
         )
 
 
-class Render(APIResource):
+class Completions(APIResource):
     def __init__(self, client: APIClient) -> None:
         super().__init__(client)
 
@@ -338,7 +228,7 @@ def create(
         top_p: Optional[float] = None,
         **kwargs,
     ) -> Union[GenericResponse, Stream[GenericResponse]]:
-        """Prompt render Method"""
+        """Prompt completions Method"""
         if config is None:
             config = retrieve_config()
         body = {
@@ -351,7 +241,7 @@ def create(
             **kwargs,
         }
         return self._post(
-            f"/prompts/{prompt_id}/render",
+            f"/prompts/{prompt_id}/completions",
             body=body,
             params=None,
             cast_to=GenericResponse,
@@ -361,7 +251,7 @@ def create(
         )
 
 
-class AsyncRender(AsyncAPIResource):
+class AsyncCompletions(AsyncAPIResource):
    def __init__(self, client: AsyncAPIClient) -> None:
        super().__init__(client)
 
@@ -426,7 +316,7 @@ async def create(
         top_p: Optional[float] = None,
         **kwargs,
     ) -> Union[GenericResponse, AsyncStream[GenericResponse]]:
-        """Prompt render Method"""
+        """Prompt completions Method"""
         if config is None:
             config = retrieve_config()
         body = {
@@ -439,7 +329,7 @@ async def create(
             **kwargs,
         }
         return await self._post(
-            f"/prompts/{prompt_id}/render",
+            f"/prompts/{prompt_id}/completions",
             body=body,
             params=None,
             cast_to=GenericResponse,
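
Usage sketch for the renamed call path (a minimal example, not part of the patch itself): it shows render moving from the nested prompts.render.create() resource to a direct prompts.render() method, while prompts.completions.create() stays as it was. It assumes the top-level Portkey client exposes this resource as client.prompts; the API key, virtual key, prompt ID, and variables below are placeholders.

    from portkey_ai import Portkey

    # Placeholder credentials -- substitute real values.
    client = Portkey(api_key="PORTKEY_API_KEY", virtual_key="OPENAI_VIRTUAL_KEY")

    # Before this patch the render endpoint was reached through a nested resource:
    #   client.prompts.render.create(prompt_id="pp-example", variables={"user": "Ada"})
    # After this patch, render() is a method on the Prompts resource itself:
    rendered = client.prompts.render(
        prompt_id="pp-example",        # placeholder prompt ID
        variables={"user": "Ada"},     # values substituted into the prompt template
    )

    # Prompt completions keep the existing nested form:
    completion = client.prompts.completions.create(
        prompt_id="pp-example",
        variables={"user": "Ada"},
    )

    # AsyncPrompts mirrors this: `await async_client.prompts.render(...)`.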