From 84c6bed16899f0370381489dd708495a2482ce57 Mon Sep 17 00:00:00 2001 From: David vonThenen <12752197+dvonthenen@users.noreply.github.com> Date: Tue, 30 Jul 2024 12:52:55 -0700 Subject: [PATCH] Implements Models API --- deepgram/__init__.py | 2 + deepgram/client.py | 8 +- deepgram/clients/__init__.py | 2 + deepgram/clients/manage/__init__.py | 2 + deepgram/clients/manage/client.py | 5 +- deepgram/clients/manage/v1/__init__.py | 2 + deepgram/clients/manage/v1/async_client.py | 196 ++++++++++++++++- deepgram/clients/manage/v1/client.py | 198 +++++++++++++++++- deepgram/clients/manage/v1/response.py | 110 ++++++++++ examples/manage/async_models/main.py | 94 +++++++++ examples/manage/models/main.py | 89 ++++++++ .../websocket/microphone/main.py | 3 +- 12 files changed, 704 insertions(+), 7 deletions(-) create mode 100644 examples/manage/async_models/main.py create mode 100644 examples/manage/models/main.py diff --git a/deepgram/__init__.py b/deepgram/__init__.py index f0984073..4976afcf 100644 --- a/deepgram/__init__.py +++ b/deepgram/__init__.py @@ -160,6 +160,8 @@ UsageFieldsResponse, Balance, BalancesResponse, + ModelsResponse, + ModelResponse, ) # selfhosted diff --git a/deepgram/client.py b/deepgram/client.py index 05ce0bd7..39cc1361 100644 --- a/deepgram/client.py +++ b/deepgram/client.py @@ -164,6 +164,8 @@ UsageFieldsResponse, Balance, BalancesResponse, + ModelResponse, + ModelsResponse, ) # on-prem @@ -261,21 +263,21 @@ def __init__( @property def listen(self): """ - Returns a ListenClient instance for interacting with Deepgram's transcription services. + Returns a Listen dot-notation router for interacting with Deepgram's transcription services. """ return Listen(self._config) @property def read(self): """ - Returns a ReadClient instance for interacting with Deepgram's read services. + Returns a Read dot-notation router for interacting with Deepgram's read services. 
""" return Read(self._config) @property def speak(self): """ - Returns a SpeakClient instance for interacting with Deepgram's speak services. + Returns a Speak dot-notation router for interacting with Deepgram's speak services. """ return Speak(self._config) diff --git a/deepgram/clients/__init__.py b/deepgram/clients/__init__.py index 8aa1661e..1b3012f2 100644 --- a/deepgram/clients/__init__.py +++ b/deepgram/clients/__init__.py @@ -168,6 +168,8 @@ UsageFieldsResponse, Balance, BalancesResponse, + ModelsResponse, + ModelResponse, ) # selfhosted diff --git a/deepgram/clients/manage/__init__.py b/deepgram/clients/manage/__init__.py index 723f7dc5..d51a9eca 100644 --- a/deepgram/clients/manage/__init__.py +++ b/deepgram/clients/manage/__init__.py @@ -29,5 +29,7 @@ UsageFieldsResponse, Balance, BalancesResponse, + ModelResponse, + ModelsResponse, ) from ...options import DeepgramClientOptions, ClientOptionsFromEnv diff --git a/deepgram/clients/manage/client.py b/deepgram/clients/manage/client.py index aff57e0f..57a581aa 100644 --- a/deepgram/clients/manage/client.py +++ b/deepgram/clients/manage/client.py @@ -33,6 +33,8 @@ UsageFieldsResponse as UsageFieldsResponseLatest, Balance as BalanceLatest, BalancesResponse as BalancesResponseLatest, + ModelResponse as ModelResponseLatest, + ModelsResponse as ModelsResponseLatest, ) @@ -66,7 +68,8 @@ UsageFieldsResponse = UsageFieldsResponseLatest Balance = BalanceLatest BalancesResponse = BalancesResponseLatest - +ModelResponse = ModelResponseLatest +ModelsResponse = ModelsResponseLatest # clients ManageClient = ManageClientLatest diff --git a/deepgram/clients/manage/v1/__init__.py b/deepgram/clients/manage/v1/__init__.py index 057e68e8..8fdff843 100644 --- a/deepgram/clients/manage/v1/__init__.py +++ b/deepgram/clients/manage/v1/__init__.py @@ -29,5 +29,7 @@ UsageFieldsResponse, Balance, BalancesResponse, + ModelResponse, + ModelsResponse, ) from ....options import DeepgramClientOptions, ClientOptionsFromEnv diff --git 
a/deepgram/clients/manage/v1/async_client.py b/deepgram/clients/manage/v1/async_client.py index 9adcc36d..92d6e0f7 100644 --- a/deepgram/clients/manage/v1/async_client.py +++ b/deepgram/clients/manage/v1/async_client.py @@ -27,6 +27,8 @@ UsageFieldsResponse, Balance, BalancesResponse, + ModelResponse, + ModelsResponse, ) from .options import ( ProjectOptions, @@ -41,7 +43,7 @@ class AsyncManageClient( AbstractAsyncRestClient -): # pylint: disable=too-many-public-methods +): # pylint: disable=too-many-public-methods,too-many-lines """ A client for managing Deepgram projects and associated resources via the Deepgram API. @@ -239,6 +241,198 @@ async def delete_project( self._logger.debug("ManageClient.delete_project LEAVE") return res + async def list_project_models( + self, + project_id: str, + timeout: Optional[httpx.Timeout] = None, + addons: Optional[Dict] = None, + headers: Optional[Dict] = None, + **kwargs, + ) -> ModelsResponse: + """ + Please see get_project_models. + """ + return await self.get_project_models( + project_id, timeout=timeout, addons=addons, headers=headers, **kwargs + ) + + async def get_project_models( + self, + project_id: str, + timeout: Optional[httpx.Timeout] = None, + addons: Optional[Dict] = None, + headers: Optional[Dict] = None, + **kwargs, + ) -> ModelsResponse: + """ + Gets models for a specific project. + + Reference: + https://developers.deepgram.com/reference/get-project + https://developers.deepgram.com/reference/get-model + + Args: + project_id (str): The ID of the project. + timeout (Optional[httpx.Timeout]): The timeout setting for the request. + addons (Optional[Dict]): Additional options for the request. + headers (Optional[Dict]): Headers to include in the request. + **kwargs: Additional keyword arguments. + + Returns: + ModelsResponse: A response object containing the model details. 
+ """ + self._logger.debug("ManageClient.get_project_models ENTER") + url = f"{self._config.url}/{self._endpoint}/{project_id}/models" + self._logger.info("url: %s", url) + self._logger.info("project_id: %s", project_id) + self._logger.info("addons: %s", addons) + self._logger.info("headers: %s", headers) + result = await self.get( + url, timeout=timeout, addons=addons, headers=headers, **kwargs + ) + self._logger.info("json: %s", result) + res = ModelsResponse.from_json(result) + self._logger.verbose("result: %s", res) + self._logger.notice("get_project_models succeeded") + self._logger.debug("ManageClient.get_project_models LEAVE") + return res + + async def get_project_model( + self, + project_id: str, + model_id: str, + timeout: Optional[httpx.Timeout] = None, + addons: Optional[Dict] = None, + headers: Optional[Dict] = None, + **kwargs, + ) -> ModelResponse: + """ + Gets a single model for a specific project. + + Reference: + https://developers.deepgram.com/reference/get-project + https://developers.deepgram.com/reference/get-model + + Args: + project_id (str): The ID of the project. + model_id (str): The ID of the model. + timeout (Optional[httpx.Timeout]): The timeout setting for the request. + addons (Optional[Dict]): Additional options for the request. + headers (Optional[Dict]): Headers to include in the request. + **kwargs: Additional keyword arguments. + + Returns: + ModelResponse: A response object containing the model details. 
+ """ + self._logger.debug("ManageClient.get_project_model ENTER") + url = f"{self._config.url}/{self._endpoint}/{project_id}/models/{model_id}" + self._logger.info("url: %s", url) + self._logger.info("project_id: %s", project_id) + self._logger.info("model_id: %s", model_id) + self._logger.info("addons: %s", addons) + self._logger.info("headers: %s", headers) + result = await self.get( + url, timeout=timeout, addons=addons, headers=headers, **kwargs + ) + self._logger.info("json: %s", result) + res = ModelResponse.from_json(result) + self._logger.verbose("result: %s", res) + self._logger.notice("get_project_model succeeded") + self._logger.debug("ManageClient.get_project_model LEAVE") + return res + + # models + async def list_models( + self, + timeout: Optional[httpx.Timeout] = None, + addons: Optional[Dict] = None, + headers: Optional[Dict] = None, + **kwargs, + ) -> ModelsResponse: + """ + Please see get_models for more information. + """ + return await self.get_models( + timeout=timeout, addons=addons, headers=headers, **kwargs + ) + + async def get_models( + self, + timeout: Optional[httpx.Timeout] = None, + addons: Optional[Dict] = None, + headers: Optional[Dict] = None, + **kwargs, + ) -> ModelsResponse: + """ + Gets all models available. + + Reference: + https://developers.deepgram.com/reference/get-model + + Args: + timeout (Optional[httpx.Timeout]): The timeout setting for the request. + addons (Optional[Dict]): Additional options for the request. + headers (Optional[Dict]): Headers to include in the request. + **kwargs: Additional keyword arguments. + + Returns: + ModelsResponse: A response object containing the model details. 
+ """ + self._logger.debug("ManageClient.get_models ENTER") + url = f"{self._config.url}/v1/models" + self._logger.info("url: %s", url) + self._logger.info("addons: %s", addons) + self._logger.info("headers: %s", headers) + result = await self.get( + url, timeout=timeout, addons=addons, headers=headers, **kwargs + ) + self._logger.info("result: %s", result) + res = ModelsResponse.from_json(result) + self._logger.verbose("result: %s", res) + self._logger.notice("get_models succeeded") + self._logger.debug("ManageClient.get_models LEAVE") + return res + + async def get_model( + self, + model_id: str, + timeout: Optional[httpx.Timeout] = None, + addons: Optional[Dict] = None, + headers: Optional[Dict] = None, + **kwargs, + ) -> ModelResponse: + """ + Gets information for a specific model. + + Reference: + https://developers.deepgram.com/reference/get-model + + Args: + model_id (str): The ID of the model. + timeout (Optional[httpx.Timeout]): The timeout setting for the request. + addons (Optional[Dict]): Additional options for the request. + headers (Optional[Dict]): Headers to include in the request. + **kwargs: Additional keyword arguments. + + Returns: + ModelResponse: A response object containing the model details. 
+ """ + self._logger.debug("ManageClient.get_model ENTER") + url = f"{self._config.url}/v1/models/{model_id}" + self._logger.info("url: %s", url) + self._logger.info("model_id: %s", model_id) + self._logger.info("addons: %s", addons) + self._logger.info("headers: %s", headers) + result = await self.get( + url, timeout=timeout, addons=addons, headers=headers, **kwargs + ) + self._logger.info("result: %s", result) + res = ModelResponse.from_json(result) + self._logger.verbose("result: %s", res) + self._logger.notice("get_model succeeded") + self._logger.debug("ManageClient.get_model LEAVE") + return res + # keys async def list_keys( self, diff --git a/deepgram/clients/manage/v1/client.py b/deepgram/clients/manage/v1/client.py index 91de15be..75094e2c 100644 --- a/deepgram/clients/manage/v1/client.py +++ b/deepgram/clients/manage/v1/client.py @@ -27,6 +27,8 @@ UsageFieldsResponse, Balance, BalancesResponse, + ModelResponse, + ModelsResponse, ) from .options import ( ProjectOptions, @@ -39,7 +41,9 @@ ) -class ManageClient(AbstractSyncRestClient): # pylint: disable=too-many-public-methods +class ManageClient( + AbstractSyncRestClient +): # pylint: disable=too-many-public-methods,too-many-lines """ A client for managing Deepgram projects and associated resources via the Deepgram API. @@ -238,6 +242,198 @@ def delete_project( self._logger.debug("ManageClient.delete_project LEAVE") return res + def list_project_models( + self, + project_id: str, + timeout: Optional[httpx.Timeout] = None, + addons: Optional[Dict] = None, + headers: Optional[Dict] = None, + **kwargs, + ) -> ModelsResponse: + """ + Please see get_project_models. 
+ """ + return self.get_project_models( + project_id, timeout=timeout, addons=addons, headers=headers, **kwargs + ) + + def get_project_models( + self, + project_id: str, + timeout: Optional[httpx.Timeout] = None, + addons: Optional[Dict] = None, + headers: Optional[Dict] = None, + **kwargs, + ) -> ModelsResponse: + """ + Gets models for a specific project. + + Reference: + https://developers.deepgram.com/reference/get-project + https://developers.deepgram.com/reference/get-model + + Args: + project_id (str): The ID of the project. + timeout (Optional[httpx.Timeout]): The timeout setting for the request. + addons (Optional[Dict]): Additional options for the request. + headers (Optional[Dict]): Headers to include in the request. + **kwargs: Additional keyword arguments. + + Returns: + ModelsResponse: A response object containing the model details. + """ + self._logger.debug("ManageClient.get_project_models ENTER") + url = f"{self._config.url}/{self._endpoint}/{project_id}/models" + self._logger.info("url: %s", url) + self._logger.info("project_id: %s", project_id) + self._logger.info("addons: %s", addons) + self._logger.info("headers: %s", headers) + result = self.get( + url, timeout=timeout, addons=addons, headers=headers, **kwargs + ) + self._logger.info("json: %s", result) + res = ModelsResponse.from_json(result) + self._logger.verbose("result: %s", res) + self._logger.notice("get_project_models succeeded") + self._logger.debug("ManageClient.get_project_models LEAVE") + return res + + def get_project_model( + self, + project_id: str, + model_id: str, + timeout: Optional[httpx.Timeout] = None, + addons: Optional[Dict] = None, + headers: Optional[Dict] = None, + **kwargs, + ) -> ModelResponse: + """ + Gets a single model for a specific project. + + Reference: + https://developers.deepgram.com/reference/get-project + https://developers.deepgram.com/reference/get-model + + Args: + project_id (str): The ID of the project. + model_id (str): The ID of the model. 
+ timeout (Optional[httpx.Timeout]): The timeout setting for the request. + addons (Optional[Dict]): Additional options for the request. + headers (Optional[Dict]): Headers to include in the request. + **kwargs: Additional keyword arguments. + + Returns: + ModelResponse: A response object containing the model details. + """ + self._logger.debug("ManageClient.get_project_model ENTER") + url = f"{self._config.url}/{self._endpoint}/{project_id}/models/{model_id}" + self._logger.info("url: %s", url) + self._logger.info("project_id: %s", project_id) + self._logger.info("model_id: %s", model_id) + self._logger.info("addons: %s", addons) + self._logger.info("headers: %s", headers) + result = self.get( + url, timeout=timeout, addons=addons, headers=headers, **kwargs + ) + self._logger.info("json: %s", result) + res = ModelResponse.from_json(result) + self._logger.verbose("result: %s", res) + self._logger.notice("get_project_model succeeded") + self._logger.debug("ManageClient.get_project_model LEAVE") + return res + + # models + def list_models( + self, + timeout: Optional[httpx.Timeout] = None, + addons: Optional[Dict] = None, + headers: Optional[Dict] = None, + **kwargs, + ) -> ModelsResponse: + """ + Please see get_models for more information. + """ + return self.get_models( + timeout=timeout, addons=addons, headers=headers, **kwargs + ) + + def get_models( + self, + timeout: Optional[httpx.Timeout] = None, + addons: Optional[Dict] = None, + headers: Optional[Dict] = None, + **kwargs, + ) -> ModelsResponse: + """ + Gets all models available. + + Reference: + https://developers.deepgram.com/reference/get-model + + Args: + timeout (Optional[httpx.Timeout]): The timeout setting for the request. + addons (Optional[Dict]): Additional options for the request. + headers (Optional[Dict]): Headers to include in the request. + **kwargs: Additional keyword arguments. + + Returns: + ModelsResponse: A response object containing the model details. 
+ """ + self._logger.debug("ManageClient.get_models ENTER") + url = f"{self._config.url}/v1/models" + self._logger.info("url: %s", url) + self._logger.info("addons: %s", addons) + self._logger.info("headers: %s", headers) + result = self.get( + url, timeout=timeout, addons=addons, headers=headers, **kwargs + ) + self._logger.info("json: %s", result) + res = ModelsResponse.from_json(result) + self._logger.verbose("result: %s", res) + self._logger.notice("get_models succeeded") + self._logger.debug("ManageClient.get_models LEAVE") + return res + + def get_model( + self, + model_id: str, + timeout: Optional[httpx.Timeout] = None, + addons: Optional[Dict] = None, + headers: Optional[Dict] = None, + **kwargs, + ) -> ModelResponse: + """ + Gets information for a specific model. + + Reference: + https://developers.deepgram.com/reference/get-model + + Args: + model_id (str): The ID of the model. + timeout (Optional[httpx.Timeout]): The timeout setting for the request. + addons (Optional[Dict]): Additional options for the request. + headers (Optional[Dict]): Headers to include in the request. + **kwargs: Additional keyword arguments. + + Returns: + ModelResponse: A response object containing the model details. 
+ """ + self._logger.debug("ManageClient.get_model ENTER") + url = f"{self._config.url}/v1/models/{model_id}" + self._logger.info("url: %s", url) + self._logger.info("model_id: %s", model_id) + self._logger.info("addons: %s", addons) + self._logger.info("headers: %s", headers) + result = self.get( + url, timeout=timeout, addons=addons, headers=headers, **kwargs + ) + self._logger.info("json: %s", result) + res = ModelResponse.from_json(result) + self._logger.verbose("result: %s", res) + self._logger.notice("get_model succeeded") + self._logger.debug("ManageClient.get_model LEAVE") + return res + # keys def list_keys( self, diff --git a/deepgram/clients/manage/v1/response.py b/deepgram/clients/manage/v1/response.py index 795b8699..376c8fd6 100644 --- a/deepgram/clients/manage/v1/response.py +++ b/deepgram/clients/manage/v1/response.py @@ -82,6 +82,116 @@ def __getitem__(self, key): return _dict[key] +# Models + + +@dataclass +class Stt(BaseResponse): # pylint: disable=too-many-instance-attributes + """ + STT class used to define the properties of the Speech-to-Text model response object. + """ + + name: str = "" + canonical_name: str = "" + architecture: str = "" + languages: List[str] = field(default_factory=list) + version: str = "" + uuid: str = "" + batch: bool = False + streaming: bool = False + formatted_output: bool = False + + def __getitem__(self, key): + _dict = self.to_dict() + if "languages" in _dict: + _dict["languages"] = [str(languages) for languages in _dict["languages"]] + return _dict[key] + + +@dataclass +class Metadata(BaseResponse): + """ + Metadata class used to define the properties for a given STT or TTS model. + """ + + accent: str = "" + color: str = "" + gender: str = "" + image: str = "" + sample: str = "" + + +@dataclass +class Tts(BaseResponse): + """ + TTS class used to define the properties of the Text-to-Speech model response object. 
+ """ + + name: str = "" + canonical_name: str = "" + architecture: str = "" + language: str = "" + version: str = "" + uuid: str = "" + metadata: Optional[Metadata] = field( + default=None, metadata=dataclass_config(exclude=lambda f: f is None) + ) + + def __getitem__(self, key): + _dict = self.to_dict() + if "metadata" in _dict: + _dict["metadata"] = [ + Metadata.from_dict(metadata) for metadata in _dict["metadata"] + ] + return _dict[key] + + +# responses + + +@dataclass +class ModelResponse(BaseResponse): + """ + ModelResponse class used to define the properties of a single model. + """ + + name: str = "" + canonical_name: str = "" + architecture: str = "" + language: str = "" + version: str = "" + uuid: str = "" + metadata: Optional[Metadata] = field( + default=None, metadata=dataclass_config(exclude=lambda f: f is None) + ) + + def __getitem__(self, key): + _dict = self.to_dict() + if "metadata" in _dict: + _dict["metadata"] = [ + Metadata.from_dict(metadata) for metadata in _dict["metadata"] + ] + return _dict[key] + + +@dataclass +class ModelsResponse(BaseResponse): + """ + ModelsResponse class used to obtain a list of models. + """ + + stt: List[Stt] = field(default_factory=list) + tts: List[Tts] = field(default_factory=list) + + def __getitem__(self, key): + _dict = self.to_dict() + if "stt" in _dict: + _dict["stt"] = [Stt.from_dict(stt) for stt in _dict["stt"]] + if "tts" in _dict: + _dict["tts"] = [Tts.from_dict(tts) for tts in _dict["tts"]] + return _dict[key] + + # Members diff --git a/examples/manage/async_models/main.py b/examples/manage/async_models/main.py new file mode 100644 index 00000000..0e7177f8 --- /dev/null +++ b/examples/manage/async_models/main.py @@ -0,0 +1,94 @@ +# Copyright 2024 Deepgram SDK contributors. All Rights Reserved. +# Use of this source code is governed by a MIT license that can be found in the LICENSE file. 
+# SPDX-License-Identifier: MIT + +import asyncio +import sys +from dotenv import load_dotenv + +from deepgram import DeepgramClient + +load_dotenv() + + +async def main(): + try: + # Create a Deepgram client using the API key + deepgram: DeepgramClient = DeepgramClient() + + # get projects + projectResp = await deepgram.asyncmanage.v("1").get_projects() + if projectResp is None: + print("get_projects failed.") + sys.exit(1) + + myProjectId = None + myProjectName = None + for project in projectResp.projects: + myProjectId = project.project_id + myProjectName = project.name + print(f"ListProjects() - ID: {myProjectId}, Name: {myProjectName}") + break + print("\n\n") + + # get models + myModelId = None + listModels = await deepgram.asyncmanage.v("1").get_models() + if listModels is None: + print("No models found") + else: + if listModels.stt: + for stt in listModels.stt: + print( + f"general.get_models() - Name: {stt.name}, Amount: {stt.uuid}" + ) + myModelId = stt.uuid + if listModels.tts: + for tts in listModels.tts: + print( + f"general.get_models() - Name: {tts.name}, Amount: {tts.uuid}" + ) + print("\n\n") + + # get model + listModel = await deepgram.asyncmanage.v("1").get_model(myModelId) + if listModel is None: + print(f"No model for {myModelId} found") + else: + print(f"get_model() - Name: {listModel.name}, Amount: {listModel.uuid}") + print("\n\n") + + # get project models + myModelId = None + listProjModels = await deepgram.asyncmanage.v("1").get_project_models( + myProjectId + ) + if listProjModels is None: + print(f"No model for project id {myProjectId} found") + else: + if listProjModels.stt: + for stt in listProjModels.stt: + print(f"manage.get_models() - Name: {stt.name}, Amount: {stt.uuid}") + if listProjModels.tts: + for tts in listProjModels.tts: + print(f"manage.get_models() - Name: {tts.name}, Amount: {tts.uuid}") + myModelId = tts.uuid + print("\n\n") + + # get project model + listProjModel = await deepgram.asyncmanage.v("1").get_project_model( + 
myProjectId, myModelId + ) + if listProjModel is None: + print(f"No model {myModelId} for project id {myProjectId} found") + else: + print( + f"get_model() - Name: {listProjModel.name}, Amount: {listProjModel.uuid}" + ) + + except Exception as e: + print(f"Exception: {e}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/manage/models/main.py b/examples/manage/models/main.py new file mode 100644 index 00000000..a9119841 --- /dev/null +++ b/examples/manage/models/main.py @@ -0,0 +1,89 @@ +# Copyright 2024 Deepgram SDK contributors. All Rights Reserved. +# Use of this source code is governed by a MIT license that can be found in the LICENSE file. +# SPDX-License-Identifier: MIT + +import sys +from dotenv import load_dotenv + +from deepgram import DeepgramClient + +load_dotenv() + + +def main(): + try: + # Create a Deepgram client using the API key + deepgram: DeepgramClient = DeepgramClient() + + # get projects + projectResp = deepgram.manage.v("1").get_projects() + if projectResp is None: + print("ListProjects failed.") + sys.exit(1) + + myProjectId = None + myProjectName = None + for project in projectResp.projects: + myProjectId = project.project_id + myProjectName = project.name + print(f"ListProjects() - ID: {myProjectId}, Name: {myProjectName}") + break + print("\n\n") + + # get models + myModelId = None + listModels = deepgram.manage.v("1").get_models() + if listModels is None: + print("No models found") + else: + if listModels.stt: + for stt in listModels.stt: + print( + f"general.get_models() - Name: {stt.name}, Amount: {stt.uuid}" + ) + myModelId = stt.uuid + if listModels.tts: + for tts in listModels.tts: + print( + f"general.get_models() - Name: {tts.name}, Amount: {tts.uuid}" + ) + print("\n\n") + + # get model + listModel = deepgram.manage.v("1").get_model(myModelId) + if listModel is None: + print(f"No model for {myModelId} found") + else: + print(f"get_model() - Name: {listModel.name}, Amount: {listModel.uuid}") + 
print("\n\n") + + # get project models + myModelId = None + listProjModels = deepgram.manage.v("1").get_project_models(myProjectId) + if listProjModels is None: + print(f"No model for project id {myProjectId} found") + else: + if listProjModels.stt: + for stt in listProjModels.stt: + print(f"manage.get_models() - Name: {stt.name}, Amount: {stt.uuid}") + if listProjModels.tts: + for tts in listProjModels.tts: + print(f"manage.get_models() - Name: {tts.name}, Amount: {tts.uuid}") + myModelId = tts.uuid + print("\n\n") + + # get project model + listProjModel = deepgram.manage.v("1").get_project_model(myProjectId, myModelId) + if listProjModel is None: + print(f"No model {myModelId} for project id {myProjectId} found") + else: + print( + f"get_model() - Name: {listProjModel.name}, Amount: {listProjModel.uuid}" + ) + + except Exception as e: + print(f"Exception: {e}") + + +if __name__ == "__main__": + main() diff --git a/examples/speech-to-text/websocket/microphone/main.py b/examples/speech-to-text/websocket/microphone/main.py index fc2bb37a..af755d25 100644 --- a/examples/speech-to-text/websocket/microphone/main.py +++ b/examples/speech-to-text/websocket/microphone/main.py @@ -3,9 +3,10 @@ # SPDX-License-Identifier: MIT from dotenv import load_dotenv +from time import sleep import logging + from deepgram.utils import verboselogs -from time import sleep from deepgram import ( DeepgramClient,