diff --git a/code/app/.dockerignore b/.dockerignore
similarity index 100%
rename from code/app/.dockerignore
rename to .dockerignore
diff --git a/.gitignore b/.gitignore
index 95c394d..79e23ed 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
 venv/
 .vscode/
-temp/
\ No newline at end of file
+temp/
+docs/
+__pycache__/
\ No newline at end of file
diff --git a/code/app/.streamlit/config.prod.toml b/.streamlit/config.prod.toml
similarity index 100%
rename from code/app/.streamlit/config.prod.toml
rename to .streamlit/config.prod.toml
diff --git a/code/app/.streamlit/config.toml b/.streamlit/config.toml
similarity index 100%
rename from code/app/.streamlit/config.toml
rename to .streamlit/config.toml
diff --git a/code/app/Dockerfile b/Dockerfile
similarity index 100%
rename from code/app/Dockerfile
rename to Dockerfile
diff --git a/code/app/app.py b/app.py
similarity index 100%
rename from code/app/app.py
rename to app.py
diff --git a/code/app/__pycache__/helper.cpython-38.pyc b/code/app/__pycache__/helper.cpython-38.pyc
deleted file mode 100644
index 5389857..0000000
Binary files a/code/app/__pycache__/helper.cpython-38.pyc and /dev/null differ
diff --git a/code/app/main copy.py b/code/app/main copy.py
deleted file mode 100644
index 33c38b7..0000000
--- a/code/app/main copy.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import os
-import streamlit as st
-import logging
-
-logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
-
-
-class App:
-    def __init__(self):
-        st.set_page_config(page_title="Azure A.I. Demo Gallery", page_icon="src/assets/img/logo.png", layout="wide", initial_sidebar_state="auto")
-
-    def _get_source_code(self, script_name):
-        file = open("src/assets/scripts/" + script_name + '.py', "r")
-        content = file.read()
-        return content
-
-    def main(self):
-        topic_demo = {"Language": {"Language Detection": "lang_detect",
-                                   "Sentiment Analysis": "sentiment_analysis",
-                                   "Key Phrase Extraction": "key_phrases",
-                                   "Entity Extraction": "entity_extraction",
-                                   "Entity Linking": "entity_linking",
-                                   "Text Translation": "text_translation",
-                                   "Speech to Text": "speech_to_text",
-                                   "Text to Speech": "text_to_speech",
-                                   "Speech to Speech Translation": "speech_to_speech"},
-                      "Computer Vision": {"Object Classification": "object_classification",
-                                          "Object Detection": "object_detection" }}
-
-        with st.sidebar:
-            st.title("Gallery")
-            show_source_code = st.checkbox("Show Source Code", False)
-
-        selected_topic = st.sidebar.selectbox(
-            "Select Topic",
-            options=sorted(topic_demo.keys())
-        )
-
-        demo = topic_demo[selected_topic]
-        selected_demo = st.sidebar.selectbox(
-            "Select Demo",
-            options=sorted(demo)
-        )
-
-        st.sidebar.title("About")
-        st.sidebar.info(
-            "This app demonstrates a variety of Azure A.I. services in the domains of language & vision.\n\n"
-            "Developed by Jon-Paul Boyd. \n\n"
\n\n" - "Check the code at https://github.com/corticalstack/azure-ai-demo-gallery" - ) - - python_code = self._get_source_code(demo[selected_demo]) - if python_code is not None: - st.header(selected_demo) - try: - with st.spinner(f"Loading {selected_demo} ..."): - exec(python_code, globals()) - except Exception as exception: - st.write("Error occurred when executing [{0}]".format(selected_demo)) - st.error(str(exception)) - logging.error(exception) - if show_source_code: - st.write("\n") - st.subheader("Source code") - st.code(python_code) - - -if __name__ == "__main__": - app = App() - app.main() \ No newline at end of file diff --git a/code/app/temp/speech_to_text_audio.wav b/code/app/temp/speech_to_text_audio.wav deleted file mode 100644 index 5b01fb7..0000000 Binary files a/code/app/temp/speech_to_text_audio.wav and /dev/null differ diff --git a/code/app/test.py b/code/app/test.py deleted file mode 100644 index 85956ee..0000000 --- a/code/app/test.py +++ /dev/null @@ -1,139 +0,0 @@ -""" -recorder based on streamlit-webrtc -streamlit run st_recorder.py --server.port 8606 -""" -from streamlit_webrtc import ( - webrtc_streamer, - WebRtcMode, - WebRtcStreamerContext, -) -from aiortc.contrib.media import MediaRecorder -import soundfile as sf -import numpy as np -import matplotlib.pyplot as plt -import streamlit as st -import queue -from pathlib import Path -import time -import pydub - -# from streamlit_lottie import st_lottie -import json - -# file_ = '16581-audio.json' -# with open(file_, 'r', encoding='utf-8') as f: -# lottie_json = json.load(f) - -TMP_DIR = Path('temp') -if not TMP_DIR.exists(): - TMP_DIR.mkdir(exist_ok=True, parents=True) - -MEDIA_STREAM_CONSTRAINTS = { - "video": False, - "audio": { - # these setting doesn't work - # "sampleRate": 48000, - # "sampleSize": 16, - # "channelCount": 1, - "echoCancellation": False, # don't turn on else it would reduce wav quality - "noiseSuppression": True, - "autoGainControl": True, - }, -} - - -def aiortc_audio_recorder(wavpath): - def recorder_factory(): - return MediaRecorder(wavpath) - - webrtc_ctx: WebRtcStreamerContext = webrtc_streamer( - key="sendonly-audio", - # mode=WebRtcMode.SENDONLY, - mode=WebRtcMode.SENDRECV, - in_recorder_factory=recorder_factory, - media_stream_constraints=MEDIA_STREAM_CONSTRAINTS, - ) - - -def save_frames_from_audio_receiver(wavpath): - webrtc_ctx = webrtc_streamer( - key="sendonly-audio", - mode=WebRtcMode.SENDONLY, - media_stream_constraints=MEDIA_STREAM_CONSTRAINTS, - ) - - if "audio_buffer" not in st.session_state: - st.session_state["audio_buffer"] = pydub.AudioSegment.empty() - - status_indicator = st.empty() - lottie = False - while True: - if webrtc_ctx.audio_receiver: - try: - audio_frames = webrtc_ctx.audio_receiver.get_frames(timeout=1) - except queue.Empty: - status_indicator.info("No frame arrived.") - continue - - # if not lottie: # voice gif - # st_lottie(lottie_json, height=80) - # lottie = True - - for i, audio_frame in enumerate(audio_frames): - sound = pydub.AudioSegment( - data=audio_frame.to_ndarray().tobytes(), - sample_width=audio_frame.format.bytes, - frame_rate=audio_frame.sample_rate, - channels=len(audio_frame.layout.channels), - ) - # st.markdown(f'{len(audio_frame.layout.channels)}, {audio_frame.format.bytes}, {audio_frame.sample_rate}') - # 2, 2, 48000 - st.session_state["audio_buffer"] += sound - else: - lottie = True - break - - audio_buffer = st.session_state["audio_buffer"] - - if not webrtc_ctx.state.playing and len(audio_buffer) > 0: - audio_buffer.export(wavpath, 
format="wav") - st.session_state["audio_buffer"] = pydub.AudioSegment.empty() - - -def display_wavfile(wavpath): - audio_bytes = open(wavpath, 'rb').read() - file_type = Path(wavpath).suffix - st.audio(audio_bytes, format=f'audio/{file_type}', start_time=0) - - -def plot_wav(wavpath): - audio, sr = sf.read(str(wavpath)) - fig = plt.figure() - plt.plot(audio) - plt.xticks( - np.arange(0, audio.shape[0], sr / 2), np.arange(0, audio.shape[0] / sr, 0.5) - ) - plt.xlabel('time') - st.pyplot(fig) - - -def record_page(): - st.markdown('# recorder') - if "wavpath" not in st.session_state: - cur_time = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()) - tmp_wavpath = TMP_DIR / f'{cur_time}.wav' - st.session_state["wavpath"] = str(tmp_wavpath) - - wavpath = st.session_state["wavpath"] - - aiortc_audio_recorder(wavpath) # first way - # save_frames_from_audio_receiver(wavpath) # second way - - if Path(wavpath).exists(): - st.markdown(wavpath) - display_wavfile(wavpath) - plot_wav(wavpath) - - -if __name__ == "__main__": - record_page() \ No newline at end of file diff --git a/code/app/testsrc.py b/code/app/testsrc.py deleted file mode 100644 index 6bdac36..0000000 --- a/code/app/testsrc.py +++ /dev/null @@ -1,151 +0,0 @@ -import os -import streamlit as st -from streamlit.server.server import Server -from streamlit.script_run_context import add_script_run_ctx -import pandas as pd -import http.client -#from urllib import request, parse, error -from azure.core.credentials import AzureKeyCredential -from azure.ai.textanalytics import TextAnalyticsClient -import requests -import json -import azure.cognitiveservices.speech as speech_sdk - -from streamlit_webrtc import VideoProcessorBase, WebRtcMode, webrtc_streamer, WebRtcStreamerContext - -from aiortc.contrib.media import MediaRecorder - -import queue - -import soundfile as sf -import numpy as np -import matplotlib.pyplot as plt - -import queue -from pathlib import Path -import time -import pydub - - -TMP_DIR = Path('temp') -if not TMP_DIR.exists(): - TMP_DIR.mkdir(exist_ok=True, parents=True) - -MEDIA_STREAM_CONSTRAINTS = { - "video": False, - "audio": { - # these setting doesn't work - # "sampleRate": 48000, - # "sampleSize": 16, - # "channelCount": 1, - "echoCancellation": False, # don't turn on else it would reduce wav quality - "noiseSuppression": True, - "autoGainControl": True, - }, -} - -class App: - def __init__(self): - - if "config_loaded" not in st.session_state: - st.session_state.update({ - "http_headers": self._get_session_http_headers(), - "cog_endpoint": os.environ["AZ_COG_ENDPOINT"], - "cog_key": os.environ["AZ_COG_KEY"], - "cog_region": os.environ["AZ_COG_REGION"] - }) - - def _get_source_code(self): - import urllib.request - url = 'https://raw.githubusercontent.com/corticalstack/azure-ai-demo-gallery/master/code/app/main.py' - try: - data = urllib.request.urlopen(url).read() - except urllib.error.HTTPError as exception: # type: ignore - pass - - return data.decode("utf-8") - - def _get_session_http_headers(self): - headers = { - "site_host": "", - "logged_in_user_name": "", - "site_deployment_id": "" - } - - session_id = add_script_run_ctx().streamlit_script_run_ctx.session_id - session_info = Server.get_current()._get_session_info(session_id) - - # Note case of headers differs from shown in xxx.scm.azurewebsites.net/env - try: - if "Host" in session_info.ws.request.headers._dict: - headers["site_host"] = session_info.ws.request.headers._dict["Host"] - - if "X-Ms-Client-Principal-Name" in session_info.ws.request.headers._dict: - 
headers["logged_in_user_name"] = session_info.ws.request.headers._dict["X-Ms-Client-Principal-Name"] - - if "X-Site-Deployment-Id" in session_info.ws.request.headers._dict: - headers["site_deployment_id"] = session_info.ws.request.headers._dict["X-Site-Deployment-Id"] - except Exception as ex: - pass - return headers - - def main(self): - if not st.session_state.http_headers["site_host"]: - st.session_state.http_headers = self._get_session_http_headers() - #st.title("test") - - if "audio_buffer" not in st.session_state: - st.session_state["audio_buffer"] = pydub.AudioSegment.empty() - - # Demo example using the REST api rather than the Python SDK - - - input_text = st.text_input('Input Text', 'Hello') - if input_text: - lang_name, _ = self.get_language(input_text) - if lang_name: - st.write("Language detected:", lang_name) - - def get_language(self, text): - api_endpoint = "/text/analytics/v3.1/languages?" - lang_name = None - lang_iso6391Name = None - - try: - jsonBody = { - "documents":[ - {"id": 1, - "text": text} - ] - } - - uri = st.session_state.cog_endpoint.rstrip("/").replace("https://", "") - conn = http.client.HTTPSConnection(uri) - - headers = { - "Content-Type": "application/json", - "Ocp-Apim-Subscription-Key": st.session_state.cog_key - } - - conn.request("POST", api_endpoint, str(jsonBody).encode("utf-8"), headers) - response = conn.getresponse() - data = response.read().decode("UTF-8") - - if response.status == 200: - results = json.loads(data) - for document in results["documents"]: - print(document) - lang_name = document["detectedLanguage"]["name"] - lang_iso6391Name = document["detectedLanguage"]["iso6391Name"] - - conn.close() - - except Exception as ex: - st.write(ex) - - return lang_name, lang_iso6391Name - - -if __name__ == "__main__": - app = App() - app.main() \ No newline at end of file diff --git a/code/app/docker-compose.yaml b/docker-compose.yaml similarity index 100% rename from code/app/docker-compose.yaml rename to docker-compose.yaml diff --git a/code/app/requirements.txt b/requirements.txt similarity index 100% rename from code/app/requirements.txt rename to requirements.txt diff --git a/code/app/src/assets/img/favicon.ico.ico b/src/assets/img/favicon.ico.ico similarity index 100% rename from code/app/src/assets/img/favicon.ico.ico rename to src/assets/img/favicon.ico.ico diff --git a/code/app/src/assets/img/logo.png b/src/assets/img/logo.png similarity index 100% rename from code/app/src/assets/img/logo.png rename to src/assets/img/logo.png diff --git a/code/app/src/assets/scripts/complex_ocr.py b/src/assets/scripts/complex_ocr.py similarity index 100% rename from code/app/src/assets/scripts/complex_ocr.py rename to src/assets/scripts/complex_ocr.py diff --git a/code/app/src/assets/scripts/entity_extraction.py b/src/assets/scripts/entity_extraction.py similarity index 100% rename from code/app/src/assets/scripts/entity_extraction.py rename to src/assets/scripts/entity_extraction.py diff --git a/code/app/src/assets/scripts/entity_linking.py b/src/assets/scripts/entity_linking.py similarity index 100% rename from code/app/src/assets/scripts/entity_linking.py rename to src/assets/scripts/entity_linking.py diff --git a/code/app/src/assets/scripts/face_analysis.py b/src/assets/scripts/face_analysis.py similarity index 100% rename from code/app/src/assets/scripts/face_analysis.py rename to src/assets/scripts/face_analysis.py diff --git a/code/app/src/assets/scripts/image_analysis.py b/src/assets/scripts/image_analysis.py similarity index 100% rename from 
rename to src/assets/scripts/image_analysis.py
diff --git a/code/app/src/assets/scripts/key_phrases.py b/src/assets/scripts/key_phrases.py
similarity index 100%
rename from code/app/src/assets/scripts/key_phrases.py
rename to src/assets/scripts/key_phrases.py
diff --git a/code/app/src/assets/scripts/lang_detect.py b/src/assets/scripts/lang_detect.py
similarity index 100%
rename from code/app/src/assets/scripts/lang_detect.py
rename to src/assets/scripts/lang_detect.py
diff --git a/code/app/src/assets/scripts/object_classification.py b/src/assets/scripts/object_classification.py
similarity index 100%
rename from code/app/src/assets/scripts/object_classification.py
rename to src/assets/scripts/object_classification.py
diff --git a/code/app/src/assets/scripts/object_detection.py b/src/assets/scripts/object_detection.py
similarity index 100%
rename from code/app/src/assets/scripts/object_detection.py
rename to src/assets/scripts/object_detection.py
diff --git a/code/app/src/assets/scripts/sentiment_analysis.py b/src/assets/scripts/sentiment_analysis.py
similarity index 100%
rename from code/app/src/assets/scripts/sentiment_analysis.py
rename to src/assets/scripts/sentiment_analysis.py
diff --git a/code/app/src/assets/scripts/simple_ocr.py b/src/assets/scripts/simple_ocr.py
similarity index 100%
rename from code/app/src/assets/scripts/simple_ocr.py
rename to src/assets/scripts/simple_ocr.py
diff --git a/code/app/src/assets/scripts/speech_to_speech.py b/src/assets/scripts/speech_to_speech.py
similarity index 100%
rename from code/app/src/assets/scripts/speech_to_speech.py
rename to src/assets/scripts/speech_to_speech.py
diff --git a/code/app/src/assets/scripts/speech_to_text.py b/src/assets/scripts/speech_to_text.py
similarity index 100%
rename from code/app/src/assets/scripts/speech_to_text.py
rename to src/assets/scripts/speech_to_text.py
diff --git a/code/app/src/assets/scripts/text_to_speech.py b/src/assets/scripts/text_to_speech.py
similarity index 100%
rename from code/app/src/assets/scripts/text_to_speech.py
rename to src/assets/scripts/text_to_speech.py
diff --git a/code/app/src/assets/scripts/text_translation.py b/src/assets/scripts/text_translation.py
similarity index 100%
rename from code/app/src/assets/scripts/text_translation.py
rename to src/assets/scripts/text_translation.py