diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..435b1fd --- /dev/null +++ b/.dockerignore @@ -0,0 +1,97 @@ +# 가상환경 +poetry.lock +pyproject.toml + +# Data +data/ + +# Git +.git +.github +.gitignore +.gitattributes +*.md + +# CI +.codeclimate.yml +.travis.yml +.taskcluster.yml + +# Docker +docker-compose.yml +Dockerfile +.docker +.dockerignore + +# Byte-compiled / optimized / DLL files +**/__pycache__/ +**/*.py[cod] + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.cache +nosetests.xml +coverage.xml + +# Translations +*.mo +*.pot + +# Django stuff: +*.log + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Virtual environment +.env +.venv/ +venv/ + +# PyCharm +.idea + +# Python mode for VIM +.ropeproject +**/.ropeproject + +# Vim swap files +**/*.swp + +# VS Code +.vscode/ diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..c17c09d --- /dev/null +++ b/Dockerfile @@ -0,0 +1,17 @@ +FROM python:3.10-slim + +WORKDIR /app + +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +COPY . . 
+ +# Create a startup script +RUN echo '#!/bin/bash\n\ +python main.py &\n\ +streamlit run app.py' > start.sh + +RUN chmod +x start.sh + +CMD ["/bin/bash", "start.sh"] diff --git a/back/kakao.py b/back/kakao.py index 07ed33e..54c0892 100644 --- a/back/kakao.py +++ b/back/kakao.py @@ -1,7 +1,7 @@ import secrets from typing import Optional -from config import CLIENT_ID, CLIENT_SECRET, OUTSIDE_IP, PORT, STREAMLIT_PORT +from config import CLIENT_ID, CLIENT_SECRET, OUTSIDE_IP, PORT from fastapi import APIRouter, Depends, Header, HTTPException, status from fastapi.responses import RedirectResponse from fastapi_oauth_client import OAuthClient diff --git a/back/managers/mongo_config.py b/back/managers/mongo_config.py index c30cd0d..c8a55e9 100644 --- a/back/managers/mongo_config.py +++ b/back/managers/mongo_config.py @@ -1,6 +1,9 @@ import os from pymongo import MongoClient from gridfs import GridFSBucket +from dotenv import load_dotenv + +load_dotenv(override=True) # 환경 변수로부터 MongoDB 설정 읽기 username = os.getenv("MONGO_USERNAME", "admin") @@ -11,4 +14,4 @@ client = MongoClient(MONGO_URL) database = client["database"] collection = database["users"] -fs_bucket = GridFSBucket(database) \ No newline at end of file +fs_bucket = GridFSBucket(database) diff --git a/config.py b/config.py index 135e86d..a00e19b 100644 --- a/config.py +++ b/config.py @@ -1,29 +1,42 @@ import os -import yaml +import socket +import requests +from dotenv import load_dotenv + +def get_public_ip(): + response = requests.get('https://checkip.amazonaws.com') + public_ip = response.text.strip() + return public_ip + +def get_private_ip(): + try: + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.connect(("8.8.8.8", 80)) + private_ip = s.getsockname()[0] + s.close() + except Exception as e: + hostname = socket.gethostname() + private_ip = socket.gethostbyname(hostname) + return private_ip + path = os.getcwd() # 상위 폴더에서 실행된 경우 -> secret_key.yaml이 상위 폴더에 있음 # path = 
os.path.dirname(os.path.abspath(__file__)) # 현재 폴더에서 실행된 경우 -> secret_key.yaml이 현재 폴더에 있음 -with open(os.path.join(path, "secret_key.yaml"), "r") as yaml_file: - cfg = yaml.safe_load(yaml_file) - -OPENAI_API_KEY = cfg["OPENAI_API_KEY"] -COHERE_API_KEY = cfg["COHERE_API_KEY"] - -INSIDE_IP = cfg["IP"]["INSIDE_IP"] -OUTSIDE_IP = cfg["IP"]["OUTSIDE_IP"] +load_dotenv(override=True) -REST_API_KEY = cfg["Kakaologin"]["REST_API_KEY"] -REDIRECT_URI = f"http://{OUTSIDE_IP}:{cfg['PORT']}/auth" +MODEL_NAME = os.getenv("MODEL_NAME", "gpt-4o-mini") +OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") +COHERE_API_KEY = os.getenv("COHERE_API_KEY") -PORT = cfg["PORT"] -STREAMLIT_PORT = cfg["STREAMLIT"]["PORT"] +INSIDE_IP = get_private_ip() +OUTSIDE_IP = get_public_ip() -KEY_FILE = cfg["SSL"]["KEY_FILE"] -CERT_FILE = cfg["SSL"]["CERT_FILE"] +PORT = 8001 +STREAMLIT_PORT = 8501 -CLIENT_ID = cfg["CLIENT_ID"] -CLIENT_SECRET = cfg["CLIENT_SECRET"] +CLIENT_ID = os.getenv("CLIENT_ID") +CLIENT_SECRET = os.getenv("CLIENT_SECRET") DATA_DIR = os.path.join(path, "data") IMG_PATH = os.path.join(path, "data", "images") diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..cd6ac5d --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,56 @@ +version: '3.10' + +services: + # nginx: + # image: nginx:alpine + # ports: + # - "8080:80" + # - "443:443" + # volumes: + # - /etc/nginx/nginx.conf:/etc/nginx/nginx.conf:ro + # - /etc/ssl/merged_certificate.crt:/etc/ssl/merged_certificate.crt:ro + # - /etc/ssl/private/private.key:/etc/ssl/private/private.key:ro + # depends_on: + # - app + # networks: + # - app-network + + app: + image: kooqooo/hello-jobits:latest + ports: + - "8501:8501" + - "8001:8001" + volumes: + - ~/data:/app/data + environment: + - CLIENT_ID=${CLIENT_ID} + - CLIENT_SECRET=${CLIENT_SECRET} + - OPENAI_API_KEY=${OPENAI_API_KEY} + - COHERE_API_KEY=${COHERE_API_KEY} + - MODEL_NAME=${MODEL_NAME} + - MONGO_USERNAME=${MONGO_USERNAME} + - MONGO_PASSWORD=${MONGO_PASSWORD} + 
depends_on: + - mongo + networks: + - app-network + + mongo: + image: mongo:latest + ports: + - "27017:27017" + volumes: + - mongo-data:/data/db + environment: + - MONGO_INITDB_ROOT_USERNAME=${MONGO_USERNAME} + - MONGO_INITDB_ROOT_PASSWORD=${MONGO_PASSWORD} + networks: + - app-network + +networks: + app-network: + driver: bridge + +volumes: + mongo-data: + driver: local diff --git a/pages/1_home.py b/pages/1_home.py index 0eb7e25..18193b1 100644 --- a/pages/1_home.py +++ b/pages/1_home.py @@ -66,7 +66,6 @@ print("user_id : ", st.session_state["user_id"]) if "openai_api_key" not in st.session_state: - os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY st.session_state.openai_api_key = OPENAI_API_KEY if "FAV_IMAGE_PATH" not in st.session_state: diff --git a/pages/2_user.py b/pages/2_user.py index 09e5132..e4aa541 100644 --- a/pages/2_user.py +++ b/pages/2_user.py @@ -8,7 +8,7 @@ sys.path.append("./") sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) -from config import DATA_DIR, IMG_PATH, path +from config import DATA_DIR, IMG_PATH, path, CSS_PATH from loguru import logger as _logger from src.logger import DevConfig from src.util import (check_essential, get_image_base64, local_css, @@ -48,7 +48,7 @@ st.title("안녕하세요, " + st.session_state.nickname + "님!") # 사용자 이름을 받아서 화면에 출력합니다. -local_css(os.path.join(path, "front", "css", "background.css")) +local_css(os.path.join(CSS_PATH, "background.css")) # local_css("css/1_user.css") @@ -237,7 +237,7 @@ ## read job info tb -job_info, JOBS = read_job_info_tb(path + "/data/samples/job_info_tb.parquet") +job_info, JOBS = read_job_info_tb(os.path.join(DATA_DIR, "samples", "job_info_tb.parquet")) st.session_state.job_info = job_info st.session_state.logger.info("read job tb") st.session_state.logger.info(f"job info is ... 
{JOBS}") diff --git a/pages/3_gene_question.py b/pages/3_gene_question.py index 3e0a8d8..d3c889a 100644 --- a/pages/3_gene_question.py +++ b/pages/3_gene_question.py @@ -26,7 +26,7 @@ from src.rule_based import list_extend_questions_based_on_keywords from src.util import local_css, read_prompt_from_txt from src.semantic_search import faiss_inference, reranker -from config import OPENAI_API_KEY, DATA_DIR, IMG_PATH, CSS_PATH, PORT +from config import DATA_DIR, IMG_PATH, CSS_PATH, PORT, MODEL_NAME st.session_state["FAV_IMAGE_PATH"] = os.path.join(IMG_PATH, "favicon.png") st.set_page_config( @@ -80,8 +80,6 @@ """,unsafe_allow_html=True) -## set variables -MODEL_NAME = "gpt-3.5-turbo-16k" ## set save dir USER_RESUME_SAVE_DIR = os.path.join(st.session_state["save_dir"], "2_generate_question_user_resume.pdf") @@ -139,16 +137,14 @@ ### JD 사용하여 JD 추출용 프롬프트 만들기 st.session_state.logger.info("prompt JD start") - prompt_template_jd = read_prompt_from_txt(os.path.join(DATA_DIR, "test/prompt_JD_template.txt")) + prompt_template_jd = read_prompt_from_txt(os.path.join(DATA_DIR, "prompts", "prompt_JD_template.txt")) st.session_state.prompt_JD = create_prompt_with_jd(prompt_template_jd) # prompt_JD 생성완료 st.session_state.logger.info("create prompt JD object") ### 모델 세팅 그대로 - llm = ChatOpenAI(temperature=st.session_state.temperature, - model_name=MODEL_NAME, - openai_api_key=OPENAI_API_KEY) + llm = ChatOpenAI(temperature=st.session_state.temperature, model_name=MODEL_NAME) st.session_state.logger.info("create llm object") @@ -167,7 +163,7 @@ # prompt_qa_template ####################################### st.session_state.logger.info("prompt resume start") - prompt_template_resume = read_prompt_from_txt(os.path.join(DATA_DIR, "test/prompt_resume_template.txt")) + prompt_template_resume = read_prompt_from_txt(os.path.join(DATA_DIR, "prompts", "prompt_resume_template.txt")) st.session_state.logger.info("create prompt resume template") st.session_state.prompt_resume = 
create_prompt_with_resume(prompt_template_resume) @@ -178,9 +174,7 @@ st.session_state.logger.info("user_resume chunk OpenAIEmbeddings ") ### STEP 2 를 위한 새 모델 호출 - llm2 = ChatOpenAI(temperature=0.0, - model_name=MODEL_NAME, - openai_api_key=OPENAI_API_KEY) + llm2 = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME) st.session_state.chain_type_kwargs = {"prompt": st.session_state.prompt_resume} @@ -198,12 +192,12 @@ ## step3 : st.session_state.logger.info("prompt question start") - prompt_template_question = read_prompt_from_txt(os.path.join(DATA_DIR, "test/prompt_question_template.txt")) + prompt_template_question = read_prompt_from_txt(os.path.join(DATA_DIR, "prompts", "prompt_question_template.txt")) st.session_state.logger.info("create prompt question template") st.session_state.prompt_question = create_prompt_with_question(prompt_template_question) - llm3 = ChatOpenAI(temperature=0, model_name=MODEL_NAME, openai_api_key=OPENAI_API_KEY) + llm3 = ChatOpenAI(temperature=0, model_name=MODEL_NAME) st.session_state.chain = LLMChain(llm=llm3, prompt=st.session_state.prompt_question) st.session_state.main_question = st.session_state.chain.invoke({"jd": st.session_state.job_description, "resume": st.session_state.resume})['text'] ################# diff --git a/pages/3_gene_question_no_resume.py b/pages/3_gene_question_no_resume.py index f5745e5..5026277 100644 --- a/pages/3_gene_question_no_resume.py +++ b/pages/3_gene_question_no_resume.py @@ -27,7 +27,7 @@ from src.rule_based import list_extend_questions_based_on_keywords from src.util import local_css, read_prompt_from_txt from src.semantic_search import faiss_inference, reranker -from config import OPENAI_API_KEY, DATA_DIR, IMG_PATH, CSS_PATH, PORT +from config import DATA_DIR, IMG_PATH, CSS_PATH, MODEL_NAME st.session_state["FAV_IMAGE_PATH"] = os.path.join(IMG_PATH, "favicon.png") st.set_page_config( @@ -81,8 +81,6 @@ """,unsafe_allow_html=True) -## set variables -MODEL_NAME = "gpt-3.5-turbo-16k" ## set save dir 
USER_RESUME_SAVE_DIR = os.path.join(st.session_state["save_dir"], "2_generate_question_user_resume.pdf") @@ -130,16 +128,14 @@ ### JD 사용하여 JD 추출용 프롬프트 만들기 st.session_state.logger.info("prompt JD start") - prompt_template_jd = read_prompt_from_txt(os.path.join(DATA_DIR, "test/prompt_JD_template.txt")) + prompt_template_jd = read_prompt_from_txt(os.path.join(DATA_DIR, "prompts", "prompt_JD_template.txt")) st.session_state.prompt_JD = create_prompt_with_jd(prompt_template_jd) # prompt_JD 생성완료 st.session_state.logger.info("create prompt JD object") ### 모델 세팅 그대로 - llm = ChatOpenAI(temperature=st.session_state.temperature, - model_name=MODEL_NAME, - openai_api_key=OPENAI_API_KEY) + llm = ChatOpenAI(temperature=st.session_state.temperature, model_name=MODEL_NAME,) st.session_state.logger.info("create llm object") @@ -152,12 +148,12 @@ ## step2 JD 만을 이용해 질문 6개를 생성합니다 : st.session_state.logger.info("prompt question start") - prompt_noResume_question_template = read_prompt_from_txt(os.path.join(DATA_DIR, "test/prompt_noResume_question_template.txt")) + prompt_noResume_question_template = read_prompt_from_txt(os.path.join(DATA_DIR, "prompts", "prompt_noResume_question_template.txt")) st.session_state.logger.info("create no resume prompt question template") st.session_state.prompt_question = create_prompt_with_no_resume(prompt_noResume_question_template) - llm3 = ChatOpenAI(temperature=0, model_name=MODEL_NAME, openai_api_key=OPENAI_API_KEY) + llm3 = ChatOpenAI(temperature=0, model_name=MODEL_NAME) st.session_state.chain = LLMChain(llm=llm3, prompt=st.session_state.prompt_question) st.session_state.main_question = st.session_state.chain.run({"jd": st.session_state.job_description}) ################# diff --git a/pages/4_interview.py b/pages/4_interview.py index 4bba826..27b5b49 100644 --- a/pages/4_interview.py +++ b/pages/4_interview.py @@ -83,7 +83,7 @@ def next_tail_question(): # 대화내역 파일로 저장 st.session_state.interview_script_download = 
"\n\n".join(st.session_state.interview_script) -with open(st.session_state['save_dir'] + "/interview_history.txt", "w") as file: +with open(os.path.join(st.session_state['save_dir'], "interview_history.txt"), "w") as file: file.write(st.session_state.interview_script_download) ##################################### 여기서부터 모의 면접 시작 ############################################ diff --git a/pages/4_show_questions_hint.py b/pages/4_show_questions_hint.py index 59aa416..49eb567 100644 --- a/pages/4_show_questions_hint.py +++ b/pages/4_show_questions_hint.py @@ -12,7 +12,7 @@ from src.generate_question import (create_prompt_feedback, # 추가 create_prompt_hint) from src.util import read_prompt_from_txt -from config import DATA_DIR, IMG_PATH, OPENAI_API_KEY +from config import DATA_DIR, IMG_PATH, MODEL_NAME st.session_state["FAV_IMAGE_PATH"] = os.path.join(IMG_PATH, "favicon.png") st.set_page_config( @@ -23,8 +23,6 @@ layout="wide", initial_sidebar_state="collapsed", ) -#MODEL_NAME = "gpt-4-0125-preview" -MODEL_NAME = "gpt-3.5-turbo-16k" NEXT_PAGE = "introduction" st.session_state.logger.info("start show_questions page") @@ -50,8 +48,8 @@ st.title(f"{st.session_state.user_name}님의 기술면접 예상 질문입니다.🤗 ") -st.session_state.prompt_template_fb = read_prompt_from_txt(os.path.join(DATA_DIR, "test/prompt_feedback.txt")) -st.session_state.prompt_template_ht = read_prompt_from_txt(os.path.join(DATA_DIR, "test/prompt_hint.txt")) +st.session_state.prompt_template_fb = read_prompt_from_txt(os.path.join(DATA_DIR, "prompts", "prompt_feedback.txt")) +st.session_state.prompt_template_ht = read_prompt_from_txt(os.path.join(DATA_DIR, "prompts", "prompt_hint.txt")) # 각 질문에 대해 번호를 매기고 토글 위젯 생성 @@ -82,7 +80,7 @@ st.session_state.logger.info("create prompt_Feedback object") ### 모델 세팅 그대로 - llm = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME, openai_api_key=OPENAI_API_KEY) + llm = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME) st.session_state.logger.info("create llm object") @@ -117,7 +115,7 @@ 
st.session_state.logger.info("create prompt_Hint object") ### 모델 세팅 - llm = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME, openai_api_key=OPENAI_API_KEY) + llm = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME) st.session_state.logger.info("create llm object") @@ -142,7 +140,7 @@ switch_page("user") st.session_state.question_history = "\n\n".join(st.session_state.questions_showhint) -with open(st.session_state['save_dir'] + "/question_history.txt", "w") as file: +with open(os.path.join(st.session_state['save_dir'], "question_history.txt"), "w") as file: file.write(st.session_state.question_history) # 생성된 질문을 파일로 저장 # 다운로드 버튼 생성 diff --git a/requirements.txt b/requirements.txt index 6dcb980..f2c119a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,205 +1,189 @@ -aiohttp==3.9.5 +aiohappyeyeballs==2.3.5 +aiohttp==3.10.1 aiosignal==1.3.1 altair==5.3.0 -annotated-types==0.6.0 -anyio==4.3.0 +annotated-types==0.7.0 +anyio==4.4.0 asgiref==3.8.1 async-timeout==4.0.3 -attrs==23.2.0 +attrs==24.2.0 backoff==2.2.1 -bcrypt==4.1.2 +bcrypt==4.2.0 beautifulsoup4==4.12.3 -blinker==1.7.0 +blinker==1.8.2 +boto3==1.34.156 +botocore==1.34.156 build==1.2.1 -cachetools==5.3.3 -certifi==2024.2.2 -cffi==1.16.0 +cachetools==5.4.0 +certifi==2024.7.4 +cffi==1.17.0 charset-normalizer==3.3.2 -chroma-hnswlib==0.7.3 -chromadb==0.4.24 +Chroma==0.2.0 +chroma-hnswlib==0.7.6 +chromadb==0.5.5 click==8.1.7 -cohere==5.3.0 +cohere==5.6.2 coloredlogs==15.0.1 contourpy==1.2.1 -cryptography==42.0.5 +cryptography==43.0.0 cycler==0.12.1 -dataclasses-json==0.6.4 +dataclasses-json==0.6.7 Deprecated==1.2.14 distro==1.9.0 dnspython==2.6.1 entrypoints==0.4 -exceptiongroup==1.2.0 -faiss-gpu==1.7.2 -Faker==24.9.0 -fastapi==0.110.1 -fastavro==1.9.4 +exceptiongroup==1.2.2 +faiss-cpu==1.8.0.post1 +Faker==26.2.0 +fastapi==0.112.0 +fastavro==1.9.5 favicon==0.7.0 -filelock==3.13.4 +filelock==3.15.4 flatbuffers==24.3.25 -fonttools==4.51.0 +fonttools==4.53.1 frozenlist==1.4.1 -fsspec==2024.3.1 
+fsspec==2024.6.1 gitdb==4.0.11 GitPython==3.1.43 -google-auth==2.29.0 -googleapis-common-protos==1.63.0 +google-auth==2.33.0 +googleapis-common-protos==1.63.2 greenlet==3.0.3 -grpcio==1.62.1 +grpcio==1.65.4 h11==0.14.0 htbuilder==0.6.2 httpcore==1.0.5 httptools==0.6.1 httpx==0.27.0 -huggingface-hub==0.22.2 +httpx-sse==0.4.0 +huggingface-hub==0.24.5 humanfriendly==10.0 idna==3.7 -importlib-metadata==7.0.0 +importlib_metadata==8.0.0 importlib_resources==6.4.0 -Jinja2==3.1.3 -joblib==1.4.0 +Jinja2==3.1.4 +jiter==0.5.0 +jmespath==1.0.1 jsonpatch==1.33 -jsonpointer==2.4 -jsonschema==4.21.1 +jsonpointer==3.0.0 +jsonschema==4.23.0 jsonschema-specifications==2023.12.1 +jwt==1.3.1 kiwisolver==1.4.5 -kubernetes==29.0.0 -langchain==0.1.16 -langchain-chroma==0.1.0 -langchain-community==0.0.33 -langchain-core==0.1.43 -langchain-openai==0.1.3 -langchain-text-splitters==0.0.1 -langsmith==0.1.48 +kubernetes==30.1.0 +langchain==0.2.12 +langchain-chroma==0.1.2 +langchain-community==0.2.11 +langchain-core==0.2.29 +langchain-openai==0.1.20 +langchain-text-splitters==0.2.2 +langsmith==0.1.98 loguru==0.7.2 -lxml==5.2.1 +lxml==5.2.2 Markdown==3.6 markdown-it-py==3.0.0 markdownlit==0.0.7 MarkupSafe==2.1.5 -marshmallow==3.21.1 -matplotlib==3.8.4 +marshmallow==3.21.3 +matplotlib==3.9.1.post1 mdurl==0.1.2 mmh3==4.1.0 monotonic==1.6 -more-itertools==10.2.0 -motor==3.4.0 +more-itertools==10.4.0 +motor==3.5.1 mpmath==1.3.0 multidict==6.0.5 mypy-extensions==1.0.0 -networkx==3.3 numpy==1.26.4 -nvidia-cublas-cu12==12.1.3.1 -nvidia-cuda-cupti-cu12==12.1.105 -nvidia-cuda-nvrtc-cu12==12.1.105 -nvidia-cuda-runtime-cu12==12.1.105 -nvidia-cudnn-cu12==8.9.2.26 -nvidia-cufft-cu12==11.0.2.54 -nvidia-curand-cu12==10.3.2.106 -nvidia-cusolver-cu12==11.4.5.107 -nvidia-cusparse-cu12==12.1.0.106 -nvidia-nccl-cu12==2.19.3 -nvidia-nvjitlink-cu12==12.4.127 -nvidia-nvtx-cu12==12.1.105 oauthlib==3.2.2 -onnxruntime==1.17.3 -openai==1.20.0 -opentelemetry-api==1.24.0 -opentelemetry-exporter-otlp-proto-common==1.24.0 
-opentelemetry-exporter-otlp-proto-grpc==1.24.0 -opentelemetry-instrumentation==0.45b0 -opentelemetry-instrumentation-asgi==0.45b0 -opentelemetry-instrumentation-fastapi==0.45b0 -opentelemetry-proto==1.24.0 -opentelemetry-sdk==1.24.0 -opentelemetry-semantic-conventions==0.45b0 -opentelemetry-util-http==0.45b0 -orjson==3.10.1 +onnxruntime==1.18.1 +openai==1.40.1 +opentelemetry-api==1.26.0 +opentelemetry-exporter-otlp-proto-common==1.26.0 +opentelemetry-exporter-otlp-proto-grpc==1.26.0 +opentelemetry-instrumentation==0.47b0 +opentelemetry-instrumentation-asgi==0.47b0 +opentelemetry-instrumentation-fastapi==0.47b0 +opentelemetry-proto==1.26.0 +opentelemetry-sdk==1.26.0 +opentelemetry-semantic-conventions==0.47b0 +opentelemetry-util-http==0.47b0 +orjson==3.10.6 overrides==7.7.0 -packaging==23.2 +packaging==24.1 pandas==2.2.2 -passlib==1.7.4 -pillow==10.3.0 -plotly==5.20.0 +parameterized==0.9.0 +pillow==10.4.0 +plotly==5.23.0 posthog==3.5.0 prometheus_client==0.20.0 -protobuf==4.25.3 -pulsar-client==3.5.0 -pyarrow==15.0.2 +protobuf==4.25.4 +pyarrow==17.0.0 pyasn1==0.6.0 pyasn1_modules==0.4.0 pycparser==2.22 -pydantic==2.7.0 -pydantic_core==2.18.1 -pydeck==0.8.1b0 -Pygments==2.17.2 -PyJWT==2.8.0 -pymdown-extensions==10.7.1 -pymongo==4.6.3 +pydantic==2.8.2 +pydantic_core==2.20.1 +pydeck==0.9.1 +Pygments==2.18.0 +pymdown-extensions==10.9 +pymongo==4.8.0 pyparsing==3.1.2 -pypdf==4.2.0 PyPika==0.48.9 -pyproject_hooks==1.0.0 +pyproject_hooks==1.1.0 python-dateutil==2.9.0.post0 python-dotenv==1.0.1 python-multipart==0.0.9 pytz==2024.1 -PyYAML==6.0.1 -referencing==0.34.0 -regex==2024.4.16 -requests==2.31.0 +PyYAML==6.0.2 +referencing==0.35.1 +regex==2024.7.24 +requests==2.32.3 requests-oauthlib==2.0.0 -requests-toolbelt==1.0.0 rich==13.7.1 -rpds-py==0.18.0 +rpds-py==0.20.0 rsa==4.9 -safetensors==0.4.3 -scikit-learn==1.4.2 -scipy==1.13.0 -sentence-transformers==2.7.0 +s3transfer==0.10.2 shellingham==1.5.4 six==1.16.0 smmap==5.0.1 sniffio==1.3.1 soupsieve==2.5 -SQLAlchemy==2.0.29 
+SQLAlchemy==2.0.32 st-annotated-text==4.0.1 +st-theme==1.2.3 starlette==0.37.2 -streamlit==1.33.0 +streamlit==1.37.1 streamlit-camera-input-live==0.2.0 -streamlit-card==1.0.0 +streamlit-card==1.0.2 streamlit-embedcode==0.1.2 -streamlit-extras==0.4.2 +streamlit-extras==0.4.6 streamlit-faker==0.0.3 -streamlit-image-coordinates==0.1.6 +streamlit-image-coordinates==0.1.9 streamlit-keyup==0.2.4 streamlit-toggle-switch==1.0.2 streamlit-vertical-slider==2.5.5 -sympy==1.12 -tenacity==8.2.3 -threadpoolctl==3.4.0 -tiktoken==0.6.0 -tokenizers==0.15.2 +sympy==1.13.1 +tenacity==8.5.0 +tiktoken==0.7.0 +tokenizers==0.19.1 toml==0.10.2 tomli==2.0.1 toolz==0.12.1 -torch==2.2.2 -tornado==6.4 -tqdm==4.66.2 -transformers==4.39.3 -triton==2.2.0 +tornado==6.4.1 +tqdm==4.66.5 typer==0.12.3 -types-requests==2.31.0.20240406 +types-requests==2.32.0.20240712 typing-inspect==0.9.0 -typing_extensions==4.11.0 +typing_extensions==4.12.2 tzdata==2024.1 -urllib3==2.2.1 -uvicorn==0.29.0 +urllib3==2.2.2 +uvicorn==0.30.5 uvloop==0.19.0 -validators==0.28.0 -watchdog==4.0.0 -watchfiles==0.21.0 -websocket-client==1.7.0 +validators==0.33.0 +watchdog==4.0.1 +watchfiles==0.23.0 +websocket-client==1.8.0 websockets==12.0 wrapt==1.16.0 yarl==1.9.4 -zipp==3.18.1 +zipp==3.19.2 diff --git a/src/gene_question_2chain_ver.py b/src/gene_question_2chain_ver.py index 506ea76..781f72b 100644 --- a/src/gene_question_2chain_ver.py +++ b/src/gene_question_2chain_ver.py @@ -20,10 +20,10 @@ from streamlit_extras.switch_page_button import switch_page from util import local_css, read_prompt_from_txt -from config import OPENAI_API_KEY # OPENAI_API_KEY 불러오기 +from config import MODEL_NAME, CSS_PATH DATA_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data") -st.session_state["FAV_IMAGE_PATH"] = os.path.join(DATA_DIR, "images/favicon.png") +st.session_state["FAV_IMAGE_PATH"] = os.path.join(DATA_DIR, "images", "favicon.png") st.set_page_config( page_title="Hello Jobits", # 브라우저탭에 뜰 제목 page_icon=Image.open( @@ 
-36,14 +36,12 @@ st.session_state.logger.info("start") NEXT_PAGE = "show_questions_hint" -MY_PATH = os.path.dirname(os.path.dirname(__file__)) - #### style css #### MAIN_IMG = st.session_state.MAIN_IMG LOGO_IMG = st.session_state.LOGO_IMG -local_css(MY_PATH + "/css/background.css") -local_css(MY_PATH + "/css/2_generate_question.css") +local_css(os.path.join(CSS_PATH, "background.css")) +local_css(os.path.join(CSS_PATH, "2_generate_question.css")) st.markdown(f"""