diff --git a/backend/ai_response_processor/main.py b/backend/ai_response_processor/main.py
new file mode 100644
index 00000000..151a4992
--- /dev/null
+++ b/backend/ai_response_processor/main.py
@@ -0,0 +1,19 @@
+from fastapi import FastAPI
+import os
+import threading
+from dotenv import load_dotenv
+
+from news.contentqueue import rabbitmq_consumer
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+load_dotenv(os.path.join(BASE_DIR, ".env"))
+
+app = FastAPI()
+
+
+@app.on_event("startup")
+def start_consumer() -> None:
+    # start_consuming() blocks forever, so run it in a daemon thread; the
+    # original version started consuming at import time, which meant this
+    # module never finished importing and the API never came up.
+    threading.Thread(target=rabbitmq_consumer.start_consuming, daemon=True).start()
diff --git a/backend/ai_response_processor/news/contentqueue/__init__.py b/backend/ai_response_processor/news/contentqueue/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/backend/ai_response_processor/news/contentqueue/rabbitmq_consumer.py b/backend/ai_response_processor/news/contentqueue/rabbitmq_consumer.py
new file mode 100644
index 00000000..ab512b0f
--- /dev/null
+++ b/backend/ai_response_processor/news/contentqueue/rabbitmq_consumer.py
@@ -0,0 +1,47 @@
+import json
+import logging
+import os
+
+from dotenv import load_dotenv
+from pika import BlockingConnection, ConnectionParameters, PlainCredentials
+
+from news.crud.news_summarizer import summarize_news
+
+load_dotenv()
+logger = logging.getLogger(__name__)
+
+
+def callback(ch, method, properties, body):
+    """Handle one queued message: parse the JSON body and summarize it."""
+    logger.info(" [x] Queue Received ")
+    received_data = body.decode()
+
+    try:
+        received_data_json = json.loads(received_data)
+        if 'content' in received_data_json:
+            summarize_news(news_id=received_data_json['id'], content=received_data_json['content'])
+        else:
+            logger.error("Error: 'content' key is missing in the received data.")
+    except json.JSONDecodeError:
+        logger.error("Error decoding JSON from the received data.")
+
+
+def start_consuming():
+    """Connect to RabbitMQ, bind the summary queue and block consuming it."""
+    credentials = PlainCredentials(username=os.getenv('RABBITMQ_USERNAME'),
+                                   password=os.getenv('RABBITMQ_PASSWORD'))
+    connection = BlockingConnection(ConnectionParameters(host=os.getenv('RABBITMQ_HOST'),
+                                                         port=int(os.getenv('RABBITMQ_PORT')),
+                                                         credentials=credentials))
+    channel = connection.channel()
+    channel.exchange_declare(exchange=os.getenv('SUMMARY_EXCHANGE'),
+                             exchange_type='direct',
+                             durable=True)
+    channel.queue_declare(queue=os.getenv('SUMMARY_QUEUE'), durable=True)
+    channel.queue_bind(exchange=os.getenv('SUMMARY_EXCHANGE'),
+                       queue=os.getenv('SUMMARY_QUEUE'),
+                       routing_key=os.getenv('SUMMARY_KEY'))
+    # NOTE(review): auto_ack=True acknowledges before processing, so a crash
+    # in callback() loses the message — consider manual acks if that matters.
+    channel.basic_consume(queue=os.getenv('SUMMARY_QUEUE'), on_message_callback=callback, auto_ack=True)
+    channel.start_consuming()
diff --git a/backend/ai_response_processor/news/contentqueue/rabbitmq_producer.py b/backend/ai_response_processor/news/contentqueue/rabbitmq_producer.py
new file mode 100644
index 00000000..ecd6f92b
--- /dev/null
+++ b/backend/ai_response_processor/news/contentqueue/rabbitmq_producer.py
@@ -0,0 +1,50 @@
+import json
+import os
+
+from dotenv import load_dotenv
+from pika import BasicProperties, BlockingConnection, ConnectionParameters, PlainCredentials
+
+from news.schema.message_item import MessageItem
+
+# Load .env before reading settings: the original imported load_dotenv but
+# never called it, so int(os.getenv('RABBITMQ_PORT')) crashed on import.
+load_dotenv()
+
+CONFIG = {
+    'username': os.getenv('RABBITMQ_USERNAME'),
+    'password': os.getenv('RABBITMQ_PASSWORD'),
+    'host': os.getenv('RABBITMQ_HOST'),
+    'port': int(os.getenv('RABBITMQ_PORT')),
+    'queue_name': os.getenv('STORE_QUEUE'),
+    'exchange_name': os.getenv('STORE_EXCHANGE'),
+    'routing_key': os.getenv('STORE_KEY'),
+}
+
+CONTENT_TYPE = 'application/json'
+
+
+def get_connection_params():
+    """Build pika ConnectionParameters from CONFIG."""
+    credentials = PlainCredentials(username=CONFIG['username'], password=CONFIG['password'])
+    return ConnectionParameters(host=CONFIG['host'],
+                                credentials=credentials,
+                                heartbeat=600,
+                                blocked_connection_timeout=300)
+
+
+def send_message(message: MessageItem):
+    """Publish one MessageItem as JSON to the durable store queue."""
+    connection = BlockingConnection(get_connection_params())
+    channel = connection.channel()
+    channel.queue_declare(queue=CONFIG['queue_name'], durable=True)
+
+    # delivery_mode=2 marks the message persistent; the queue is durable, so
+    # delivery_mode=1 (transient) would lose messages on broker restart.
+    props = BasicProperties(content_type=CONTENT_TYPE, delivery_mode=2)
+    serialized_message = json.dumps(message.__dict__)
+
+    channel.basic_publish(exchange=CONFIG['exchange_name'],
+                          routing_key=CONFIG['routing_key'],
+                          body=serialized_message,
+                          properties=props)
+    connection.close()
diff --git a/backend/ai_response_processor/news/crud/__init__.py b/backend/ai_response_processor/news/crud/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/backend/ai_response_processor/news/crud/news_summarizer.py b/backend/ai_response_processor/news/crud/news_summarizer.py
new file mode 100644
index 00000000..e8de57d8
--- /dev/null
+++ b/backend/ai_response_processor/news/crud/news_summarizer.py
@@ -0,0 +1,40 @@
+import logging
+import os
+
+from langchain.chat_models import ChatOpenAI
+from langchain.prompts import ChatPromptTemplate
+
+from news.contentqueue.rabbitmq_producer import send_message
+from news.schema.message_item import MessageItem
+
+logger = logging.getLogger(__name__)
+
+
+def summarize_news(news_id: int, content: str):
+    """Summarize a news article with OpenAI and publish the summary."""
+    # Lazy %s formatting: the original passed news_id with no placeholder,
+    # which raises a formatting error inside the logging machinery.
+    logger.info("summarize_news start : %s", news_id)
+    # Pass the key to the constructor; assigning it to the class attribute
+    # (ChatOpenAI.openai_api_key = ...) does not configure the client.
+    chat = ChatOpenAI(temperature=0.3, openai_api_key=os.environ['OPENAI_API_KEY'])
+
+    template = ChatPromptTemplate.from_messages(
+        [
+            ("system", "You're a news summarizer. Also, the answer must be no more than 500 characters in Korean."),
+            ("user", "{raw_news_content}"),
+        ]
+    )
+
+    # The original wrapped content in a set literal ({content}), which renders
+    # as "{'...'}" in the prompt; pass the raw string.
+    prompt = template.format_messages(
+        raw_news_content=content
+    )
+
+    result = chat.predict_messages(prompt)
+    message_item = MessageItem(
+        id=news_id,
+        content=result.content
+    )
+    send_message(message_item)
diff --git a/backend/ai_response_processor/news/schema/__init__.py b/backend/ai_response_processor/news/schema/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/backend/ai_response_processor/news/schema/message_item.py b/backend/ai_response_processor/news/schema/message_item.py
new file mode 100644
index 00000000..7def3770
--- /dev/null
+++ b/backend/ai_response_processor/news/schema/message_item.py
@@ -0,0 +1,7 @@
+from dataclasses import dataclass
+
+
+@dataclass
+class MessageItem:
+    id: int
+    content: str
diff --git a/backend/ai_response_processor/requirements.txt b/backend/ai_response_processor/requirements.txt
new file mode 100644
index 00000000..7555686d
--- /dev/null
+++ b/backend/ai_response_processor/requirements.txt
@@ -0,0 +1,6 @@
+langchain==0.0.332
+python-dotenv==1.0.0
+openai==0.28.0
+uvicorn==0.28.0
+pika
+fastapi