Merge pull request openshift#4 from thoraxe/restructure
big refactor
thoraxe authored Nov 2, 2023
2 parents 9073616 + 8b59afb commit 0ff27ea
Showing 14 changed files with 31 additions and 21 deletions.
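
In short: the helper modules move out of the repo root into a modules/ package, the indexer moves under tools/, and every import becomes package-qualified. A minimal sketch of the resulting layout and import style follows; the file names come from the renames below, modules/model_context.py is inferred from the updated import lines, and anything else is illustrative only.

    # Layout implied by this diff (illustrative, not exhaustive):
    #
    #   __init__.py
    #   ols.py
    #   modules/
    #       __init__.py
    #       model_context.py      # inferred from the import changes below
    #       question_validator.py
    #       task_breakdown.py
    #       task_performer.py
    #       task_processor.py
    #       task_rephraser.py
    #       yes_no_classifier.py
    #   tools/
    #       indexer.py
    #
    # After the move, imports are package-qualified, e.g. (run from the repo root):
    from modules.model_context import get_watsonx_predictor, get_watsonx_context
    from modules.task_breakdown import TaskBreakdown
    from modules.task_processor import TaskProcessor

The relocated modules can still be exercised directly, e.g. python -m modules.task_breakdown from the repo root, as the docstrings added in this diff note.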
5 changes: 4 additions & 1 deletion .gitignore
@@ -1,4 +1,7 @@
__pycache__
tmp
data
-vector-db
+vector-db
+
+# ignore files that begin with tmptest_
+tmptest_*
Empty file added __init__.py
Empty file.
Empty file added modules/__init__.py
Empty file.
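
These empty __init__.py files mark the repo root and the new modules/ directory as Python packages; the one in modules/ is what lets the modules.* imports in the files below resolve. A quick sanity check, assuming the repo root is the working directory:

    # run from the repo root: "modules" now resolves as a regular package
    import modules
    print(modules.__file__)  # points at modules/__init__.py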
File renamed without changes.
3 changes: 2 additions & 1 deletion question_validator.py → modules/question_validator.py
@@ -1,6 +1,6 @@
import logging
import sys
-from model_context import get_watsonx_predictor
+from modules.model_context import get_watsonx_predictor
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

@@ -95,6 +95,7 @@ def validate_question(self, conversation, query, **kwargs):
        return response['text']

if __name__ == "__main__":
+    """to execute, from the repo root, use python -m modules.question_validator"""
    import argparse

    parser = argparse.ArgumentParser(
3 changes: 2 additions & 1 deletion task_breakdown.py → modules/task_breakdown.py
@@ -1,6 +1,6 @@
import llama_index
from llama_index import StorageContext, load_index_from_storage
-from model_context import get_watsonx_context
+from modules.model_context import get_watsonx_context
from llama_index.prompts import Prompt, PromptTemplate
import logging
import sys
@@ -101,6 +101,7 @@ def breakdown_tasks(self, conversation, query, **kwargs):


if __name__ == "__main__":
+    """to execute, from the repo root, use python -m modules.task_breakdown"""
    import argparse

    parser = argparse.ArgumentParser(
3 changes: 2 additions & 1 deletion task_performer.py → modules/task_performer.py
@@ -1,7 +1,7 @@
import logging
import sys
from string import Template
-from model_context import get_watsonx_predictor
+from modules.model_context import get_watsonx_predictor

DEFAULT_MODEL = "ibm/granite-13b-chat-grounded-v01"

@@ -67,6 +67,7 @@ def perform_task(self, conversation, task, **kwargs):


if __name__ == "__main__":
+    """to execute, from the repo root, use python -m modules.task_performer"""
    import argparse

    parser = argparse.ArgumentParser(description="Perform a task")
9 changes: 5 additions & 4 deletions task_processor.py → modules/task_processor.py
@@ -1,9 +1,9 @@
import logging
import sys
-from model_context import get_watsonx_predictor
-from yes_no_classifier import YesNoClassifier
-from task_performer import TaskPerformer
-from task_rephraser import TaskRephraser
+from modules.model_context import get_watsonx_predictor
+from modules.yes_no_classifier import YesNoClassifier
+from modules.task_performer import TaskPerformer
+from modules.task_rephraser import TaskRephraser
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

@@ -153,6 +153,7 @@ def process_tasks(self, conversation, tasklist, original_query, **kwargs):


if __name__ == "__main__":
+    """to execute, from the repo root, use python -m modules.task_processor"""
    import argparse

    parser = argparse.ArgumentParser(description="Process a list of tasks")
7 changes: 4 additions & 3 deletions task_rephraser.py → modules/task_rephraser.py
@@ -1,8 +1,8 @@
import logging
import sys
-from model_context import get_watsonx_predictor
-from yes_no_classifier import YesNoClassifier
-from task_performer import TaskPerformer
+from modules.model_context import get_watsonx_predictor
+from modules.yes_no_classifier import YesNoClassifier
+from modules.task_performer import TaskPerformer
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

@@ -85,6 +85,7 @@ def rephrase_task(self, conversation, task, original_query, **kwargs):
        return response['text']

if __name__ == "__main__":
+    """to execute, from the repo root, use python -m modules.task_rephraser"""
    import argparse

    parser = argparse.ArgumentParser(
3 changes: 2 additions & 1 deletion yes_no_classifier.py → modules/yes_no_classifier.py
@@ -1,6 +1,6 @@
import logging, sys
from string import Template
-from model_context import get_watsonx_predictor
+from modules.model_context import get_watsonx_predictor
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

@@ -89,6 +89,7 @@ def classify(self, conversation, string, **kwargs):


if __name__ == "__main__":
+    """to execute, from the repo root, use PYTHONPATH="$PYTHONPATH:`pwd`" python modules/yes_no_classifier.py"""
    import argparse

    parser = argparse.ArgumentParser(description="Process a list of tasks")
6 changes: 3 additions & 3 deletions ols.py
@@ -2,8 +2,8 @@
from fastapi import FastAPI, HTTPException

from pydantic import BaseModel
-from task_breakdown import TaskBreakdown
-from task_processor import TaskProcessor
+from modules.task_breakdown import TaskBreakdown
+from modules.task_processor import TaskProcessor

import logging, sys, os

@@ -16,7 +16,7 @@
import uuid

## internal stuff
-from model_context import get_watsonx_predictor
+from modules.model_context import get_watsonx_predictor

instruct_model = os.getenv("INSTRUCT_MODEL", "ibm/granite-13b-instruct-v1")
rag_model = os.getenv("RAG_MODEL", "ibm/granite-13b-chat-grounded-v01")
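
As the context lines above show, ols.py reads its model choices from environment variables with Granite defaults. A hedged sketch of overriding them before ols is imported; the variable names and the default values are taken from this hunk, the override value is a placeholder:

    # illustrative only: INSTRUCT_MODEL / RAG_MODEL and the Granite defaults come
    # from the hunk above; "some-org/some-other-model" is a placeholder, not a real model id
    import os
    os.environ["INSTRUCT_MODEL"] = "some-org/some-other-model"
    os.environ.setdefault("RAG_MODEL", "ibm/granite-13b-chat-grounded-v01")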
11 changes: 6 additions & 5 deletions requirements.txt
@@ -1,8 +1,9 @@
fastapi
-uvicorn
-llama_index
-langchain
-ibm_watson_machine_learning
ibm-generative-ai
+ibm_watson_machine_learning
+kubernetes
+langchain
+llama_index
+torch
transformers
-torch
+uvicorn
2 changes: 1 addition & 1 deletion indexer.py → tools/indexer.py
@@ -2,7 +2,7 @@
import llama_index
from llama_index import VectorStoreIndex, SimpleDirectoryReader
from llama_index.storage.storage_context import StorageContext
-from model_context import get_watsonx_context
+from modules.model_context import get_watsonx_context

import logging, sys, os
#logging.basicConfig(stream=sys.stdout, level=logging.INFO)
File renamed without changes.
