style: Black up

Ramimashkouk committed Dec 27, 2024
1 parent f2546d1 commit 4e8b002

Showing 8 changed files with 38 additions and 30 deletions.
3 changes: 1 addition & 2 deletions backend/chatsky_ui/api/api_v1/endpoints/flows.py
@@ -9,9 +9,8 @@
 from chatsky_ui.api.deps import get_build_manager
 from chatsky_ui.core.config import settings
 from chatsky_ui.db.base import read_conf, write_conf
-from chatsky_ui.services.process_manager import BuildManager
 from chatsky_ui.schemas.front_graph_components.llm_model import LLMModel
-
+from chatsky_ui.services.process_manager import BuildManager

 router = APIRouter()

backend/chatsky_ui/schemas/front_graph_components/info_holders/response.py
@@ -14,6 +14,7 @@ class TextResponse(Response):
 class CustomResponse(Response):
     code: str

+
 class LLMResponse(Response):
     model_name: str
     prompt: Optional[str] = None

backend/chatsky_ui/schemas/front_graph_components/llm_model.py
@@ -1,5 +1,5 @@
 import os
-from typing import Optional, ClassVar
+from typing import ClassVar, Optional

 from dotenv import load_dotenv
 from pydantic import Field, model_validator
@@ -19,11 +19,7 @@ class LLMModel(BaseComponent):
         },
     }

-    MODEL_TO_KEY: ClassVar = {
-        model: config["api_key"]
-        for _, config in PROVIDERS.items()
-        for model in config["models"]
-    }
+    MODEL_TO_KEY: ClassVar = {model: config["api_key"] for _, config in PROVIDERS.items() for model in config["models"]}

     name: str
     llm: str

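For orientation: the comprehension that Black collapsed onto one line flattens the nested PROVIDERS mapping into a flat model-to-env-var lookup. A minimal runnable sketch, with a hypothetical PROVIDERS value standing in for the real mapping that the diff truncates:

# Hypothetical stand-in for the PROVIDERS mapping truncated out of the hunk above.
PROVIDERS = {
    "openai": {"api_key": "OPENAI_API_KEY", "models": ["gpt-4o", "gpt-4o-mini"]},
    "anthropic": {"api_key": "ANTHROPIC_API_KEY", "models": ["claude-3-5-sonnet"]},
}

# Same expression as the reformatted class attribute: each model name maps to its provider's API-key variable.
MODEL_TO_KEY = {model: config["api_key"] for _, config in PROVIDERS.items() for model in config["models"]}

assert MODEL_TO_KEY["gpt-4o"] == "OPENAI_API_KEY"
assert MODEL_TO_KEY["claude-3-5-sonnet"] == "ANTHROPIC_API_KEY"
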
16 changes: 7 additions & 9 deletions backend/chatsky_ui/services/json_converter/llm_converter.py
@@ -11,7 +11,8 @@ def __init__(self, llm_models_config: List[dict]):
                 name=config["name"],
                 llm=config["modelName"],
                 system_prompt=config.get("systemPrompt"),
-            ) for config in llm_models_config
+            )
+            for config in llm_models_config
         ]

     def _convert(self):
@@ -21,15 +22,12 @@ def _convert(self):
                 "model": {
                     "external:langchain_openai.ChatOpenAI": {
                         "model": model.llm,
-                        "api_key": {
-                            "external:os.getenv": model.MODEL_TO_KEY[model.llm]
-                        },
-                        "base_url": {
-                            "external:os.getenv": "LLM_API_BASE_URL"
-                        }
+                        "api_key": {"external:os.getenv": model.MODEL_TO_KEY[model.llm]},
+                        "base_url": {"external:os.getenv": "LLM_API_BASE_URL"},
                     }
                 },
-                "system_prompt": model.system_prompt
+                "system_prompt": model.system_prompt,
             }
-            } for model in self.models
+            }
+            for model in self.models
         }

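For orientation (not part of the commit): each iteration of the reformatted comprehension yields a per-model mapping of roughly this shape; the model name, LLM id, env-var name, and prompt below are placeholders, not values from the repo.

# Hypothetical per-model entry produced inside LLMModelsConverter._convert().
entry = {
    "model": {
        "external:langchain_openai.ChatOpenAI": {
            "model": "gpt-4o",  # placeholder for model.llm
            "api_key": {"external:os.getenv": "OPENAI_API_KEY"},  # assumed env-var name
            "base_url": {"external:os.getenv": "LLM_API_BASE_URL"},
        }
    },
    "system_prompt": "You are a helpful assistant.",  # placeholder prompt
}
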
backend/chatsky_ui/services/json_converter/logic_component_converter/condition_converter.py
@@ -1,7 +1,7 @@
 from abc import ABC, abstractmethod

 from ....core.config import settings
-from ....schemas.front_graph_components.info_holders.condition import CustomCondition, SlotCondition, LLMCondition
+from ....schemas.front_graph_components.info_holders.condition import CustomCondition, LLMCondition, SlotCondition
 from ..base_converter import BaseConverter
 from ..consts import CONDITIONS_FILE, CUSTOM_FILE
 from .service_replacer import store_custom_service
@@ -70,19 +70,25 @@ class LLMConditionConverter(ConditionConverter):
     def __init__(self, condition: dict):
         super().__init__()
         try:
-            self.condition = LLMCondition(name=condition["data"]["name"], model_name=condition["data"]["model_name"], prompt=condition["data"]["prompt"])
+            self.condition = LLMCondition(
+                name=condition["data"]["name"],
+                model_name=condition["data"]["model_name"],
+                prompt=condition["data"]["prompt"],
+            )
         except KeyError as missing_key:
             raise BadConditionException("Missing key in LLM condition data") from missing_key

     def _convert(self):
         super()._convert()

         condition_data = self.condition.model_dump()
-        condition_data.update({
-            "method": {
-                "chatsky.llm.methods.Contains": {
-                    "pattern": '"TRUE"',
-                }
+        condition_data.update(
+            {
+                "method": {
+                    "chatsky.llm.methods.Contains": {
+                        "pattern": '"TRUE"',
+                    }
+                }
             }
-        })
+        )
         return {"chatsky.conditions.llm.LLMCondition": condition_data}

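For orientation (not from the commit): after model_dump() and the update(...) call, _convert returns a payload of roughly this shape. The name, model_name, and prompt values here are placeholders; the field set follows the LLMCondition constructor in the hunk above.

# Hypothetical return value of LLMConditionConverter._convert(); values are placeholders.
expected = {
    "chatsky.conditions.llm.LLMCondition": {
        "name": "is_refund_request",
        "model_name": "main_model",
        "prompt": "Does the user ask for a refund?",
        "method": {"chatsky.llm.methods.Contains": {"pattern": '"TRUE"'}},
    }
}
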
backend/chatsky_ui/services/json_converter/logic_component_converter/response_converter.py
@@ -1,5 +1,5 @@
 from ....core.config import settings
-from ....schemas.front_graph_components.info_holders.response import CustomResponse, TextResponse, LLMResponse
+from ....schemas.front_graph_components.info_holders.response import CustomResponse, LLMResponse, TextResponse
 from ..base_converter import BaseConverter
 from ..consts import CUSTOM_FILE, RESPONSES_FILE
 from .service_replacer import store_custom_service
@@ -53,6 +53,6 @@ def __init__(self, response: dict):
             )
         except KeyError as e:
             raise BadResponseException("Missing key in LLM response data") from e
-
+
     def _convert(self):
         return {"chatsky.responses.llm.LLMResponse": self.response.model_dump()}

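Similarly (and also not part of the commit), LLMResponseConverter._convert would return roughly the following; the values are placeholders and the field set follows the LLMResponse schema shown earlier (model_name, optional prompt).

# Hypothetical return value of LLMResponseConverter._convert(); values are placeholders.
expected = {"chatsky.responses.llm.LLMResponse": {"model_name": "main_model", "prompt": "Summarize the dialogue."}}
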
12 changes: 10 additions & 2 deletions backend/chatsky_ui/services/json_converter/node_converter.py
@@ -2,8 +2,16 @@

 from ...schemas.front_graph_components.node import InfoNode, LinkNode
 from .base_converter import BaseConverter
-from .logic_component_converter.condition_converter import CustomConditionConverter, SlotConditionConverter, LLMConditionConverter
-from .logic_component_converter.response_converter import CustomResponseConverter, TextResponseConverter, LLMResponseConverter
+from .logic_component_converter.condition_converter import (
+    CustomConditionConverter,
+    LLMConditionConverter,
+    SlotConditionConverter,
+)
+from .logic_component_converter.response_converter import (
+    CustomResponseConverter,
+    LLMResponseConverter,
+    TextResponseConverter,
+)


 class NodeConverter(BaseConverter):

backend/chatsky_ui/services/json_converter/pipeline_converter.py
@@ -11,9 +11,9 @@
 from ...schemas.front_graph_components.pipeline import Pipeline
 from .base_converter import BaseConverter
 from .interface_converter import InterfaceConverter
+from .llm_converter import LLMModelsConverter
 from .script_converter import ScriptConverter
 from .slots_converter import SlotsConverter
-from .llm_converter import LLMModelsConverter


 class PipelineConverter(BaseConverter):
