-
Notifications
You must be signed in to change notification settings - Fork 19
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #213 from sirji-ai/code-reviewer
Code reviewer
- Loading branch information
Showing
51 changed files
with
604 additions
and
207 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,4 +1,4 @@ | ||
sirji-messages==0.0.30 | ||
sirji-messages==0.0.32 | ||
sirji-tools==0.0.16 | ||
openai==1.35.7 | ||
anthropic==0.29.0 | ||
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,86 @@ | ||
from sirji_tools.logger import create_logger | ||
from sirji_messages import message_parse, MessageParsingError, MessageValidationError, ActionEnum, AgentEnum, allowed_response_templates, permissions_dict, ActionEnum | ||
from ..model_providers.factory import LLMProviderFactory | ||
from .system_prompts.factory import SystemPromptsFactory | ||
from ...decorators import retry_on_exception | ||
|
||
class GenericAgentInfer():
    """Drives LLM inference for a generic Sirji agent.

    Builds the conversation (system prompt + history + incoming message),
    calls the configured LLM provider, and retries when the model's reply
    cannot be parsed into a valid Sirji message.
    """

    def __init__(self, config, agent_output_folder_index, file_summaries=None):
        # Initialize the logger as an instance variable, named after the agent id.
        self.logger = create_logger(f"{config['id']}.log", 'debug')

        self.logger.info(config)
        self.logger.info(agent_output_folder_index)

        self.config = config
        self.agent_output_folder_index = agent_output_folder_index
        self.file_summaries = file_summaries

    def message(self, input_message, history=None):
        """Process one incoming message and return the LLM's response.

        Returns a tuple of
        (response_message, conversation, prompt_tokens, completion_tokens).
        """
        # Fix: 'history' previously defaulted to a mutable [] — a shared
        # default list would be appended to by __prepare_conversation when
        # non-empty. Default to None and normalize here instead.
        conversation = self.__prepare_conversation(
            input_message, history if history is not None else [])

        self.logger.info(f"Incoming: \n{input_message}")
        # Fix: the old log line claimed "OpenAI" unconditionally, but the
        # provider is resolved via LLMProviderFactory and may not be OpenAI.
        self.logger.info("Calling the LLM provider's chat completions API\n")

        response_message, prompt_tokens, completion_tokens = self.__get_response(conversation)

        return response_message, conversation, prompt_tokens, completion_tokens

    def __prepare_conversation(self, input_message, history):
        """Assemble the conversation list, refreshing the system prompt.

        A fresh session gets a new system message; an existing session has
        its system message regenerated so prompt changes take effect
        mid-session. The parsed incoming message is appended as the user turn.
        """
        if not history:
            conversation = [
                {"role": "system", "content": SystemPromptsFactory.get_system_prompt(self.config, self.agent_output_folder_index)}]
        else:
            if history[0]['role'] == "system":
                # Regenerate the system prompt for ongoing sessions.
                history[0]['content'] = SystemPromptsFactory.get_system_prompt(self.config, self.agent_output_folder_index)
            conversation = history

        parsed_input_message = message_parse(input_message)
        conversation.append({"role": "user", "content": input_message, "parsed_content": parsed_input_message})

        return conversation

    def __get_response(self, conversation):
        """Call the LLM, retrying up to 2 extra times on unparseable replies.

        Token counts are accumulated across retries. On each parse failure the
        bad reply plus a corrective user message are appended to the
        conversation; after 3 failed attempts the last parsing error is raised.
        Returns (response_message, prompt_tokens, completion_tokens).
        """
        retry_llm_count = 0
        response_message = ''
        prompt_tokens = 0
        completion_tokens = 0

        while True:
            response_message, current_prompt_tokens, current_completion_tokens = self.__call_llm(conversation)

            prompt_tokens += current_prompt_tokens
            completion_tokens += current_completion_tokens
            try:
                # Attempt parsing; success ends the retry loop.
                parsed_response_message = message_parse(response_message)
                conversation.append({"role": "assistant", "content": response_message, "parsed_content": parsed_response_message})
                break
            except (MessageParsingError, MessageValidationError) as e:
                # Handling both MessageParsingError and MessageValidationError similarly
                self.logger.info("Error while parsing the message.\n")
                retry_llm_count += 1
                if retry_llm_count > 2:
                    raise e
                self.logger.info("Requesting LLM to resend the message in correct format.\n")
                conversation.append({"role": "assistant", "content": response_message, "parsed_content": {}})
                # Todo: @vaibhav - Change the error message language later.
                conversation.append({"role": "user", "content": "Error! Your last response has two action in it and both has been discarded because of the below error:\nError in processing your last response. Your response must conform strictly to one of the allowed Response Templates, as it will be processed programmatically and only these templates are recognized. Your response must be enclosed within '***' at the beginning and end, without any additional text above or below these markers. Not conforming above rules will lead to response processing errors."})
            except Exception as e:
                self.logger.info(f"Generic error while parsing message. Error: {e}\n")
                raise e

        return response_message, prompt_tokens, completion_tokens

    @retry_on_exception()
    def __call_llm(self, conversation):
        """Strip parsed_content and forward role/content pairs to the provider."""
        history = [{"role": message['role'], "content": message['content']}
                   for message in conversation]

        model_provider = LLMProviderFactory.get_instance()

        return model_provider.get_response(history, self.logger)
Empty file.
110 changes: 110 additions & 0 deletions
110
agents/sirji_agents/llm/generic/system_prompts/anthropic.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,110 @@ | ||
import json | ||
import os | ||
import textwrap | ||
|
||
from sirji_messages import ActionEnum, AgentEnum, allowed_response_templates, permissions_dict, ActionEnum | ||
|
||
class AnthropicSystemPrompt:
    """Builds the system prompt for a generic agent when targeting Anthropic models."""

    def __init__(self, config, agent_output_folder_index):
        # config: the agent's configuration dict (id, name, skills, sub_agents, ...).
        # agent_output_folder_index: index of files previously written by agents.
        self.config = config
        self.agent_output_folder_index = agent_output_folder_index

    def system_prompt(self):
        """Compose and return the full system prompt string for this agent.

        The prompt concatenates: intro, instructions, the agent's pseudo code,
        response-format rules, folder terminology, the allowed response
        templates (including per-sub-agent invocation templates), the current
        agent output index, and the project folder structure.
        """

        initial_intro = textwrap.dedent(f"""
            You are an agent named "{self.config['name']}", a component of the Sirji AI agentic framework. Sirji is a framework that enables developers to create and run custom AI agents for their everyday development tasks. A Custom Agent is a modular AI component that performs specific tasks based on predefined pseudocode.
            Your Agent ID: {self.config['id']}
            Your OS (referred as SIRJI_OS later): {os.name}
            You are an expert having skill: {self.config['skills'][0]['skill']}""")

        instructions = textwrap.dedent(f"""
            You must follow these instructions:
            1. Convert all points in your pseudo code into plain English steps with a maximum of 10 words each. Log these steps using the LOG_STEPS action.
            2. After logging the steps, follow your pseudo code step by step to the best of your ability. Following each pseudo code step in the specified order is mandatory. Dont miss to follow any of these steps.
            3. If any step is not applicable or cannot be followed, use the DO_NOTHING action to skip it.""")

        pseudo_code = "\nYour pseudo code which you must follow:\n" + self.config['skills'][0]['pseudo_code']

        response_specifications = textwrap.dedent(f"""
            Your response must adhere rigorously to the following rules, without exception, to avoid critical system failures:
            - Conform precisely to one of the Allowed Response Templates, as the system processes only these templates correctly.
            - Enclose the entire response within '***' markers at both the beginning and the end, without any additional text outside these markers.
            - Respond with only one action at a time.""")

        understanding_the_folders = textwrap.dedent("""
            Terminologies:
            1. Project Folder:
            - The Project Folder is your primary directory for accessing all user-specific project files, including code files, documentation, and other relevant resources.
            - When initializing Sirji, the SIRJI_USER selects this folder as the primary workspace for the project. You should refer to this folder exclusively for accessing and modifying project-specific files.
            2. Agent Output Folder:
            - The Agent Output Folder is designated for storing the results and data outputs generated by the agents (like you) of Sirji.
            - Ensure you do not confuse this folder with the Project Folder; remember, no project source files are stored here.
            - This folder is different from the project folder and this ensures that operational data is kept separate from project files.
            3. Agent Output Index:
            - The Agent Output Index is an index file for the Agent Output Folder that keeps track of all files written by agents in that folder along with the a brief description of the file contents.
            - The Agent Output Index will look as follows:
            {{
                'agent_id/file_name': {{
                    'description': 'description of the file contents'
                    'created_by': 'agent_id'
                }}
            }}""")

        allowed_response_templates_str = textwrap.dedent("""
            Allowed Response Templates:
            Below are all the possible allowed "Response Template" formats for each of the allowed recipients. You must always respond using one of them.""")

        # Per-sub-agent invocation templates: one for a fresh session and one
        # for continuing an existing session.
        if "sub_agents" in self.config and self.config["sub_agents"]:
            for sub_agent in self.config["sub_agents"]:

                allowed_response_templates_str += textwrap.dedent(f"""
                    Allowed Response Templates to {sub_agent['id']}:
                    For invoking the {sub_agent['id']}, in a fresh session, use the following response template. Please respond with the following, including the starting and ending '***', with no commentary above or below.
                    Response template:
                    ***
                    FROM: {{Your Agent ID}}
                    TO: {sub_agent['id']}
                    ACTION: INVOKE_AGENT
                    STEP: "provide the step number here for the ongoing step if any."
                    SUMMARY: {{Display a concise summary to the user, describing the action using the present continuous tense.}}
                    BODY:
                    {{Purpose of invocation.}}
                    ***""") + '\n'

                allowed_response_templates_str += textwrap.dedent(f"""
                    For invoking the {sub_agent['id']}, continuing over the existing session session, use the following response template. Please respond with the following, including the starting and ending '***', with no commentary above or below.
                    Response template:
                    ***
                    FROM: {{Your Agent ID}}
                    TO: {sub_agent['id']}
                    ACTION: INVOKE_AGENT_EXISTING_SESSION
                    STEP: "provide the step number here for the ongoing step if any."
                    SUMMARY: {{Display a concise summary to the user, describing the action using the present continuous tense.}}
                    BODY:
                    {{Purpose of invocation.}}
                    ***""") + '\n'

        allowed_response_templates_str += '\n' + allowed_response_templates(AgentEnum.ANY, AgentEnum.SIRJI_USER, permissions_dict[(AgentEnum.ANY, AgentEnum.SIRJI_USER)]) + '\n'

        # Fix: copy the permission set before mutating it. The original code
        # called .add() directly on the set stored in the module-level
        # permissions_dict, leaking this agent's accessible_actions into every
        # other consumer of the dict and across agent instances.
        action_list = set(permissions_dict[(AgentEnum.ANY, AgentEnum.EXECUTOR)])
        for action in self.config.get("accessible_actions", []):
            action_list.add(ActionEnum[action])
        allowed_response_templates_str += '\n' + allowed_response_templates(AgentEnum.ANY, AgentEnum.EXECUTOR, action_list) + '\n'

        allowed_response_templates_str += "For updating in project folder use either FIND_AND_REPLACE, INSERT_ABOVE or INSERT_BELOW actions. Ensure you provide the exact matching string in find from file, with the exact number of lines and proper indentation for insert and replace actions.\n"
        allowed_response_templates_str += '\n' + allowed_response_templates(AgentEnum.ANY, AgentEnum.CALLER, permissions_dict[(AgentEnum.ANY, AgentEnum.CALLER)]) + '\n'

        current_agent_output_index = f"Current contents of Agent Output Index:\n{json.dumps(self.agent_output_folder_index, indent=4)}"

        current_project_folder_structure = f"Recursive structure of the project folder:\n{os.environ.get('SIRJI_PROJECT_STRUCTURE')}"

        return f"{initial_intro}\n{instructions}\n{pseudo_code}\n{response_specifications}\n{understanding_the_folders}\n{allowed_response_templates_str}\n\n{current_agent_output_index}\n\n{current_project_folder_structure}".strip()
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.