From e4376ef1c3fdcd1df30507f8529bb1f392bbbb57 Mon Sep 17 00:00:00 2001
From: Mark Liffiton
Date: Mon, 24 Jun 2024 11:49:46 -0500
Subject: [PATCH] Improvements to the experiment tutor/chat interface.

---
 src/codehelp/prompts.py                     | 43 +++++++++++++++++++
 .../templates/tutor_chat_component.html     | 30 +++++++++----
 src/codehelp/templates/tutor_view.html      |  9 +++-
 src/codehelp/tutor.py                       | 38 ++++------------
 4 files changed, 80 insertions(+), 40 deletions(-)

diff --git a/src/codehelp/prompts.py b/src/codehelp/prompts.py
index 7423d86..753e926 100644
--- a/src/codehelp/prompts.py
+++ b/src/codehelp/prompts.py
@@ -128,3 +128,46 @@ def make_topics_prompt(language: str, code: str, error: str, issue: str, respons
     ]

     return messages
+
+
+chat_template_sys = jinja_env.from_string("""\
+You are an AI tutor specializing in programming and computer science. Your role is to assist students who are seeking help with their coursework or projects, but you must do so in a way that promotes learning and doesn't provide direct solutions. Here are your guidelines:
+
+1. Always maintain a supportive and encouraging tone.
+2. Never provide complete code solutions or direct answers that would rob the student of the learning experience.
+3. Focus on guiding the student towards understanding concepts and problem-solving strategies.
+4. Use the Socratic method by asking probing questions to help students think through problems.
+5. Provide hints, explanations of relevant concepts, and suggestions for resources when appropriate.
+6. Encourage good coding practices.
+
+When a student asks a question, follow this process:
+
+1. Analyze the question to identify the core concept or problem the student is struggling with.
+2. Consider what foundational knowledge the student might be missing.
+3. Think about how you can guide the student towards the solution without giving it away.
+4. In your conversation, include:
+   a. Clarifying questions (as needed)
+   b. Explanations of relevant concepts
+   c. Hints or suggestions to guide their thinking
+   d. Encouragement to attempt the problem themselves
+5. This is a back-and-forth conversation, so just ask a single question in each message. Wait for the answer to a given question before asking another.
+6. Use markdown formatting, including ` for inline code.
+
+Do not provide direct solutions or complete code snippets. Instead, focus on guiding the student's learning process.
+
+The topic of this chat from the student is: {{ topic }}
+
+If the topic is broad and it could take more than one chat session to cover all aspects of it, first ask the student to clarify what, specifically, they are attempting to learn about it.
+
+{% if context %}
+Additional context that may be relevant to this chat:
+
+{{ context }}
+
+{% endif %}
+""")
+
+tutor_monologue = """I am a Socratic tutor. I am trying to help the user learn a topic by leading them to understanding, not by telling them things directly. I should check to see how well the user understands each aspect of what I am teaching. But if I just ask them if they understand, they may say yes even if they don't, so I should NEVER ask if they understand something. Instead of asking "does that make sense?", I need to check their understanding by asking them a question that makes them demonstrate understanding. It should be a question for which they can only answer correctly if they understand the concept, and it should not be a question I've already given an answer for myself. If and only if they can apply the knowledge correctly, then I should move on to the next piece of information."""
+
+def make_chat_sys_prompt(topic: str, context: str) -> str:
+    return chat_template_sys.render(topic=topic, context=context)
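The two new module-level objects are meant to be used together: make_chat_sys_prompt() renders the system prompt for a chat's topic and optional context, and tutor_monologue is appended later as a trailing assistant message (see the tutor.py hunks below). A rough sketch of the intended usage, assuming the package is importable as codehelp.prompts; the topic and context values here are illustrative only:

    from codehelp import prompts  # import path assumed from the src/codehelp/ layout

    # Render the system prompt for a sample chat. The "Additional context" block is
    # included only because a non-empty context string is passed to the template.
    sys_prompt = prompts.make_chat_sys_prompt(
        topic="linked lists",
        context="Intro course; students know classes but have not covered recursion.",
    )
    print(sys_prompt)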
diff --git a/src/codehelp/templates/tutor_chat_component.html b/src/codehelp/templates/tutor_chat_component.html
index 4d098d6..5044036 100644
--- a/src/codehelp/templates/tutor_chat_component.html
+++ b/src/codehelp/templates/tutor_chat_component.html
@@ -8,31 +8,43 @@
{% for message in chat_messages %}
- {{ 'You' if message['role'] == 'user' else 'Helper' }}
+ {{ 'You' if message['role'] == 'user' else 'Tutor' }}
-
+
{{message['content'] | markdown}}
{% endfor %}
@@ -40,7 +52,7 @@
You
-
+
diff --git a/src/codehelp/templates/tutor_view.html b/src/codehelp/templates/tutor_view.html
index e3777cd..7097630 100644
--- a/src/codehelp/templates/tutor_view.html
+++ b/src/codehelp/templates/tutor_view.html
@@ -16,7 +16,14 @@

{{topic}}

-

[showing context for debugging -- normally hidden] {{context}}

+ {% if context %}
+

+

+ Context provided to tutor:
+ {{context | markdown}}
+
+

+ {% endif %}
 {# debounce on the submit handler so that the form's actual submit fires *before* the form elements are disabled #}
diff --git a/src/codehelp/tutor.py b/src/codehelp/tutor.py
index 434ad76..0acbe6d 100644
--- a/src/codehelp/tutor.py
+++ b/src/codehelp/tutor.py
@@ -16,6 +16,8 @@ from openai.types.chat import ChatCompletionMessageParam
 from werkzeug.wrappers.response import Response

+from . import prompts
+

 class ChatNotFoundError(Exception):
     pass

@@ -184,7 +186,7 @@ def run_chat_round(llm_dict: LLMDict, chat_id: int, message: str|None = None) ->
     except (ChatNotFoundError, AccessDeniedError):
         return

-    # Add the given message(s) to the chat
+    # Add the new message to the chat
     if message is not None:
         chat.append({
             'role': 'user',
@@ -194,36 +196,12 @@ def run_chat_round(llm_dict: LLMDict, chat_id: int, message: str|None = None) ->
     save_chat(chat_id, chat)

     # Get a response (completion) from the API using an expanded version of the chat messages
-    # Insert an opening "from" the user and an internal monologue to guide the assistant before generating it's actual response
-    opening_msg = """\
-You are a Socratic tutor for helping me learn about a computer science topic. The topic is given in the previous message.
-
-If the topic is broad and it could take more than one chat session to cover all aspects of it, please ask me to clarify what, specifically, I'm attempting to learn about it.
-
-I will not understand a lot of detail at once, so I need you to carefully add a small amount at a time. I don't want you to just tell me how something works directly, but rather start by asking me about what I do know and prompting me from there to help me develop my understanding. Before moving on, always ask me to answer a question or solve a problem with these characteristics:
- - Answering correctly requires understanding the current topic well.
- - The answer is not found in what you have told me.
- - I can reasonably be expected to answer correctly given what I seem to know so far.
-"""
-    context_msg = f"I have this additional context about teaching the user this topic:\n\n{context}"
-    monologue = """[Internal monologue] I am a Socratic tutor. I am trying to help the user learn a topic by leading them to understanding, not by telling them things directly. I should check to see how well the user understands each aspect of what I am teaching. But if I just ask them if they understand, they will say yes even if they don't, so I should NEVER ask if they understand something. Instead of asking "does that make sense?", I need to check their understanding by asking them a question that makes them demonstrate understanding. It should be a question for which they can only answer correctly if they understand the concept, and it should not be a question I've already given an answer for myself. If and only if they can apply the knowledge correctly, then I should move on to the next piece of information.
-
-I can use Markdown formatting in my responses."""
-
-    expanded_chat : list[ChatCompletionMessageParam] = []
-
-    expanded_chat.extend([
-        {'role': 'user', 'content': topic},
-        {'role': 'user', 'content': opening_msg},
-    ])
-
-    if context:
-        expanded_chat.append({'role': 'assistant', 'content': context_msg})
-
-    expanded_chat.extend([
+    # Insert a system prompt beforehand and an internal monologue after to guide the assistant
+    expanded_chat : list[ChatCompletionMessageParam] = [
+        {'role': 'system', 'content': prompts.make_chat_sys_prompt(topic, context)},
         *chat,  # chat is a list; expand it here with *
-        {'role': 'assistant', 'content': monologue},
-    ])
+        {'role': 'assistant', 'content': prompts.tutor_monologue},
+    ]

     response_obj, response_txt = get_response(llm_dict, expanded_chat)
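Net effect of the tutor.py change: inside run_chat_round(), every request passed to get_response() now has the three-part shape sketched below. The middle section is the saved chat history, expanded with *chat; the user message shown here is illustrative only.

    # Sketch of the expanded message list after this patch (within run_chat_round):
    expanded_chat = [
        # 1. System prompt: tutoring guidelines, the chat topic, and optional context
        {'role': 'system', 'content': prompts.make_chat_sys_prompt(topic, context)},
        # 2. The saved conversation so far (user/assistant turns)
        {'role': 'user', 'content': 'Can you help me understand binary search?'},
        # 3. The fixed assistant "internal monologue" the model continues from
        {'role': 'assistant', 'content': prompts.tutor_monologue},
    ]
    response_obj, response_txt = get_response(llm_dict, expanded_chat)

Ending with the monologue as an assistant message appears intended to keep the model in its Socratic role, continuing directly from its own reminder to ask a checking question rather than lecture.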