feat: settings for the language model and misc changes
Signed-off-by: David de Hilster <[email protected]>
dehilsterlexis committed May 6, 2024
1 parent cb2dff7 commit 3ed37a6
Showing 2 changed files with 24 additions and 8 deletions.
10 changes: 10 additions & 0 deletions package.json
@@ -1285,6 +1285,16 @@
"type": "object",
"title": "ECL",
"properties": {
"ecl.languageModel": {
"type": "string",
"scope": "resource",
"enum": [
"copilot-gpt-3.5-turbo",
"copilot-gpt-4"
],
"default": "copilot-gpt-3.5-turbo",
"description": "%Language Model (faster copilot-gpt-3.5-turbo, slower more powerful copilot-gpt-4%)"
},
"ecl.eclccArgs": {
"type": "array",
"scope": "resource",
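For reference, here is a minimal sketch (not part of this commit) of how an extension typically reads a contributed setting like ecl.languageModel; the helper name getLanguageModelId and the change listener are illustrative assumptions, while the fallback value mirrors the default declared above:

import * as vscode from "vscode";

// Hypothetical helper: resolve the configured model id, falling back to the
// default declared in package.json when the user has not overridden it.
function getLanguageModelId(): string {
    const config = vscode.workspace.getConfiguration("ecl");
    // The two-argument overload of get() returns the default instead of
    // undefined, so the result is always a plain string.
    return config.get<string>("languageModel", "copilot-gpt-3.5-turbo");
}

// Optional: react when the user changes the setting at runtime.
vscode.workspace.onDidChangeConfiguration(e => {
    if (e.affectsConfiguration("ecl.languageModel")) {
        console.log("ECL language model is now", getLanguageModelId());
    }
});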
22 changes: 14 additions & 8 deletions src/chat/main.ts
@@ -21,23 +21,27 @@ interface IECLChatResult extends vscode.ChatResult {
    }
}

-const LANGUAGE_MODEL_ID = "copilot-gpt-3.5-turbo"; // Use faster model. Alternative is 'copilot-gpt-4', which is slower but more powerful

export function activate(context: vscode.ExtensionContext) {

    // Define an ECL chat handler.

    const handler: vscode.ChatRequestHandler = async (request: vscode.ChatRequest, context: vscode.ChatContext, stream: vscode.ChatResponseStream, token: vscode.CancellationToken): Promise<IECLChatResult> => {
        // To talk to an LLM in your subcommand handler implementation, your
        // extension can use VS Code's `requestChatAccess` API to access the Copilot API.
        // The GitHub Copilot Chat extension implements this provider.

+       const config = vscode.workspace.getConfiguration("ecl");
+       const languageModelId: string = config.get("languageModel", "copilot-gpt-3.5-turbo");

        if (request.command == "teach") {
            stream.progress("Picking the right topic to teach...");
            const topic = getTopic(context.history);
            const messages = [
                new vscode.LanguageModelChatSystemMessage("You are an ECL language expert! Your job is to explain ECL concepts. Always start your response by stating what concept you are explaining. Always include code samples."),
                new vscode.LanguageModelChatUserMessage(topic)
            ];
-           const chatResponse = await vscode.lm.sendChatRequest(LANGUAGE_MODEL_ID, messages, {}, token);
+           const chatResponse = await vscode.lm.sendChatRequest(languageModelId, messages, {}, token);
            for await (const fragment of chatResponse.stream) {
                stream.markdown(fragment);
            }
@@ -54,18 +54,18 @@ export function activate(context: vscode.ExtensionContext) {
new vscode.LanguageModelChatSystemMessage("You are an ECL language expert! You are also very knowledgable about HPCC."),
new vscode.LanguageModelChatUserMessage("Give small random ECL code samples. " + request.prompt)
];
const chatResponse = await vscode.lm.sendChatRequest(LANGUAGE_MODEL_ID, messages, {}, token);
const chatResponse = await vscode.lm.sendChatRequest(languageModelId, messages, {}, token);
for await (const fragment of chatResponse.stream) {
stream.markdown(fragment);
}
return { metadata: { command: "play" } };
} else {
const messages = [
new vscode.LanguageModelChatSystemMessage(`You are an ECL language expert! Think carefully and step by step like an expert who is good at explaining something.
new vscode.LanguageModelChatSystemMessage(`You are an ECL language expert! Think carefully and step by step like an ECL language expert who is good at explaining something.
Your job is to explain computer science concepts in fun and entertaining way. Always start your response by stating what concept you are explaining. Always include code samples.`),
new vscode.LanguageModelChatUserMessage("In the ECL language, " + request.prompt)
new vscode.LanguageModelChatUserMessage("In the ECL language, explain " + request.prompt)
];
const chatResponse = await vscode.lm.sendChatRequest(LANGUAGE_MODEL_ID, messages, {}, token);
const chatResponse = await vscode.lm.sendChatRequest(languageModelId, messages, {}, token);
for await (const fragment of chatResponse.stream) {
// Process the output from the language model
stream.markdown(fragment);
@@ -116,6 +120,8 @@ export function activate(context: vscode.ExtensionContext) {
        eclagent,
        // Register the command handler for the do something followup
        vscode.commands.registerTextEditorCommand(ECLAGENT_NAMES_COMMAND_ID, async (textEditor: vscode.TextEditor) => {
+           const config = vscode.workspace.getConfiguration("ecl");
+           const languageModelId: string = config.get("languageModel", "copilot-gpt-3.5-turbo");
            const text = textEditor.document.getText();
            const messages = [
                new vscode.LanguageModelChatSystemMessage(`You are an ECL expert! Think carefully and step by step.
@@ -125,7 +131,7 @@

            let chatResponse: vscode.LanguageModelChatResponse | undefined;
            try {
-               chatResponse = await vscode.lm.sendChatRequest(LANGUAGE_MODEL_ID, messages, {}, new vscode.CancellationTokenSource().token);
+               chatResponse = await vscode.lm.sendChatRequest(languageModelId, messages, {}, new vscode.CancellationTokenSource().token);

            } catch (err) {
                // making the chat request might fail because
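Since the same request-and-stream pattern now appears three times in this file, a helper could factor it out. Below is a minimal sketch (not part of this commit) built only on the proposed vscode.lm API exactly as it appears in the diff above; the helper name streamModelReply is an illustrative assumption, and this proposed API was later superseded in stable VS Code releases:

import * as vscode from "vscode";

// Hypothetical helper: send one chat request and forward the streamed reply.
async function streamModelReply(
    languageModelId: string,
    messages: (vscode.LanguageModelChatSystemMessage | vscode.LanguageModelChatUserMessage)[],
    stream: vscode.ChatResponseStream,
    token: vscode.CancellationToken
): Promise<void> {
    try {
        // Ask the configured Copilot model for a reply, honoring cancellation.
        const chatResponse = await vscode.lm.sendChatRequest(languageModelId, messages, {}, token);
        // Forward each streamed markdown fragment to the chat view as it arrives.
        for await (const fragment of chatResponse.stream) {
            stream.markdown(fragment);
        }
    } catch (err) {
        // The request can fail if the user lacks Copilot access, declined the
        // consent prompt, or the configured model id is unknown.
        stream.markdown("Sorry, the language model request failed.");
        console.error(err);
    }
}

Each branch of the handler could then reduce to building its messages array and calling streamModelReply(languageModelId, messages, stream, token).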
