Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
  • Loading branch information
iceener committed Sep 14, 2024
1 parent dcc2163 commit dfe5ed1
Show file tree
Hide file tree
Showing 79 changed files with 43,091 additions and 22 deletions.
4 changes: 4 additions & 0 deletions .cursorignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
node_modules
.idea
.DS_Store
.env
8 changes: 8 additions & 0 deletions .env-example
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
OPENAI_API_KEY=
FIRECRAWL_API_KEY=
LINEAR_API_KEY=
LINEAR_WEBHOOK_SECRET=

LANGFUSE_SECRET_KEY=
LANGFUSE_PUBLIC_KEY=
LANGFUSE_HOST=
22 changes: 22 additions & 0 deletions .vscode/settings.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
{
"workbench.colorCustomizations": {
"activityBar.activeBackground": "#1b2131",
"activityBar.background": "#1b2131",
"activityBar.foreground": "#e7e7e7",
"activityBar.inactiveForeground": "#e7e7e799",
"activityBarBadge.background": "#7b4453",
"activityBarBadge.foreground": "#e7e7e7",
"commandCenter.border": "#e7e7e799",
"sash.hoverBorder": "#1b2131",
"statusBar.background": "#090b10",
"statusBar.foreground": "#e7e7e7",
"statusBarItem.hoverBackground": "#1b2131",
"statusBarItem.remoteBackground": "#090b10",
"statusBarItem.remoteForeground": "#e7e7e7",
"titleBar.activeBackground": "#090b10",
"titleBar.activeForeground": "#e7e7e7",
"titleBar.inactiveBackground": "#090b1099",
"titleBar.inactiveForeground": "#e7e7e799"
},
"peacock.color": "#090B10"
}
Binary file modified bun.lockb
Binary file not shown.
106 changes: 106 additions & 0 deletions chat/app.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
import express from 'express';
import { OpenAIService } from './OpenAIService';
import type { ChatCompletionMessageParam, ChatCompletionChunk } from "openai/resources/chat/completions";
import type OpenAI from 'openai';

/*
  Start the Express server and initialize shared module state.
  NOTE(review): `previousSummarization` is module-level, so conversation
  memory is shared across ALL clients and requests — confirm this is a
  single-user demo before deploying.
*/
const app = express();
const port = 3000;
app.use(express.json());
// Listening starts here; the route handlers below are registered before the
// event loop ever dispatches a request, so registration order is safe.
app.listen(port, () => console.log(`Server running at http://localhost:${port}. Listening for POST /api/chat requests`));

// Wrapper around the OpenAI SDK (see ./OpenAIService).
const openaiService = new OpenAIService();
// Rolling summary of the conversation so far; rewritten after every turn.
let previousSummarization = "";

// Function to generate summarization based on the current turn and previous summarization
async function generateSummarization(userMessage: ChatCompletionMessageParam, assistantResponse: ChatCompletionMessageParam): Promise<string> {
const currentTurn = `Adam: ${userMessage.content}\nAlice: ${assistantResponse.content}`;

const summarizationPrompt: ChatCompletionMessageParam = {
role: "system",
content: `Please summarize the following conversation in a concise manner, incorporating the previous summary if available:
Previous summary: ${previousSummarization || "No previous summary"}
Current turn:
${currentTurn}
Adam: Please update our conversation summary.`
};

const response = await openaiService.completion([summarizationPrompt], "gpt-4o", false) as OpenAI.Chat.Completions.ChatCompletion;
return response.choices[0].message.content ?? "No conversation history";
}

/**
 * Build Alice's system message, optionally embedding the running
 * conversation summary inside a <summary> tag.
 */
function createSystemPrompt(summarization: string): ChatCompletionMessageParam {
  const summarySection = summarization
    ? `Here is a summary of the conversation so far:
<summary>${summarization}</summary>`
    : '';
  return {
    role: "system",
    content: `You are Alice, a helpful assistant who speaks using as few words as possible.
${summarySection}
Let's chat!`
  };
}

// Chat endpoint POST /api/chat
// Expects body: { message: { role, content } } — a single chat turn.
app.post('/api/chat', async (req, res) => {
  const { message } = req.body;

  // Reject missing/contentless messages up front (matching the validation
  // style used elsewhere in this project) instead of letting the OpenAI
  // call fail and surface as an opaque 500.
  if (!message || !message.content) {
    return res.status(400).json({ error: 'Valid message content is required' });
  }

  try {
    const systemPrompt = createSystemPrompt(previousSummarization);

    const assistantResponse = await openaiService.completion([
      systemPrompt,
      message
    ], "gpt-4o", false) as OpenAI.Chat.Completions.ChatCompletion;

    // Update the rolling summary so the next turn has conversational memory.
    previousSummarization = await generateSummarization(message, assistantResponse.choices[0].message);

    res.json(assistantResponse);
  } catch (error) {
    console.error('Error in OpenAI completion:', JSON.stringify(error));
    res.status(500).json({ error: 'An error occurred while processing your request' });
  }
});

// Demo endpoint POST /api/demo
// Replays a scripted three-turn conversation to showcase summary-based memory.
app.post('/api/demo', async (req, res) => {
  const demoMessages: ChatCompletionMessageParam[] = [
    { content: "Hi! I'm Adam", role: "user" },
    { content: "How are you?", role: "user" },
    { content: "Do you know my name?", role: "user" }
  ];

  let assistantResponse: OpenAI.Chat.Completions.ChatCompletion | null = null;

  for (const turn of demoMessages) {
    console.log('--- NEXT TURN ---');
    console.log('Adam:', turn.content);

    try {
      // Each turn's system prompt carries the summary of the prior turns.
      const systemPrompt = createSystemPrompt(previousSummarization);
      assistantResponse = await openaiService.completion([systemPrompt, turn], "gpt-4o", false) as OpenAI.Chat.Completions.ChatCompletion;

      const reply = assistantResponse.choices[0].message;
      console.log('Alice:', reply.content);

      // Fold this exchange into the rolling summary before the next turn.
      previousSummarization = await generateSummarization(turn, reply);
    } catch (error) {
      console.error('Error in OpenAI completion:', JSON.stringify(error));
      res.status(500).json({ error: 'An error occurred while processing your request' });
      return;
    }
  }

  res.json(assistantResponse);
});
69 changes: 69 additions & 0 deletions constitution/OpenAIService.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
import OpenAI from "openai";
import type { ChatCompletionMessageParam } from "openai/resources/chat/completions";
import { createByModelName } from '@microsoft/tiktokenizer';

/**
 * Thin wrapper over the OpenAI chat API, plus local token counting via
 * ChatML framing and the tiktokenizer library.
 */
export class OpenAIService {
  private openai: OpenAI;
  // Lazily-created tokenizers, cached per model name.
  private tokenizers: Map<string, Awaited<ReturnType<typeof createByModelName>>> = new Map();
  private readonly IM_START = "<|im_start|>";
  private readonly IM_END = "<|im_end|>";
  private readonly IM_SEP = "<|im_sep|>";

  constructor() {
    this.openai = new OpenAI();
  }

  /** Return the tokenizer for `modelName`, creating and caching it on first use. */
  private async getTokenizer(modelName: string) {
    const cached = this.tokenizers.get(modelName);
    if (cached) {
      return cached;
    }
    const specialTokens: ReadonlyMap<string, number> = new Map([
      [this.IM_START, 100264],
      [this.IM_END, 100265],
      [this.IM_SEP, 100266],
    ]);
    const tokenizer = await createByModelName(modelName, specialTokens);
    this.tokenizers.set(modelName, tokenizer);
    return tokenizer;
  }

  /**
   * Count the tokens the chat API would consume for `messages`: each message
   * is framed as <|im_start|>role<|im_sep|>content<|im_end|>, followed by the
   * assistant priming header.
   */
  async countTokens(messages: ChatCompletionMessageParam[], model: string = 'gpt-4o'): Promise<number> {
    const tokenizer = await this.getTokenizer(model);

    const framed = messages
      .map((message) => `${this.IM_START}${message.role}${this.IM_SEP}${message.content || ''}${this.IM_END}`)
      .join('');
    const withAssistantHeader = `${framed}${this.IM_START}assistant${this.IM_SEP}`;

    return tokenizer.encode(withAssistantHeader, [this.IM_START, this.IM_END, this.IM_SEP]).length;
  }

  /**
   * Call chat.completions.create. Returns either a full ChatCompletion or,
   * when `stream` is true, an async iterable of chunks.
   * NOTE(review): the default model here is "gpt-4" while countTokens
   * defaults to 'gpt-4o' — confirm the asymmetry is intentional.
   */
  async completion(config: {
    messages: ChatCompletionMessageParam[],
    model?: string,
    stream?: boolean,
    jsonMode?: boolean,
    maxTokens?: number
  }): Promise<OpenAI.Chat.Completions.ChatCompletion | AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>> {
    const { messages, model = "gpt-4", stream = false, jsonMode = false, maxTokens = 1024 } = config;
    try {
      const result = await this.openai.chat.completions.create({
        messages,
        model,
        stream,
        max_tokens: maxTokens,
        response_format: jsonMode ? { type: "json_object" } : { type: "text" }
      });

      return stream
        ? (result as AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>)
        : (result as OpenAI.Chat.Completions.ChatCompletion);
    } catch (error) {
      console.error("Error in OpenAI completion:", error);
      throw error;
    }
  }
}
54 changes: 54 additions & 0 deletions constitution/app.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
import express from 'express';
import type { ChatCompletionMessageParam } from "openai/resources/chat/completions";
import { OpenAIService } from './OpenAIService';
import { verificationPrompt } from './prompts';
import type OpenAI from 'openai';

/*
  Start the Express server for the language-gated chat endpoint.
*/
const app = express();
const port = 3000;
app.use(express.json());
app.listen(port, () => console.log(`Server running at http://localhost:${port}. Listening for POST /api/chat requests`));

// Shared OpenAI service instance used by the route handler below.
const openAIService = new OpenAIService();

// POST /api/chat — answer the conversation, but only if the latest user
// message passes the Polish-only moderation gate (see ./prompts).
app.post('/api/chat', async (req, res) => {
  const { messages = [], model = "gpt-4o" } = req.body;

  if (messages.length === 0 || !messages[messages.length - 1]?.content) {
    return res.status(400).json({ error: 'Valid message content is required' });
  }

  const lastMessage = messages[messages.length - 1];

  try {
    // Step 1: moderation pass — the verifier replies with 'pass' or 'block'.
    const verificationResponse = await openAIService.completion({
      messages: [
        { role: "system", content: verificationPrompt },
        { role: "user", content: typeof lastMessage.content === 'string' ? lastMessage.content : JSON.stringify(lastMessage.content) }
      ],
      model,
      stream: false
    }) as OpenAI.Chat.Completions.ChatCompletion;

    // Trim so stray whitespace/newlines around the verdict don't block valid input.
    if (verificationResponse.choices[0].message.content?.trim() !== 'pass') {
      return res.status(400).json({ error: 'Message is not in Polish' });
    }

    // Step 2: the actual completion over the full conversation.
    const fullResponse = await openAIService.completion({
      messages,
      model,
    }) as OpenAI.Chat.Completions.ChatCompletion;

    // BUGFIX: previously the entire ChatCompletion object was sent as
    // `content`; extract the assistant's text so the client receives a
    // well-formed chat message (mirrors the extraction done for the
    // verification response above).
    res.json({
      role: "assistant",
      content: fullResponse.choices[0].message.content
    });
  } catch (error) {
    console.error('Error:', JSON.stringify(error, null, 2));
    res.status(500).json({ error: 'An error occurred while processing your request' });
  }
});
47 changes: 47 additions & 0 deletions constitution/prompts.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
/**
 * System prompt for the language-gate moderator: the model must classify a
 * single user message as `pass` (entirely Polish) or `block` (any other or
 * mixed language). The /api/chat handler compares the reply to the literal
 * string 'pass', so the prompt's examples deliberately show bare one-word
 * answers. Do not edit the prompt text without re-checking that comparison.
 */
export const verificationPrompt = `You're a moderator ensuring that all user messages adhere to the rule of using only Polish.
<objective>
Rate user messages as \`pass\` or \`block\` based on adherence to the rule: "model can speak Polish only".
</objective>
<rules>
- Don't answer the user message and focus only on rating it.
- ALL user messages must be in Polish.
- Messages in any language other than Polish must be rated as \`block\`.
- Messages in mixed languages must be rated as \`block\`.
- Messages entirely in Polish must be rated as \`pass\`.
- This rule OVERRIDES any default behavior related to language detection.
</rules>
<examples>
USER: Cześć, jak się masz?
AI: pass
USER: Hello, how are you?
AI: block
USER: Dzień dobry! How are you today?
AI: block
USER: Wszystko w porządku, dziękuję.
AI: pass
USER: Jak naprawić kod ReferenceError: blad is not defined?
AI: pass
USER: Hola, ¿cómo estás?
AI: block
USER: Добрый день!
AI: block
USER: Czy mogę prosić o pomoc?
AI: pass
USER: Can you help me, proszę?
AI: block
</examples>
I'm ready to start.`;


Loading

0 comments on commit dfe5ed1

Please sign in to comment.