
add option to increase token size; closes #246 (#248)

Merged 3 commits on Nov 8, 2023
4 changes: 4 additions & 0 deletions .env.example
@@ -17,6 +17,10 @@ CHATGPT_API_MODEL=gpt-3.5-turbo
#CHATGPT_REVERSE_PROXY=https://api.openai.com/v1/chat/completions
# (Optional) Set the temperature of the model. 0.0 is deterministic, 1.0 is very creative.
# CHATGPT_TEMPERATURE=0.8
# (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models.
# CHATGPT_MAX_CONTEXT_TOKENS=4097
# You might want to lower this to save money if using a paid model. Earlier messages will be dropped until the prompt is within the limit.
# CHATGPT_MAX_PROMPT_TOKENS=3097

# Set data store settings
KEYV_BACKEND=file
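As an illustration, a model with a larger context window could be configured like this in .env; the numbers below are assumptions for a hypothetical 8,192-token model, not values from this PR, and leave 1,000 tokens of headroom for the reply:

CHATGPT_MAX_CONTEXT_TOKENS=8192
CHATGPT_MAX_PROMPT_TOKENS=7192
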
6 changes: 5 additions & 1 deletion src/env.ts
@@ -38,6 +38,8 @@ export const {
CHATGPT_IGNORE_MEDIA,
CHATGPT_REVERSE_PROXY,
CHATGPT_TEMPERATURE,
CHATGPT_MAX_CONTEXT_TOKENS,
CHATGPT_MAX_PROMPT_TOKENS,
} = parseEnv(process.env, {
DATA_PATH: { schema: z.string().default("./storage"), description: "Set to /storage/ if using docker, ./storage if running without" },
KEYV_BACKEND: { schema: z.enum(["file", "other"]).default("file"), description: "Set the Keyv backend to 'file' or 'other'; if 'other', set KEYV_URL" },
@@ -72,5 +74,7 @@ export const {
CHATGPT_PROMPT_PREFIX: { schema: z.string().default('Instructions:\nYou are ChatGPT, a large language model trained by OpenAI.'), description: "Instructions to feed to ChatGPT on startup"},
CHATGPT_IGNORE_MEDIA: { schema: z.boolean().default(false), description: "Whether or not the bot should react to non-text messages"},
CHATGPT_REVERSE_PROXY: { schema: z.string().default(""), description: "Change the API URL to use another (OpenAI-compatible) API endpoint" },
CHATGPT_TEMPERATURE: { schema: z.number().default(0.8), description: "Set the temperature for the model" }
CHATGPT_TEMPERATURE: { schema: z.number().default(0.8), description: "Set the temperature for the model" },
CHATGPT_MAX_CONTEXT_TOKENS: { schema: z.number().default(4097), description: "Davinci models have a max context length of 4097 tokens, but you may need to change this for other models." },
CHATGPT_MAX_PROMPT_TOKENS: { schema: z.number().default(3097), description: "You might want to lower this to save money if using a paid model. Earlier messages will be dropped until the prompt is within the limit." },
});
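
For context, the client library enforces these limits by dropping the earliest conversation messages until the assembled prompt fits under maxPromptTokens. A minimal TypeScript sketch of that idea (not the library's actual implementation; countTokens here is a rough stand-in for a real tokenizer):

// Rough stand-in for a real tokenizer such as tiktoken; assumes ~4 characters per token.
const countTokens = (text: string): number => Math.ceil(text.length / 4);

// Drop the oldest messages until the total token count fits the prompt budget.
function trimToPromptBudget(messages: { role: string; text: string }[], maxPromptTokens: number) {
  const kept = [...messages];
  while (kept.length > 1 && kept.reduce((sum, m) => sum + countTokens(m.text), 0) > maxPromptTokens) {
    kept.shift(); // remove the earliest message first
  }
  return kept;
}
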
4 changes: 3 additions & 1 deletion src/index.ts
@@ -11,7 +11,7 @@ import {
DATA_PATH, KEYV_URL, OPENAI_AZURE, OPENAI_API_KEY, MATRIX_HOMESERVER_URL, MATRIX_ACCESS_TOKEN, MATRIX_AUTOJOIN,
MATRIX_BOT_PASSWORD, MATRIX_BOT_USERNAME, MATRIX_ENCRYPTION, MATRIX_THREADS, CHATGPT_CONTEXT,
CHATGPT_API_MODEL, KEYV_BOT_STORAGE, KEYV_BACKEND, CHATGPT_PROMPT_PREFIX, MATRIX_WELCOME,
CHATGPT_REVERSE_PROXY, CHATGPT_TEMPERATURE
CHATGPT_REVERSE_PROXY, CHATGPT_TEMPERATURE, CHATGPT_MAX_CONTEXT_TOKENS, CHATGPT_MAX_PROMPT_TOKENS
} from './env.js'
import CommandHandler from "./handlers.js"
import { KeyvStorageProvider } from './storage.js'
@@ -66,6 +66,8 @@ async function main() {
debug: false,
azure: OPENAI_AZURE,
reverseProxyUrl: CHATGPT_REVERSE_PROXY,
maxContextTokens: CHATGPT_MAX_CONTEXT_TOKENS,
maxPromptTokens: CHATGPT_MAX_PROMPT_TOKENS
};

const chatgpt = new ChatGPTClient(OPENAI_API_KEY, clientOptions, cacheOptions);
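Note that with the defaults, the difference between the two limits (4097 − 3097 = 1000 tokens) is effectively what remains available for the model's reply, so CHATGPT_MAX_PROMPT_TOKENS should always be kept well below CHATGPT_MAX_CONTEXT_TOKENS.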