From 38d26c2ea323296abc01d0c3a67f1cda1dbd91d2 Mon Sep 17 00:00:00 2001
From: Adam Shedivy
Date: Mon, 14 Oct 2024 16:11:10 -0500
Subject: [PATCH] sync continue index.d.ts

---
 index.d.ts | 1887 ++++++++++++++++++++++++++++++----------------------
 1 file changed, 1075 insertions(+), 812 deletions(-)

diff --git a/index.d.ts b/index.d.ts
index 4236ff0d..5504cbfe 100644
--- a/index.d.ts
+++ b/index.d.ts
@@ -1,818 +1,1081 @@
 declare global {
-declare global {
-  interface Window {
-    ide?: "vscode";
-    windowId: string;
-    serverUrl: string;
-    vscMachineId: string;
-    vscMediaUrl: string;
-    fullColorTheme?: {
-      rules?: {
-        token?: string;
-        foreground?: string;
-      }[];
-    };
-    colorThemeName?: string;
-    workspacePaths?: string[];
-    postIntellijMessage?: (
-      messageType: string,
-      data: any,
-      messageIde: string,
-    ) => void;
-  }
-}
-
-  export interface ChunkWithoutID {
-    content: string;
-    startLine: number;
-    endLine: number;
-    otherMetadata?: { [key: string]: any };
-  }
-
-  export interface Chunk extends ChunkWithoutID {
-    digest: string;
-    filepath: string;
-    index: number; // Index of the chunk in the document at filepath
-  }
-
-  export interface IndexingProgressUpdate {
-    progress: number;
-    desc: string;
-  }
-
-  export interface LLMReturnValue {
-    prompt: string;
-    completion: string;
+  interface Window {
+    ide?: "vscode";
+    windowId: string;
+    serverUrl: string;
+    vscMachineId: string;
+    vscMediaUrl: string;
+    fullColorTheme?: {
+      rules?: {
+        token?: string;
+        foreground?: string;
+      }[];
+    };
+    colorThemeName?: string;
+    workspacePaths?: string[];
+    postIntellijMessage?: (
+      messageType: string,
+      data: any,
+      messageIde: string,
+    ) => void;
   }
 }
 
+export interface ChunkWithoutID {
+  content: string;
+  startLine: number;
+  endLine: number;
+  signature?: string;
+  otherMetadata?: { [key: string]: any };
+}
+
+export interface Chunk extends ChunkWithoutID {
+  digest: string;
+  filepath: string;
+  index: number; // Index of the chunk in the document at filepath
+}
+
+export interface IndexingProgressUpdate {
+  progress: number;
+  desc: string;
+  shouldClearIndexes?: boolean;
+  status: "loading" | "indexing" | "done" | "failed" | "paused" | "disabled";
+  debugInfo?: string;
+}
+
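Note: `IndexingProgressUpdate` gains an explicit lifecycle `status` in the new typings. A minimal sketch of a value a consumer might receive (values are illustrative, not from the patch):

    const update: IndexingProgressUpdate = {
      progress: 0.5, // fraction complete, 0..1
      desc: "Indexing core/...",
      status: "indexing", // one of the six states in the union above
    };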
+export type PromptTemplate =
+  | string
+  | ((
       history: ChatMessage[],
       otherData: Record<string, string>,
-      canPutWordsInModelsMouth?: boolean,
-    ): string | ChatMessage[];
-  }
-
-  export type ContextProviderType = "normal" | "query" | "submenu";
-
-  export interface ContextProviderDescription {
-    title: string;
-    displayTitle: string;
-    description: string;
-    renderInlineAs?: string;
-    type: ContextProviderType;
-  }
-
-  export type FetchFunction = (url: string | URL, init?: any) => Promise<any>;
-
-  export interface ContextProviderExtras {
-    fullInput: string;
-    embeddingsProvider: EmbeddingsProvider;
-    reranker: Reranker | undefined;
-    llm: ILLM;
-    ide: IDE;
-    selectedCode: RangeInFile[];
-    fetch: FetchFunction;
-  }
-
-  export interface LoadSubmenuItemsArgs {
-    ide: IDE;
-    fetch: FetchFunction;
-  }
-
-  export interface CustomContextProvider {
-    title: string;
-    displayTitle?: string;
-    description?: string;
-    renderInlineAs?: string;
-    type?: ContextProviderType;
-    getContextItems(
-      query: string,
-      extras: ContextProviderExtras,
-    ): Promise<ContextItem[]>;
-    loadSubmenuItems?: (
-      args: LoadSubmenuItemsArgs,
-    ) => Promise<ContextSubmenuItem[]>;
-  }
-
-  export interface ContextSubmenuItem {
-    id: string;
-    title: string;
-    description: string;
-  }
-
-  export interface IContextProvider {
-    get description(): ContextProviderDescription;
-
-    getContextItems(
-      query: string,
-      extras: ContextProviderExtras,
-    ): Promise<ContextItem[]>;
-
-    loadSubmenuItems(args: LoadSubmenuItemsArgs): Promise<ContextSubmenuItem[]>;
-  }
-
-  export interface PersistedSessionInfo {
-    history: ChatHistory;
-    title: string;
-    workspaceDirectory: string;
-    sessionId: string;
-  }
-
-  export interface SessionInfo {
-    sessionId: string;
-    title: string;
-    dateCreated: string;
-    workspaceDirectory: string;
-  }
-
-  export interface RangeInFile {
-    filepath: string;
-    range: Range;
-  }
-
-  export interface FileWithContents {
-    filepath: string;
-    contents: string;
-  }
-
-  export interface Range {
-    start: Position;
-    end: Position;
-  }
-  export interface Position {
-    line: number;
-    character: number;
-  }
-  export interface FileEdit {
-    filepath: string;
-    range: Range;
-    replacement: string;
-  }
-
-  export interface ContinueError {
-    title: string;
-    message: string;
-  }
-
-  export interface CompletionOptions extends BaseCompletionOptions {
-    model: string;
-  }
-
-  export type ChatMessageRole = "user" | "assistant" | "system";
-
-  export interface MessagePart {
-    type: "text" | "imageUrl";
-    text?: string;
-    imageUrl?: { url: string };
-  }
-
-  export type MessageContent = string | MessagePart[];
-
-  export interface ChatMessage {
-    role: ChatMessageRole;
-    content: MessageContent;
-  }
-
-  export interface ContextItemId {
-    providerTitle: string;
-    itemId: string;
-  }
-
-  export interface ContextItem {
-    content: string;
-    name: string;
-    description: string;
-    editing?: boolean;
-    editable?: boolean;
-  }
-
-  export interface ContextItemWithId {
-    content: string;
-    name: string;
-    description: string;
-    id: ContextItemId;
-    editing?: boolean;
-    editable?: boolean;
-  }
-
-  export interface InputModifiers {
-    useCodebase: boolean;
-  }
-
-  export interface ChatHistoryItem {
-    message: ChatMessage;
-    editorState?: any;
-    modifiers?: InputModifiers;
-    contextItems: ContextItemWithId[];
-    promptLogs?: [string, string][]; // [prompt, completion]
-  }
-
-  export type ChatHistory = ChatHistoryItem[];
-
-  // LLM
-
-  export interface LLMFullCompletionOptions extends BaseCompletionOptions {
-    log?: boolean;
-
-    model?: string;
-  }
-  export interface LLMOptions {
-    model: string;
-
-    title?: string;
-    uniqueId?: string;
-    systemMessage?: string;
-    contextLength?: number;
-    completionOptions?: CompletionOptions;
-    requestOptions?: RequestOptions;
-    template?: TemplateType;
-    promptTemplates?: Record<string, PromptTemplate>;
-    templateMessages?: (messages: ChatMessage[]) => string;
-    writeLog?: (str: string) => Promise<void>;
-    llmRequestHook?: (model: string, prompt: string) => any;
-    apiKey?: string;
-    apiBase?: string;
-
-    useLegacyCompletionsEndpoint?: boolean;
-
-    // Azure options
-    engine?: string;
-    apiVersion?: string;
-    apiType?: string;
-
-    // GCP Options
-    region?: string;
-    projectId?: string;
-  }
-  type RequireAtLeastOne<T, Keys extends keyof T = keyof T> = Pick<
-    T,
-    Exclude<keyof T, Keys>
-  > &
-    {
-      [K in Keys]-?: Required<Pick<T, K>> & Partial<Pick<T, Exclude<Keys, K>>>;
-    }[Keys];
-
-  export interface CustomLLMWithOptionals {
-    options: LLMOptions;
-    streamCompletion?: (
-      prompt: string,
-      options: CompletionOptions,
-      fetch: (input: RequestInfo | URL, init?: RequestInit) => Promise<Response>,
-    ) => AsyncGenerator<string>;
-    streamChat?: (
-      messages: ChatMessage[],
-      options: CompletionOptions,
-      fetch: (input: RequestInfo | URL, init?: RequestInit) => Promise<Response>,
-    ) => AsyncGenerator<string>;
-    listModels?: (
-      fetch: (input: RequestInfo | URL, init?: RequestInit) => Promise<Response>,
-    ) => Promise<string[]>;
-  }
-
+    ) => string | ChatMessage[]);
+
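Note: a `PromptTemplate` can be a raw template string or a function over the chat history. A sketch of the function form (the `otherData` keys shown are hypothetical; the caller decides what it passes):

    const editTemplate: PromptTemplate = (history, otherData) =>
      // Returns a plain prompt string; it could also return ChatMessage[].
      `Rewrite the code below.\n${otherData.codeToEdit ?? ""}\n` +
      `Instruction: ${otherData.userInput ?? ""}`;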
+export interface ILLM extends LLMOptions {
+  get providerName(): ModelProvider;
+
+  uniqueId: string;
+  model: string;
+
+  title?: string;
+  systemMessage?: string;
+  contextLength: number;
+  maxStopWords?: number;
+  completionOptions: CompletionOptions;
+  requestOptions?: RequestOptions;
+  promptTemplates?: Record<string, PromptTemplate>;
+  templateMessages?: (messages: ChatMessage[]) => string;
+  writeLog?: (str: string) => Promise<void>;
+  llmRequestHook?: (model: string, prompt: string) => any;
+  apiKey?: string;
+  apiBase?: string;
+  cacheBehavior?: CacheBehavior;
+
+  engine?: string;
+  apiVersion?: string;
+  apiType?: string;
+  region?: string;
+  projectId?: string;
+
+  complete(prompt: string, options?: LLMFullCompletionOptions): Promise<string>;
+
+  streamComplete(
+    prompt: string,
+    options?: LLMFullCompletionOptions,
+  ): AsyncGenerator<string>;
+
+  streamFim(
+    prefix: string,
+    suffix: string,
+    options?: LLMFullCompletionOptions,
+  ): AsyncGenerator<string>;
+
+  streamChat(
+    messages: ChatMessage[],
+    options?: LLMFullCompletionOptions,
+  ): AsyncGenerator<ChatMessage>;
+
+  chat(
+    messages: ChatMessage[],
+    options?: LLMFullCompletionOptions,
+  ): Promise<ChatMessage>;
+
+  countTokens(text: string): number;
+
+  supportsImages(): boolean;
+
+  supportsCompletions(): boolean;
+
+  supportsPrefill(): boolean;
+
+  supportsFim(): boolean;
+
+  listModels(): Promise<string[]>;
+
+  renderPromptTemplate(
+    template: PromptTemplate,
+    history: ChatMessage[],
+    otherData: Record<string, string>,
+    canPutWordsInModelsMouth?: boolean,
+  ): string | ChatMessage[];
+}
+
+export type ContextProviderType = "normal" | "query" | "submenu";
+
+export interface ContextProviderDescription {
+  title: ContextProviderName;
+  displayTitle: string;
+  description: string;
+  renderInlineAs?: string;
+  type: ContextProviderType;
+  dependsOnIndexing?: boolean;
+}
+
+export type FetchFunction = (url: string | URL, init?: any) => Promise<any>;
+
+export interface ContextProviderExtras {
+  config: ContinueConfig;
+  fullInput: string;
+  embeddingsProvider: EmbeddingsProvider;
+  reranker: Reranker | undefined;
+  llm: ILLM;
+  ide: IDE;
+  selectedCode: RangeInFile[];
+  fetch: FetchFunction;
+}
+
+export interface LoadSubmenuItemsArgs {
+  config: ContinueConfig;
+  ide: IDE;
+  fetch: FetchFunction;
+}
+
+export interface CustomContextProvider {
+  title: string;
+  displayTitle?: string;
+  description?: string;
+  renderInlineAs?: string;
+  type?: ContextProviderType;
+  getContextItems(
+    query: string,
+    extras: ContextProviderExtras,
+  ): Promise<ContextItem[]>;
+  loadSubmenuItems?: (
+    args: LoadSubmenuItemsArgs,
+  ) => Promise<ContextSubmenuItem[]>;
+}
+
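Note: per the `CustomContextProvider` shape above, a provider only needs a `title` and a `getContextItems` function. A minimal sketch (the provider itself is made up; `getSearchResults` is the IDE method declared later in this file):

    const todoProvider: CustomContextProvider = {
      title: "todos",
      displayTitle: "TODO comments",
      description: "Collects TODO comments from the workspace",
      getContextItems: async (query, extras) => {
        // Search the workspace through the IDE bridge.
        const results = await extras.ide.getSearchResults("TODO");
        return [
          {
            name: "TODO comments",
            description: "TODO comments found in the workspace",
            content: results,
          },
        ];
      },
    };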
+export interface ContextSubmenuItem {
+  id: string;
+  title: string;
+  description: string;
+  icon?: string;
+  metadata?: any;
+}
+
+export interface SiteIndexingConfig {
+  title: string;
+  startUrl: string;
+  rootUrl?: string;
+  maxDepth?: number;
+  faviconUrl?: string;
+}
+
+export interface IContextProvider {
+  get description(): ContextProviderDescription;
+
+  getContextItems(
+    query: string,
+    extras: ContextProviderExtras,
+  ): Promise<ContextItem[]>;
+
+  loadSubmenuItems(args: LoadSubmenuItemsArgs): Promise<ContextSubmenuItem[]>;
+}
+
+export interface PersistedSessionInfo {
+  history: ChatHistory;
+  title: string;
+  workspaceDirectory: string;
+  sessionId: string;
+}
+
+export interface SessionInfo {
+  sessionId: string;
+  title: string;
+  dateCreated: string;
+  workspaceDirectory: string;
+}
+
+export interface RangeInFile {
+  filepath: string;
+  range: Range;
+}
+
+export interface Location {
+  filepath: string;
+  position: Position;
+}
+
+export interface FileWithContents {
+  filepath: string;
+  contents: string;
+}
+
+export interface Range {
+  start: Position;
+  end: Position;
+}
+export interface Position {
+  line: number;
+  character: number;
+}
+export interface FileEdit {
+  filepath: string;
+  range: Range;
+  replacement: string;
+}
+
+export interface ContinueError {
+  title: string;
+  message: string;
+}
+
+export interface CompletionOptions extends BaseCompletionOptions {
+  model: string;
+}
+
+export type ChatMessageRole = "user" | "assistant" | "system";
+
+export interface MessagePart {
+  type: "text" | "imageUrl";
+  text?: string;
+  imageUrl?: { url: string };
+}
+
+export type MessageContent = string | MessagePart[];
+
+export interface ChatMessage {
+  role: ChatMessageRole;
+  content: MessageContent;
+}
+
+export interface ContextItemId {
+  providerTitle: string;
+  itemId: string;
+}
+
+export type ContextItemUriTypes = "file" | "url";
+
+export interface ContextItemUri {
+  type: ContextItemUriTypes;
+  value: string;
+}
+
+export interface ContextItem {
+  content: string;
+  name: string;
+  description: string;
+  editing?: boolean;
+  editable?: boolean;
+  icon?: string;
+  uri?: ContextItemUri;
+}
+
+export interface ContextItemWithId extends ContextItem {
+  id: ContextItemId;
+}
+
+export interface InputModifiers {
+  useCodebase: boolean;
+  noContext: boolean;
+}
+
+export interface PromptLog {
+  modelTitle: string;
+  completionOptions: CompletionOptions;
+  prompt: string;
+  completion: string;
+}
+
+export interface ChatHistoryItem {
+  message: ChatMessage;
+  editorState?: any;
+  modifiers?: InputModifiers;
+  contextItems: ContextItemWithId[];
+  promptLogs?: PromptLog[];
+}
+
+export type ChatHistory = ChatHistoryItem[];
+
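Note: `promptLogs` changes from bare `[prompt, completion]` tuples to structured `PromptLog` objects. A sketch of a history item under the new shape (all values illustrative):

    const item: ChatHistoryItem = {
      message: { role: "assistant", content: "Here is the fix..." },
      contextItems: [],
      promptLogs: [
        {
          modelTitle: "GPT-4o",
          completionOptions: { model: "gpt-4o" },
          prompt: "Fix the failing test...",
          completion: "Here is the fix...",
        },
      ],
    };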
+// LLM
+
+export interface LLMFullCompletionOptions extends BaseCompletionOptions {
+  log?: boolean;
+
+  model?: string;
+}
+
+export type ToastType = "info" | "error" | "warning";
+
+export interface LLMOptions {
+  model: string;
+
+  title?: string;
+  uniqueId?: string;
+  systemMessage?: string;
+  contextLength?: number;
+  maxStopWords?: number;
+  completionOptions?: CompletionOptions;
+  requestOptions?: RequestOptions;
+  template?: TemplateType;
+  promptTemplates?: Record<string, PromptTemplate>;
+  templateMessages?: (messages: ChatMessage[]) => string;
+  writeLog?: (str: string) => Promise<void>;
+  llmRequestHook?: (model: string, prompt: string) => any;
+  apiKey?: string;
+  aiGatewaySlug?: string;
+  apiBase?: string;
+  cacheBehavior?: CacheBehavior;
+
+  useLegacyCompletionsEndpoint?: boolean;
+
+  // Cloudflare options
+  accountId?: string;
+
+  // Azure options
+  engine?: string;
+  apiVersion?: string;
+  apiType?: string;
+
+  // AWS options
+  profile?: string;
+  modelArn?: string;
+
+  // AWS and GCP Options
+  region?: string;
+
+  // GCP Options
+  projectId?: string;
+  capabilities?: ModelCapability;
+
+  // IBM watsonx options
+  watsonxUrl?: string;
+  watsonxCreds?: string;
+  watsonxProjectId?: string;
+  watsonxStopToken?: string;
+  watsonxApiVersion?: string;
+  watsonxFullUrl?: string;
+}
+type RequireAtLeastOne<T, Keys extends keyof T = keyof T> = Pick<
+  T,
+  Exclude<keyof T, Keys>
+> &
+  {
+    [K in Keys]-?: Required<Pick<T, K>> & Partial<Pick<T, Exclude<Keys, K>>>;
+  }[Keys];
+
+export interface CustomLLMWithOptionals {
+  options: LLMOptions;
+  streamCompletion?: (
+    prompt: string,
+    options: CompletionOptions,
+    fetch: (input: RequestInfo | URL, init?: RequestInit) => Promise<Response>,
+  ) => AsyncGenerator<string>;
+  streamChat?: (
+    messages: ChatMessage[],
+    options: CompletionOptions,
+    fetch: (input: RequestInfo | URL, init?: RequestInit) => Promise<Response>,
+  ) => AsyncGenerator<string>;
+  listModels?: (
+    fetch: (input: RequestInfo | URL, init?: RequestInit) => Promise<Response>,
+  ) => Promise<string[]>;
+}
+
+/**
+ * The LLM interface requires you to specify either `streamCompletion` or `streamChat` (or both).
+ */
+export type CustomLLM = RequireAtLeastOne<
+  CustomLLMWithOptionals,
+  "streamCompletion" | "streamChat"
+>;
+
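Note: because `CustomLLM` is `RequireAtLeastOne` over `streamCompletion`/`streamChat`, supplying just one of the two type-checks. A sketch with only `streamChat` (the endpoint and response shape are hypothetical):

    const myCustomLLM: CustomLLM = {
      options: { model: "my-local-model" },
      streamChat: async function* (messages, options, fetch) {
        const resp = await fetch("http://localhost:8000/v1/chat", {
          method: "POST",
          body: JSON.stringify({ messages, max_tokens: options.maxTokens }),
        });
        // Yield the whole completion in one chunk for simplicity.
        yield (await resp.json()).text;
      },
    };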
+// IDE
+
+export type DiffLineType = "new" | "old" | "same";
+
+export interface DiffLine {
+  type: DiffLineType;
+  line: string;
+}
+
+export class Problem {
+  filepath: string;
+  range: Range;
+  message: string;
+}
+
+export class Thread {
+  name: string;
+  id: number;
+}
+
+export type IdeType = "vscode" | "jetbrains";
+export interface IdeInfo {
+  ideType: IdeType;
+  name: string;
+  version: string;
+  remoteName: string;
+  extensionVersion: string;
+}
+
+export interface BranchAndDir {
+  branch: string;
+  directory: string;
+}
+
+export interface IndexTag extends BranchAndDir {
+  artifactId: string;
+}
+
+export enum FileType {
+  Unkown = 0,
+  File = 1,
+  Directory = 2,
+  SymbolicLink = 64,
+}
+
+export interface IdeSettings {
+  remoteConfigServerUrl: string | undefined;
+  remoteConfigSyncPeriod: number;
+  userToken: string;
+  enableControlServerBeta: boolean;
+  pauseCodebaseIndexOnStart: boolean;
+  enableDebugLogs: boolean;
+}
+
+export interface IDE {
+  getIdeInfo(): Promise<IdeInfo>;
+  getIdeSettings(): Promise<IdeSettings>;
+  getDiff(): Promise<string>;
+  isTelemetryEnabled(): Promise<boolean>;
+  getUniqueId(): Promise<string>;
+  getTerminalContents(): Promise<string>;
+  getDebugLocals(threadIndex: number): Promise<string>;
+  getTopLevelCallStackSources(
+    threadIndex: number,
+    stackDepth: number,
+  ): Promise<string[]>;
+  getAvailableThreads(): Promise<Thread[]>;
+  listFolders(): Promise<string[]>;
+  getWorkspaceDirs(): Promise<string[]>;
+  getWorkspaceConfigs(): Promise<ContinueRcJson[]>;
+  fileExists(filepath: string): Promise<boolean>;
+  writeFile(path: string, contents: string): Promise<void>;
+  showVirtualFile(title: string, contents: string): Promise<void>;
+  getContinueDir(): Promise<string>;
+  openFile(path: string): Promise<void>;
+  runCommand(command: string): Promise<void>;
+  saveFile(filepath: string): Promise<void>;
+  readFile(filepath: string): Promise<string>;
+  readRangeInFile(filepath: string, range: Range): Promise<string>;
+  showLines(
+    filepath: string,
+    startLine: number,
+    endLine: number,
+  ): Promise<void>;
+  showDiff(
+    filepath: string,
+    newContents: string,
+    stepIndex: number,
+  ): Promise<void>;
+  getOpenFiles(): Promise<string[]>;
+  getCurrentFile(): Promise<string | undefined>;
+  getPinnedFiles(): Promise<string[]>;
+  getSearchResults(query: string): Promise<string>;
+  subprocess(command: string): Promise<[string, string]>;
+  getProblems(filepath?: string | undefined): Promise<Problem[]>;
+  getBranch(dir: string): Promise<string>;
+  getTags(artifactId: string): Promise<IndexTag[]>;
+  getRepoName(dir: string): Promise<string | undefined>;
+  showToast(
+    type: ToastType,
+    message: string,
+    ...otherParams: any[]
+  ): Promise<any>;
+  getGitRootPath(dir: string): Promise<string | undefined>;
+  listDir(dir: string): Promise<[string, FileType][]>;
+  getLastModified(files: string[]): Promise<{ [path: string]: number }>;
+  getGitHubAuthToken(args: GetGhTokenArgs): Promise<string | undefined>;
+
+  // LSP
+  gotoDefinition(location: Location): Promise<RangeInFile[]>;
+
+  // Callbacks
+  onDidChangeActiveTextEditor(callback: (filepath: string) => void): void;
+  pathSep(): Promise<string>;
+}
+
+// Slash Commands
+
+export interface ContinueSDK {
+  ide: IDE;
+  llm: ILLM;
+  addContextItem: (item: ContextItemWithId) => void;
+  history: ChatMessage[];
+  input: string;
+  params?: { [key: string]: any } | undefined;
+  contextItems: ContextItemWithId[];
+  selectedCode: RangeInFile[];
+  config: ContinueConfig;
+  fetch: FetchFunction;
+}
+
+export interface SlashCommand {
+  name: string;
+  description: string;
+  params?: { [key: string]: any };
+  run: (sdk: ContinueSDK) => AsyncGenerator<string | undefined>;
+}
+
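Note: a slash command's `run` is an async generator; whatever it yields is streamed into the sidebar. A sketch using only methods declared above (`getDiff`, `streamComplete`):

    const reviewCommand: SlashCommand = {
      name: "review",
      description: "Review the current git diff",
      run: async function* (sdk) {
        const diff = await sdk.ide.getDiff();
        for await (const chunk of sdk.llm.streamComplete(
          `Review this diff and point out problems:\n${diff}`,
        )) {
          yield chunk;
        }
      },
    };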
+// Config
+
+type StepName =
+  | "AnswerQuestionChroma"
+  | "GenerateShellCommandStep"
+  | "EditHighlightedCodeStep"
+  | "ShareSessionStep"
+  | "CommentCodeStep"
+  | "ClearHistoryStep"
+  | "StackOverflowStep"
+  | "OpenConfigStep"
+  | "GenerateShellCommandStep"
+  | "DraftIssueStep";
+
+type ContextProviderName =
+  | "diff"
+  | "github"
+  | "terminal"
+  | "locals"
+  | "open"
+  | "google"
+  | "search"
+  | "tree"
+  | "http"
+  | "codebase"
+  | "problems"
+  | "folder"
+  | "jira"
+  | "postgres"
+  | "database"
+  | "code"
+  | "docs"
+  | "gitlab-mr"
+  | "os"
+  | "currentFile"
+  | "greptile"
+  | "outline"
+  | "continue-proxy"
+  | "highlights"
+  | "file"
+  | "issue"
+  | "repo-map"
+  | "url"
+  | string;
+
+type TemplateType =
+  | "llama2"
+  | "alpaca"
+  | "zephyr"
+  | "phi2"
+  | "phind"
+  | "anthropic"
+  | "chatml"
+  | "none"
+  | "openchat"
+  | "deepseek"
+  | "xwin-coder"
+  | "neural-chat"
+  | "codellama-70b"
+  | "llava"
+  | "gemma"
+  | "llama3";
+
+type ModelProvider =
+  | "openai"
+  | "free-trial"
+  | "anthropic"
+  | "cohere"
+  | "together"
+  | "ollama"
+  | "huggingface-tgi"
+  | "huggingface-inference-api"
+  | "kindo"
+  | "llama.cpp"
+  | "replicate"
+  | "text-gen-webui"
+  | "lmstudio"
+  | "llamafile"
+  | "gemini"
+  | "mistral"
+  | "bedrock"
+  | "bedrockimport"
+  | "sagemaker"
+  | "deepinfra"
+  | "flowise"
+  | "groq"
+  | "continue-proxy"
+  | "fireworks"
+  | "custom"
+  | "cloudflare"
+  | "deepseek"
+  | "azure"
+  | "openai-aiohttp"
+  | "msty"
+  | "watsonx"
+  | "openrouter"
+  | "sambanova"
+  | "nvidia"
+  | "vllm"
+  | "mock";
+
+export type ModelName =
+  | "AUTODETECT"
+  // OpenAI
+  | "gpt-3.5-turbo"
+  | "gpt-3.5-turbo-16k"
+  | "gpt-4"
+  | "gpt-3.5-turbo-0613"
+  | "gpt-4-32k"
+  | "gpt-4o"
+  | "gpt-4o-mini"
+  | "gpt-4-turbo"
+  | "gpt-4-turbo-preview"
+  | "gpt-4-vision-preview"
+  // Mistral
+  | "codestral-latest"
+  | "open-mistral-7b"
+  | "open-mixtral-8x7b"
+  | "open-mixtral-8x22b"
+  | "mistral-small-latest"
+  | "mistral-large-latest"
+  | "mistral-7b"
+  | "mistral-8x7b"
+  | "mistral-tiny"
+  | "mistral-small"
+  | "mistral-medium"
+  | "mistral-embed"
+  // Llama 2
+  | "llama2-7b"
+  | "llama2-13b"
+  | "llama2-70b"
+  | "codellama-7b"
+  | "codellama-13b"
+  | "codellama-34b"
+  | "codellama-70b"
+  // Llama 3
+  | "llama3-8b"
+  | "llama3-70b"
+  // Other Open-source
+  | "phi2"
+  | "phind-codellama-34b"
+  | "wizardcoder-7b"
+  | "wizardcoder-13b"
+  | "wizardcoder-34b"
+  | "zephyr-7b"
+  | "codeup-13b"
+  | "deepseek-7b"
+  | "deepseek-33b"
+  | "neural-chat-7b"
+  | "gemma-7b-it"
+  | "gemma2-9b-it"
+  // Anthropic
+  | "claude-3-5-sonnet-20240620"
+  | "claude-3-opus-20240229"
+  | "claude-3-sonnet-20240229"
+  | "claude-3-haiku-20240307"
+  | "claude-2.1"
+  | "claude-2"
+  // Cohere
+  | "command-r"
+  | "command-r-plus"
+  // Gemini
+  | "gemini-pro"
+  | "gemini-1.5-pro-latest"
+  | "gemini-1.5-pro"
+  | "gemini-1.5-flash-latest"
+  | "gemini-1.5-flash"
+  // Tab autocomplete
+  | "deepseek-1b"
+  | "starcoder-1b"
+  | "starcoder-3b"
+  | "starcoder2-3b"
+  | "stable-code-3b";
+
+export interface RequestOptions {
+  timeout?: number;
+  verifySsl?: boolean;
+  caBundlePath?: string | string[];
+  proxy?: string;
+  headers?: { [key: string]: string };
+  extraBodyProperties?: { [key: string]: any };
+  noProxy?: string[];
+  clientCertificate?: ClientCertificateOptions;
+}
+
+export interface CacheBehavior {
+  cacheSystemMessage?: boolean;
+  cacheConversation?: boolean;
+}
+
+export interface ClientCertificateOptions {
+  cert: string;
+  key: string;
+  passphrase?: string;
+}
+
+export interface StepWithParams {
+  name: StepName;
+  params: { [key: string]: any };
+}
+
+export interface ContextProviderWithParams {
+  name: ContextProviderName;
+  params: { [key: string]: any };
+}
+
+export interface SlashCommandDescription {
+  name: string;
+  description: string;
+  params?: { [key: string]: any };
+}
+
+export interface CustomCommand {
+  name: string;
+  prompt: string;
+  description: string;
+}
+
+interface BaseCompletionOptions {
+  temperature?: number;
+  topP?: number;
+  topK?: number;
+  minP?: number;
+  presencePenalty?: number;
+  frequencyPenalty?: number;
+  mirostat?: number;
+  stop?: string[];
+  maxTokens?: number;
+  numThreads?: number;
+  keepAlive?: number;
+  raw?: boolean;
+  stream?: boolean;
+}
+
+export interface ModelCapability {
+  uploadImage?: boolean;
+}
+
+export interface ModelDescription {
+  title: string;
+  provider: ModelProvider;
+  model: string;
+  apiKey?: string;
+  apiBase?: string;
+  contextLength?: number;
+  maxStopWords?: number;
+  template?: TemplateType;
+  completionOptions?: BaseCompletionOptions;
+  systemMessage?: string;
+  requestOptions?: RequestOptions;
+  promptTemplates?: { [key: string]: string };
+  capabilities?: ModelCapability;
+  cacheBehavior?: CacheBehavior;
+}
+
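Note: `ModelDescription` is the `config.json`-level model entry; `maxStopWords`, `capabilities`, and `cacheBehavior` are the newly added knobs. An illustrative entry (key values are placeholders):

    const sonnet: ModelDescription = {
      title: "Claude 3.5 Sonnet",
      provider: "anthropic",
      model: "claude-3-5-sonnet-20240620",
      apiKey: "<ANTHROPIC_API_KEY>",
      contextLength: 200000,
      cacheBehavior: { cacheSystemMessage: true },
    };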
+export type EmbeddingsProviderName =
+  | "bedrock"
+  | "huggingface-tei"
+  | "transformers.js"
+  | "ollama"
+  | "openai"
+  | "cohere"
+  | "free-trial"
+  | "gemini"
+  | "continue-proxy"
+  | "deepinfra"
+  | "nvidia"
+  | "voyage"
+  | "mistral";
+
+export interface EmbedOptions {
+  apiBase?: string;
+  apiKey?: string;
+  model?: string;
+  engine?: string;
+  apiType?: string;
+  apiVersion?: string;
+  requestOptions?: RequestOptions;
+  maxChunkSize?: number;
+  maxBatchSize?: number;
+  // AWS options
+  profile?: string;
+
+  // AWS and GCP Options
+  region?: string;
+}
+
+export interface EmbeddingsProviderDescription extends EmbedOptions {
+  provider: EmbeddingsProviderName;
+}
+
+export interface EmbeddingsProvider {
+  id: string;
+  providerName: EmbeddingsProviderName;
+  maxChunkSize: number;
+  embed(chunks: string[]): Promise<number[][]>;
+}
+
+export type RerankerName =
+  | "cohere"
+  | "voyage"
+  | "llm"
+  | "free-trial"
+  | "huggingface-tei"
+  | "continue-proxy";
+
+export interface RerankerDescription {
+  name: RerankerName;
+  params?: { [key: string]: any };
+}
+
+export interface Reranker {
+  name: string;
+  rerank(query: string, chunks: Chunk[]): Promise<number[]>;
+}
+
+export interface TabAutocompleteOptions {
+  disable: boolean;
+  useCopyBuffer: boolean;
+  useFileSuffix: boolean;
+  maxPromptTokens: number;
+  debounceDelay: number;
+  maxSuffixPercentage: number;
+  prefixPercentage: number;
+  template?: string;
+  multilineCompletions: "always" | "never" | "auto";
+  slidingWindowPrefixPercentage: number;
+  slidingWindowSize: number;
+  maxSnippetPercentage: number;
+  recentlyEditedSimilarityThreshold: number;
+  useCache: boolean;
+  onlyMyCode: boolean;
+  useOtherFiles: boolean;
+  useRecentlyEdited: boolean;
+  recentLinePrefixMatchMinLength: number;
+  disableInFiles?: string[];
+  useImports?: boolean;
+  useRootPathContext?: boolean;
+}
+
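Note: consumers typically override only a few of these autocomplete options, since the config types accept `Partial<TabAutocompleteOptions>`. An illustrative override:

    const autocompleteOverrides: Partial<TabAutocompleteOptions> = {
      multilineCompletions: "auto",
      debounceDelay: 350,
      useImports: true,
      disableInFiles: ["*.md", "*.txt"],
    };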
+export interface ContinueUIConfig {
+  codeBlockToolbarPosition?: "top" | "bottom";
+  fontSize?: number;
+  displayRawMarkdown?: boolean;
+  showChatScrollbar?: boolean;
+}
+
+interface ContextMenuConfig {
+  comment?: string;
+  docstring?: string;
+  fix?: string;
+  optimize?: string;
+  fixGrammar?: string;
+}
+
+interface ModelRoles {
+  inlineEdit?: string;
+  applyCodeBlock?: string;
+  repoMapFileSelection?: string;
+}
+
+/**
+ * Represents the configuration for a quick action in the Code Lens.
+ * Quick actions are custom commands that can be added to function and class declarations.
+ */
+interface QuickActionConfig {
   /**
-   * The LLM interface requires you to specify either `streamCompletion` or `streamChat` (or both).
+   * The title of the quick action that will display in the Code Lens.
   */
-  export type CustomLLM = RequireAtLeastOne<
-    CustomLLMWithOptionals,
-    "streamCompletion" | "streamChat"
-  >;
-
-  // IDE
-
-  export interface DiffLine {
-    type: "new" | "old" | "same";
-    line: string;
-  }
-
-  export class Problem {
-    filepath: string;
-    range: Range;
-    message: string;
-  }
-
-  export class Thread {
-    name: string;
-    id: number;
-  }
-
-  export type IdeType = "vscode" | "jetbrains";
-  export interface IdeInfo {
-    ideType: IdeType;
-    name: string;
-    version: string;
-    remoteName: string;
-    extensionVersion: string;
-  }
-
-  export interface BranchAndDir {
-    branch: string;
-    directory: string;
-  }
-
-  export interface IndexTag extends BranchAndDir {
-    artifactId: string;
-  }
-
-  export interface IDE {
-    getIdeInfo(): Promise<IdeInfo>;
-    getDiff(): Promise<string>;
-    isTelemetryEnabled(): Promise<boolean>;
-    getUniqueId(): Promise<string>;
-    getTerminalContents(): Promise<string>;
-    getDebugLocals(threadIndex: number): Promise<string>;
-    getTopLevelCallStackSources(
-      threadIndex: number,
-      stackDepth: number,
-    ): Promise<string[]>;
-    getAvailableThreads(): Promise<Thread[]>;
-    listWorkspaceContents(directory?: string): Promise<string[]>;
-    listFolders(): Promise<string[]>;
-    getWorkspaceDirs(): Promise<string[]>;
-    getWorkspaceConfigs(): Promise<ContinueRcJson[]>;
-    writeFile(path: string, contents: string): Promise<void>;
-    showVirtualFile(title: string, contents: string): Promise<void>;
-    getContinueDir(): Promise<string>;
-    openFile(path: string): Promise<void>;
-    runCommand(command: string): Promise<void>;
-    saveFile(filepath: string): Promise<void>;
-    readFile(filepath: string): Promise<string>;
-    readRangeInFile(filepath: string, range: Range): Promise<string>;
-    showLines(
-      filepath: string,
-      startLine: number,
-      endLine: number,
-    ): Promise<void>;
-    showDiff(
-      filepath: string,
-      newContents: string,
-      stepIndex: number,
-    ): Promise<void>;
-    getOpenFiles(): Promise<string[]>;
-    getPinnedFiles(): Promise<string[]>;
-    getSearchResults(query: string): Promise<string>;
-    subprocess(command: string): Promise<[string, string]>;
-    getProblems(filepath?: string | undefined): Promise<Problem[]>;
-    getBranch(dir: string): Promise<string>;
-    getTags(artifactId: string): Promise<IndexTag[]>;
-    getRepoName(dir: string): Promise<string | undefined>;
-  }
-
-  // Slash Commands
-
-  export interface ContinueSDK {
-    ide: IDE;
-    llm: ILLM;
-    addContextItem: (item: ContextItemWithId) => void;
-    history: ChatMessage[];
-    input: string;
-    params?: { [key: string]: any } | undefined;
-    contextItems: ContextItemWithId[];
-    selectedCode: RangeInFile[];
-    config: ContinueConfig;
-    fetch: FetchFunction;
-  }
-
-  export interface SlashCommand {
-    name: string;
-    description: string;
-    params?: { [key: string]: any };
-    run: (sdk: ContinueSDK) => AsyncGenerator<string | undefined>;
-  }
-
-  // Config
-
-  type StepName =
-    | "AnswerQuestionChroma"
-    | "GenerateShellCommandStep"
-    | "EditHighlightedCodeStep"
-    | "ShareSessionStep"
-    | "CommentCodeStep"
-    | "ClearHistoryStep"
-    | "StackOverflowStep"
-    | "OpenConfigStep"
-    | "GenerateShellCommandStep"
-    | "DraftIssueStep";
-
-  type ContextProviderName =
-    | "diff"
-    | "github"
-    | "terminal"
-    | "locals"
-    | "open"
-    | "google"
-    | "search"
-    | "tree"
-    | "http"
-    | "codebase"
-    | "problems"
-    | "folder"
-    | "jira"
-    | "postgres"
-    | "database"
-    | "code"
-    | "docs"
-    | "gitlab-mr"
-    | "os";
-
-  type TemplateType =
-    | "llama2"
-    | "alpaca"
-    | "zephyr"
-    | "phi2"
-    | "phind"
-    | "anthropic"
-    | "bedrock"
-    | "chatml"
-    | "none"
-    | "openchat"
-    | "deepseek"
-    | "xwin-coder"
-    | "neural-chat"
-    | "codellama-70b"
-    | "llava"
-    | "gemma";
-
-  type ModelProvider =
-    | "openai"
-    | "free-trial"
-    | "anthropic"
-    | "cohere"
-    | "together"
-    | "ollama"
-    | "huggingface-tgi"
-    | "huggingface-inference-api"
-    | "llama.cpp"
-    | "replicate"
-    | "text-gen-webui"
-    | "lmstudio"
-    | "llamafile"
-    | "gemini"
-    | "mistral"
-    | "bedrock"
-    | "deepinfra"
-    | "flowise"
-    | "groq"
-    | "custom";
-
-  export type ModelName =
-    | "AUTODETECT"
-    // OpenAI
-    | "gpt-3.5-turbo"
-    | "gpt-3.5-turbo-16k"
-    | "gpt-4"
-    | "gpt-3.5-turbo-0613"
-    | "gpt-4-32k"
-    | "gpt-4-turbo"
-    | "gpt-4o"
-    | "gpt-4-turbo-preview"
-    | "gpt-4-vision-preview"
-    // Mistral
-    | "mistral-7b"
-    | "mistral-8x7b"
-    // Llama 2
-    | "llama2-7b"
-    | "llama2-13b"
-    | "llama2-70b"
-    | "codellama-7b"
-    | "codellama-13b"
-    | "codellama-34b"
-    | "codellama-70b"
-    // Llama 3
-    | "llama3-8b"
-    | "llama3-70b"
-    // Other Open-source
-    | "phi2"
-    | "phind-codellama-34b"
-    | "wizardcoder-7b"
-    | "wizardcoder-13b"
-    | "wizardcoder-34b"
-    | "zephyr-7b"
-    | "codeup-13b"
-    | "deepseek-7b"
-    | "deepseek-33b"
-    | "neural-chat-7b"
-    // Anthropic
-    | "claude-2"
-    | "claude-3-opus-20240229"
-    | "claude-3-sonnet-20240229"
-    | "claude-3-haiku-20240307"
-    | "claude-2.1"
-    // Cohere
-    | "command-r"
-    | "command-r-plus"
-    // Gemini
-    | "gemini-pro"
-    | "gemini-1.5-pro-latest"
-    // Mistral
-    | "mistral-tiny"
-    | "mistral-small"
-    | "mistral-medium"
-    // Tab autocomplete
-    | "deepseek-1b"
-    | "starcoder-1b"
-    | "starcoder-3b"
-    | "starcoder2-3b"
-    | "stable-code-3b";
-
-  export interface RequestOptions {
-    timeout?: number;
-    verifySsl?: boolean;
-    caBundlePath?: string | string[];
-    proxy?: string;
-    headers?: { [key: string]: string };
-    extraBodyProperties?: { [key: string]: any };
-  }
-
-  export interface StepWithParams {
-    name: StepName;
-    params: { [key: string]: any };
-  }
-
-  export interface ContextProviderWithParams {
-    name: ContextProviderName;
-    params: { [key: string]: any };
-  }
-
-  export interface SlashCommandDescription {
-    name: string;
-    description: string;
-    params?: { [key: string]: any };
-  }
-
-  export interface CustomCommand {
-    name: string;
-    prompt: string;
-    description: string;
-  }
-
-  interface BaseCompletionOptions {
-    temperature?: number;
-    topP?: number;
-    topK?: number;
-    minP?: number;
-    presencePenalty?: number;
-    frequencyPenalty?: number;
-    mirostat?: number;
-    stop?: string[];
-    maxTokens?: number;
-    numThreads?: number;
-    keepAlive?: number;
-    raw?: boolean;
-    stream?: boolean;
-  }
-
-  export interface ModelDescription {
-    title: string;
-    provider: ModelProvider;
-    model: string;
-    apiKey?: string;
-    apiBase?: string;
-    contextLength?: number;
-    template?: TemplateType;
-    completionOptions?: BaseCompletionOptions;
-    systemMessage?: string;
-    requestOptions?: RequestOptions;
-    promptTemplates?: { [key: string]: string };
-  }
-
-  export type EmbeddingsProviderName =
-    | "transformers.js"
-    | "ollama"
-    | "openai"
-    | "cohere"
-    | "free-trial";
-
-  export interface EmbedOptions {
-    apiBase?: string;
-    apiKey?: string;
-    model?: string;
-    requestOptions?: RequestOptions;
-  }
-
-  export interface EmbeddingsProviderDescription extends EmbedOptions {
-    provider: EmbeddingsProviderName;
-  }
-
-  export interface EmbeddingsProvider {
-    id: string;
-    embed(chunks: string[]): Promise<number[][]>;
-  }
-
-  export type RerankerName = "cohere" | "voyage" | "llm" | "free-trial";
-
-  export interface RerankerDescription {
-    name: RerankerName;
-    params?: { [key: string]: any };
-  }
-
-  export interface Reranker {
-    name: string;
-    rerank(query: string, chunks: Chunk[]): Promise<number[]>;
-  }
-
-  export interface TabAutocompleteOptions {
-    disable: boolean;
-    useCopyBuffer: boolean;
-    useSuffix: boolean;
-    maxPromptTokens: number;
-    debounceDelay: number;
-    maxSuffixPercentage: number;
-    prefixPercentage: number;
-    template?: string;
-    multilineCompletions: "always" | "never" | "auto";
-    slidingWindowPrefixPercentage: number;
-    slidingWindowSize: number;
-    maxSnippetPercentage: number;
-    recentlyEditedSimilarityThreshold: number;
-    useCache: boolean;
-    onlyMyCode: boolean;
-    useOtherFiles: boolean;
-    disableInFiles?: string[];
-  }
-
-  export interface ContinueUIConfig {
-    codeBlockToolbarPosition?: "top" | "bottom";
-  }
-
-  interface ContextMenuConfig {
-    comment?: string;
-    docstring?: string;
-    fix?: string;
-    optimize?: string;
-    fixGrammar?: string;
-  }
-
-  interface ModelRoles {
-    inlineEdit?: string;
-  }
-
-  interface ExperimentalConfig {
-    contextMenuPrompts?: ContextMenuConfig;
-    modelRoles?: ModelRoles;
-  }
-
-  export interface SerializedContinueConfig {
-    env?: string[];
-    allowAnonymousTelemetry?: boolean;
-    models: ModelDescription[];
-    systemMessage?: string;
-    completionOptions?: BaseCompletionOptions;
-    requestOptions?: RequestOptions;
-    slashCommands?: SlashCommandDescription[];
-    customCommands?: CustomCommand[];
-    contextProviders?: ContextProviderWithParams[];
-    disableIndexing?: boolean;
-    disableSessionTitles?: boolean;
-    userToken?: string;
-    embeddingsProvider?: EmbeddingsProviderDescription;
-    tabAutocompleteModel?: ModelDescription;
-    tabAutocompleteOptions?: Partial<TabAutocompleteOptions>;
-    ui?: ContinueUIConfig;
-    reranker?: RerankerDescription;
-    experimental?: ExperimentalConfig;
-  }
-
-  export type ConfigMergeType = "merge" | "overwrite";
-
-  export type ContinueRcJson = Partial<SerializedContinueConfig> & {
-    mergeBehavior: ConfigMergeType;
-  };
-
-  export interface Config {
-    /** If set to true, Continue will collect anonymous usage data to improve the product. If set to false, we will collect nothing. Read here to learn more: https://docs.continue.dev/telemetry */
-    allowAnonymousTelemetry?: boolean;
-    /** Each entry in this array will originally be a ModelDescription, the same object from your config.json, but you may add CustomLLMs.
-     * A CustomLLM requires you only to define an AsyncGenerator that calls the LLM and yields string updates. You can choose to define either `streamCompletion` or `streamChat` (or both).
-     * Continue will do the rest of the work to construct prompt templates, handle context items, prune context, etc.
-     */
-    models: (CustomLLM | ModelDescription)[];
-    /** A system message to be followed by all of your models */
-    systemMessage?: string;
-    /** The default completion options for all models */
-    completionOptions?: BaseCompletionOptions;
-    /** Request options that will be applied to all models and context providers */
-    requestOptions?: RequestOptions;
-    /** The list of slash commands that will be available in the sidebar */
-    slashCommands?: SlashCommand[];
-    /** Each entry in this array will originally be a ContextProviderWithParams, the same object from your config.json, but you may add CustomContextProviders.
-     * A CustomContextProvider requires you only to define a title and getContextItems function. When you type '@title ', Continue will call `getContextItems(query)`.
-     */
-    contextProviders?: (CustomContextProvider | ContextProviderWithParams)[];
-    /** If set to true, Continue will not index your codebase for retrieval */
-    disableIndexing?: boolean;
-    /** If set to true, Continue will not make extra requests to the LLM to generate a summary title of each session. */
-    disableSessionTitles?: boolean;
-    /** An optional token to identify a user. Not used by Continue unless you write custom configuration that requires such a token */
-    userToken?: string;
-    /** The provider used to calculate embeddings. If left empty, Continue will use transformers.js to calculate the embeddings with all-MiniLM-L6-v2 */
-    embeddingsProvider?: EmbeddingsProviderDescription | EmbeddingsProvider;
-    /** The model that Continue will use for tab autocompletions. */
-    tabAutocompleteModel?: CustomLLM | ModelDescription;
-    /** Options for tab autocomplete */
-    tabAutocompleteOptions?: Partial<TabAutocompleteOptions>;
-    /** UI styles customization */
-    ui?: ContinueUIConfig;
-    /** Options for the reranker */
-    reranker?: RerankerDescription | Reranker;
-    /** Experimental configuration */
-    experimental?: ExperimentalConfig;
-  }
-
-  export interface ContinueConfig {
-    allowAnonymousTelemetry?: boolean;
-    models: ILLM[];
-    systemMessage?: string;
-    completionOptions?: BaseCompletionOptions;
-    requestOptions?: RequestOptions;
-    slashCommands?: SlashCommand[];
-    contextProviders?: IContextProvider[];
-    disableSessionTitles?: boolean;
-    disableIndexing?: boolean;
-    userToken?: string;
-    embeddingsProvider: EmbeddingsProvider;
-    tabAutocompleteModel?: ILLM;
-    tabAutocompleteOptions?: Partial<TabAutocompleteOptions>;
-    ui?: ContinueUIConfig;
-    reranker?: Reranker;
-    experimental?: ExperimentalConfig;
-  }
-
-  export interface BrowserSerializedContinueConfig {
-    allowAnonymousTelemetry?: boolean;
-    models: ModelDescription[];
-    systemMessage?: string;
-    completionOptions?: BaseCompletionOptions;
-    requestOptions?: RequestOptions;
-    slashCommands?: SlashCommandDescription[];
-    contextProviders?: ContextProviderDescription[];
-    disableIndexing?: boolean;
-    disableSessionTitles?: boolean;
-    userToken?: string;
-    embeddingsProvider?: string;
-    ui?: ContinueUIConfig;
-    reranker?: RerankerDescription;
-    experimental?: ExperimentalConfig;
-  }
-}
-
-export {};
+  title: string;
+
+  /**
+   * The prompt that will be sent to the model when the quick action is invoked,
+   * with the function or class body concatenated.
+   */
+  prompt: string;
+
+  /**
+   * If `true`, the result of the quick action will be sent to the chat panel.
+   * If `false`, the streamed result will be inserted into the document.
+   *
+   * Defaults to `false`.
+   */
+  sendToChat: boolean;
+}
+
+export type DefaultContextProvider = ContextProviderWithParams & {
+  query?: string;
+};
+
+interface ExperimentalConfig {
+  contextMenuPrompts?: ContextMenuConfig;
+  modelRoles?: ModelRoles;
+  defaultContext?: DefaultContextProvider[];
+  promptPath?: string;
+
+  /**
+   * Quick actions are a way to add custom commands to the Code Lens of
+   * function and class declarations.
+   */
+  quickActions?: QuickActionConfig[];
+
+  /**
+   * Automatically read LLM chat responses aloud using system TTS models
+   */
+  readResponseTTS?: boolean;
+
+  /**
+   * If set to true, we will attempt to pull down and install an instance of Chromium
+   * that is compatible with the current version of Puppeteer.
+   * This is needed to crawl a large number of documentation sites that are dynamically rendered.
+   */
+  useChromiumForDocsCrawling?: boolean;
+}
+
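Note: `quickActions` wires `QuickActionConfig` entries into the Code Lens. A sketch of an experimental block (the prompt text is invented):

    const experimental: ExperimentalConfig = {
      quickActions: [
        {
          title: "Explain",
          prompt: "Explain what the following declaration does:",
          sendToChat: true,
        },
      ],
      readResponseTTS: false,
    };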
+interface AnalyticsConfig {
+  type: string;
+  url?: string;
+  clientKey?: string;
+}
+
+// config.json
+export interface SerializedContinueConfig {
+  env?: string[];
+  allowAnonymousTelemetry?: boolean;
+  models: ModelDescription[];
+  systemMessage?: string;
+  completionOptions?: BaseCompletionOptions;
+  requestOptions?: RequestOptions;
+  slashCommands?: SlashCommandDescription[];
+  customCommands?: CustomCommand[];
+  contextProviders?: ContextProviderWithParams[];
+  disableIndexing?: boolean;
+  disableSessionTitles?: boolean;
+  userToken?: string;
+  embeddingsProvider?: EmbeddingsProviderDescription;
+  tabAutocompleteModel?: ModelDescription | ModelDescription[];
+  tabAutocompleteOptions?: Partial<TabAutocompleteOptions>;
+  ui?: ContinueUIConfig;
+  reranker?: RerankerDescription;
+  experimental?: ExperimentalConfig;
+  analytics?: AnalyticsConfig;
+  docs?: SiteIndexingConfig[];
+}
+
+export type ConfigMergeType = "merge" | "overwrite";
+
+export type ContinueRcJson = Partial<SerializedContinueConfig> & {
+  mergeBehavior: ConfigMergeType;
+};
+
+// config.ts - give users simplified interfaces
+export interface Config {
+  /** If set to true, Continue will collect anonymous usage data to improve the product. If set to false, we will collect nothing. Read here to learn more: https://docs.continue.dev/telemetry */
+  allowAnonymousTelemetry?: boolean;
+  /** Each entry in this array will originally be a ModelDescription, the same object from your config.json, but you may add CustomLLMs.
+   * A CustomLLM requires you only to define an AsyncGenerator that calls the LLM and yields string updates. You can choose to define either `streamCompletion` or `streamChat` (or both).
+   * Continue will do the rest of the work to construct prompt templates, handle context items, prune context, etc.
+   */
+  models: (CustomLLM | ModelDescription)[];
+  /** A system message to be followed by all of your models */
+  systemMessage?: string;
+  /** The default completion options for all models */
+  completionOptions?: BaseCompletionOptions;
+  /** Request options that will be applied to all models and context providers */
+  requestOptions?: RequestOptions;
+  /** The list of slash commands that will be available in the sidebar */
+  slashCommands?: SlashCommand[];
+  /** Each entry in this array will originally be a ContextProviderWithParams, the same object from your config.json, but you may add CustomContextProviders.
+   * A CustomContextProvider requires you only to define a title and getContextItems function. When you type '@title ', Continue will call `getContextItems(query)`.
+   */
+  contextProviders?: (CustomContextProvider | ContextProviderWithParams)[];
+  /** If set to true, Continue will not index your codebase for retrieval */
+  disableIndexing?: boolean;
+  /** If set to true, Continue will not make extra requests to the LLM to generate a summary title of each session. */
+  disableSessionTitles?: boolean;
+  /** An optional token to identify a user. Not used by Continue unless you write custom configuration that requires such a token */
+  userToken?: string;
+  /** The provider used to calculate embeddings. If left empty, Continue will use transformers.js to calculate the embeddings with all-MiniLM-L6-v2 */
+  embeddingsProvider?: EmbeddingsProviderDescription | EmbeddingsProvider;
+  /** The model that Continue will use for tab autocompletions. */
+  tabAutocompleteModel?:
+    | CustomLLM
+    | ModelDescription
+    | (CustomLLM | ModelDescription)[];
+  /** Options for tab autocomplete */
+  tabAutocompleteOptions?: Partial<TabAutocompleteOptions>;
+  /** UI styles customization */
+  ui?: ContinueUIConfig;
+  /** Options for the reranker */
+  reranker?: RerankerDescription | Reranker;
+  /** Experimental configuration */
+  experimental?: ExperimentalConfig;
+  /** Analytics configuration */
+  analytics?: AnalyticsConfig;
+}
+
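Note: the widened `tabAutocompleteModel` union means a config can now supply a list of autocomplete models. A minimal sketch of a `Config` value (model entries are placeholders; how Continue loads it is outside this file):

    const config: Config = {
      models: [{ title: "GPT-4o", provider: "openai", model: "gpt-4o" }],
      tabAutocompleteModel: [
        { title: "StarCoder2", provider: "ollama", model: "starcoder2-3b" },
      ],
    };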
+// in the actual Continue source code
+export interface ContinueConfig {
+  allowAnonymousTelemetry?: boolean;
+  models: ILLM[];
+  systemMessage?: string;
+  completionOptions?: BaseCompletionOptions;
+  requestOptions?: RequestOptions;
+  slashCommands?: SlashCommand[];
+  contextProviders?: IContextProvider[];
+  disableSessionTitles?: boolean;
+  disableIndexing?: boolean;
+  userToken?: string;
+  embeddingsProvider: EmbeddingsProvider;
+  tabAutocompleteModels?: ILLM[];
+  tabAutocompleteOptions?: Partial<TabAutocompleteOptions>;
+  ui?: ContinueUIConfig;
+  reranker?: Reranker;
+  experimental?: ExperimentalConfig;
+  analytics?: AnalyticsConfig;
+  docs?: SiteIndexingConfig[];
+}
+
+export interface BrowserSerializedContinueConfig {
+  allowAnonymousTelemetry?: boolean;
+  models: ModelDescription[];
+  systemMessage?: string;
+  completionOptions?: BaseCompletionOptions;
+  requestOptions?: RequestOptions;
+  slashCommands?: SlashCommandDescription[];
+  contextProviders?: ContextProviderDescription[];
+  disableIndexing?: boolean;
+  disableSessionTitles?: boolean;
+  userToken?: string;
+  embeddingsProvider?: string;
+  ui?: ContinueUIConfig;
+  reranker?: RerankerDescription;
+  experimental?: ExperimentalConfig;
+  analytics?: AnalyticsConfig;
+}
\ No newline at end of file