Skip to content

Commit

Permalink
Merge pull request #6 from matiasvlevi/dev
Browse files Browse the repository at this point in the history
Merge Dev
  • Loading branch information
matiasvlevi authored Apr 15, 2023
2 parents ecb930a + 984c166 commit 864d2b5
Show file tree
Hide file tree
Showing 7 changed files with 96 additions and 82 deletions.
2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "gptdoc",
"version": "1.0.8",
"version": "1.0.9",
"license": "MIT",
"author": "Matias Vazquez-Levi",
"description": "Generate documentation comments with the help of OpenAI's models",
Expand Down
2 changes: 1 addition & 1 deletion src/config/makeConfig.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import { Config } from './types'
import { CLIArgs } from "../cli";
import { parseEnv } from './parseEnv';
import { Models } from '../gpt';
import { Models } from '../models';

/**
* Merge a configuration with the defaults
Expand Down
11 changes: 8 additions & 3 deletions src/core/GPTDocument.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,10 @@ import {
JSDOCComment
} from './regex';

import {
Models
} from '../models'

import {
GPT_DEBUG_COMMENT,
GPT_PROMPT,
Expand Down Expand Up @@ -134,12 +138,13 @@ export class GPTDocument {
// Generate a prompt
prompt = GPT_PROMPT(project.config, this.meta.kind, this.source);
prompt_tokens = GPTDocument.estimateTokenCount(prompt);

// Length guard,
// do not send a request to OpenAI if prompt is too lengthy
if (prompt_tokens >= 4000) {
const { maxTokens } = Models[project.config.openai.model];
if (prompt_tokens >= maxTokens) {
Logger.error(
`Your prompt has ${prompt_tokens} tokens, maximum is 4000`, true
`Your prompt has ${prompt_tokens} tokens, maximum is ${maxTokens}`, true
);
return source;
}
Expand Down
14 changes: 7 additions & 7 deletions src/core/Project.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,11 @@ import { File } from './File'
import { Config } from "../config/index";

import * as Logger from "../utils/Logger";
import { Models, PriceRange } from "../gpt";

import {
Models,
PriceRange
} from '../models'

/** @gpt */
export class Project {
Expand Down Expand Up @@ -132,19 +136,15 @@ export class Project {
this.prompt_tokens += value;
}


/** Add `value` to the running total of response tokens for this project. */
addResponseTokens(value: number) {
    this.response_tokens = this.response_tokens + value;
}

getTokenCost(): number | PriceRange {
let price: number | PriceRange = 0.02;

for (let model in Models) {
if (this.config.openai.model.includes(model)) {
price = Models[model].price;
break;
}
if (this.config.openai.model in Models) {
price = Models[this.config.openai.model].price;
}

return price;
Expand Down
69 changes: 0 additions & 69 deletions src/gpt.ts
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,6 @@ export async function OpenAICompletion(_config: Config, prompt: string) {
GPT_CHAT_COMPLETION_CONFIG(config, prompt):
GPT_COMPLETION_CONFIG(config, prompt);



const res = await fetch(
`https://api.openai.com/v1/${_config.chat ? 'chat/completions' : 'completions'}`, {
headers: {
Expand All @@ -86,70 +84,3 @@ export async function OpenAICompletion(_config: Config, prompt: string) {
return await res.json();
}

export interface PriceRange {
prompt: number;
response: number
}

interface ModelMeta {
price: number | PriceRange;
isChatModel: boolean;
}

/**
* Lookup table for pricing
*/
export const Models: { [key:string]: ModelMeta } = {
'text-ada-001': {
price: 0.0004,
isChatModel: false
},
'text-babbage-001': {
price: 0.0005,
isChatModel: false
},
'text-curie-001': {
price: 0.002,
isChatModel: false
},
'text-davinci-002': {
price: 0.02,
isChatModel: false
},
'text-davinci-003': {
price: 0.02,
isChatModel: false
},
'gpt-3.5-turbo': {
price: 0.002,
isChatModel: true
},
'gpt-4': {
price: {
prompt: 0.03,
response: 0.06
},
isChatModel: true
},
'gpt-4-0314': {
price: {
prompt: 0.03,
response: 0.06
},
isChatModel: true
},
'gpt-4-32k': {
price: {
prompt: 0.06,
response: 0.12
},
isChatModel: true
},
'gpt-4-32k-0314': {
price: {
prompt: 0.06,
response: 0.12
},
isChatModel: true
}
}
78 changes: 78 additions & 0 deletions src/models.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
/**
 * Split pricing for models that bill prompt and response tokens
 * at different rates (presumably USD per 1K tokens — confirm
 * against OpenAI's published pricing).
 */
export interface PriceRange {
    prompt: number;
    response: number;
}

/**
 * Metadata for a single OpenAI model.
 * Exported so consumers of `Models` can name the type of its entries.
 */
export interface ModelMeta {
    /** Flat price, or a PriceRange when prompt/response are billed separately. */
    price: number | PriceRange;
    /** Token budget a prompt is checked against before a request is sent. */
    maxTokens: number;
    /** True when the model is driven through the chat completions endpoint. */
    isChatModel: boolean;
}

/**
 * Lookup table of per-model metadata: pricing, token limit and
 * whether the model is a chat model. Keyed by OpenAI model name.
 */
export const Models: { [key: string]: ModelMeta } = {
    'text-ada-001': {
        price: 0.0004,
        maxTokens: 2000,
        isChatModel: false
    },
    'text-babbage-001': {
        price: 0.0005,
        maxTokens: 2000,
        isChatModel: false
    },
    'text-curie-001': {
        price: 0.002,
        maxTokens: 2000,
        isChatModel: false
    },
    'text-davinci-002': {
        price: 0.02,
        maxTokens: 4000,
        isChatModel: false
    },
    'text-davinci-003': {
        price: 0.02,
        maxTokens: 4000,
        isChatModel: false
    },
    'gpt-3.5-turbo': {
        price: 0.002,
        maxTokens: 4000,
        isChatModel: true
    },
    'gpt-4': {
        price: {
            prompt: 0.03,
            response: 0.06
        },
        maxTokens: 8000,
        isChatModel: true
    },
    'gpt-4-0314': {
        price: {
            prompt: 0.03,
            response: 0.06
        },
        maxTokens: 8000,
        isChatModel: true
    },
    'gpt-4-32k': {
        price: {
            prompt: 0.06,
            response: 0.12
        },
        maxTokens: 32000,
        isChatModel: true
    },
    'gpt-4-32k-0314': {
        price: {
            prompt: 0.06,
            response: 0.12
        },
        maxTokens: 32000,
        isChatModel: true
    }
}
2 changes: 1 addition & 1 deletion src/utils/Logger.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import { PriceRange } from "../gpt";
import { PriceRange } from "../models";

/** @gpt */
export function error(text: string, exit: boolean = false) {
Expand Down

0 comments on commit 864d2b5

Please sign in to comment.