Merge pull request #50 from zhayujie/feat-gpt-3.5
feat: support gpt-3.5 model
zhayujie authored Mar 2, 2023
2 parents 76a4be7 + bb52f90 commit a593cd4
Showing 5 changed files with 230 additions and 13 deletions.
46 changes: 35 additions & 11 deletions README.md
@@ -4,7 +4,9 @@

**Models:**

- [x] [ChatGPT](https://github.com/zhayujie/bot-on-anything#1chatgpt)
- [x] [ChatGPT (gpt-3.5)](https://github.com/zhayujie/bot-on-anything#1chatgpt)
- [x] [GPT-3.0](https://github.com/zhayujie/bot-on-anything#1chatgpt)


**Applications:**

@@ -49,7 +51,7 @@ cp config-template.json config.json
```json
{
  "model": {
    "type" : "openai",   # the model to use
    "type" : "chatgpt",  # the model to use
    "openai": {
      # OpenAI configuration
    }
@@ -72,39 +74,61 @@

## II. Choose a Model

### 1.ChatGPT
### 1. ChatGPT (gpt-3.5)

The model used is `gpt-3.5-turbo`; see the [official documentation](https://platform.openai.com/docs/guides/chat) for details.

#### 1.1 Register an OpenAI account
#### (1) Register an OpenAI account

Go to the [OpenAI signup page](https://beta.openai.com/signup) to create an account; this [tutorial](https://www.cnblogs.com/damugua/p/16969508.html) shows how to receive the verification code with a virtual phone number. Once the account is created, go to the [API keys page](https://beta.openai.com/account/api-keys), create an API Key, and save it; you will need to configure this key in the project later.

> The conversation model used in the project is davinci, billed at roughly $0.02 per 750 words (request plus reply); image generation costs $0.016 per image. A newly created account comes with a free $18 credit; once it is used up, you can re-register with a different email address.
#### 1.2 Install dependencies
#### (2) Install dependencies

```bash
pip3 install --upgrade openai
```
> Note: if the installation fails, upgrade pip first: `pip3 install --upgrade pip`
> Note: openai version `0.27.0` or later is required. If the installation fails, upgrade pip first: `pip3 install --upgrade pip`
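To confirm that the installed version meets this requirement, a minimal check (assuming Python 3.8+ for `importlib.metadata`):

```python
# Print the installed openai package version; it should be 0.27.0 or later.
from importlib.metadata import version

print(version("openai"))
```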

#### 1.3 Configuration options
#### (3) Configuration options

```json
{
  "model": {
    "type" : "chatgpt",

    "openai": {
      "api_key": "YOUR API KEY",
      "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。"
    }
  }
}
```
+ `api_key`: the `OpenAI API KEY` you created when registering the account above
+ `character_desc`: a passage that you say to ChatGPT; it will remember it and treat it as its persona, so you can give it any personality you like (see the sketch below)
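For reference, the sketch below shows roughly how these two fields are used by the new `chatgpt` model in this PR (see `model/chatgpt/chatgpt_model.py`): `character_desc` becomes the system message, and the conversation is sent to `gpt-3.5-turbo` through the ChatCompletion API. The hard-coded key and the example question are placeholders for illustration only.

```python
import openai

openai.api_key = "YOUR API KEY"  # the api_key configured above (placeholder)
character_desc = "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。"

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": character_desc},  # persona / system prompt
        {"role": "user", "content": "你好"},             # example question (placeholder)
    ],
)
print(response.choices[0]["message"]["content"])
```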
### 2. GPT-3.0
The model used is `text-davinci-003`; see the [official documentation](https://platform.openai.com/docs/guides/completion/introduction) for details.
The setup steps are basically the same as for gpt-3.5 above:
1. Register an OpenAI account and configure the API KEY
2. Install the openai dependency; a version above `0.25.0` is required
3. Set the `type` field in `config.json` to `openai`
```json
{
  "model": {
    "type" : "openai",

    "openai": {
      "api_key": "YOUR API KEY",
      "conversation_max_tokens": 1000,
      "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。"
    }
  }
}
```
+ `api_key`: the `OpenAI API KEY` you created when registering the account above
+ `conversation_max_tokens`: the maximum amount of conversation context (in characters) that can be remembered; one question and its answer form one exchange, and when the accumulated conversation exceeds the limit, the oldest exchange is removed first (see the sketch after this list)
+ `character_desc`: a passage that you say to the bot; it will remember it and treat it as its persona, so you can give it any personality you like
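The trimming behavior described for `conversation_max_tokens` can be pictured with the sketch below. It only illustrates the rule stated above (count the remembered text and drop the oldest question/answer pair first); it is not the project's actual implementation, and the helper name is made up.

```python
def trim_history(history, conversation_max_tokens):
    """history: list of (question, answer) pairs, oldest first."""
    def total_chars(pairs):
        return sum(len(q) + len(a) for q, a in pairs)

    # Drop the earliest exchange until the remembered text fits within the limit.
    while history and total_chars(history) > conversation_max_tokens:
        history.pop(0)
    return history
```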
## III. Choose an Application
1 change: 1 addition & 0 deletions common/const.py
@@ -10,3 +10,4 @@

# model
OPEN_AI = "openai"
CHATGPT = "chatgpt"
2 changes: 1 addition & 1 deletion config-template.json
@@ -1,6 +1,6 @@
{
  "model": {
    "type" : "openai",
    "type" : "chatgpt",
    "openai": {
      "api_key": "YOUR API KEY",
      "conversation_max_tokens": 1000,
186 changes: 186 additions & 0 deletions model/chatgpt/chatgpt_model.py
@@ -0,0 +1,186 @@
# encoding:utf-8

from model.model import Model
from config import model_conf
from common import const
from common import log
import openai
import time

user_session = dict()

# OpenAI conversation model API (available)
class ChatGPTModel(Model):
    def __init__(self):
        openai.api_key = model_conf(const.OPEN_AI).get('api_key')

    def reply(self, query, context=None):
        # acquire reply content
        if not context or not context.get('type') or context.get('type') == 'TEXT':
            log.info("[OPEN_AI] query={}".format(query))
            from_user_id = context['from_user_id']
            if query == '#清除记忆':  # the "#clear memory" command
                Session.clear_session(from_user_id)
                return '记忆已清除'  # "memory cleared"

            new_query = Session.build_session_query(query, from_user_id)
            log.debug("[OPEN_AI] session query={}".format(new_query))

            # if context.get('stream'):
            #     # reply in stream
            #     return self.reply_text_stream(query, new_query, from_user_id)

            reply_content = self.reply_text(new_query, from_user_id, 0)
            log.debug("[OPEN_AI] new_query={}, user={}, reply_cont={}".format(new_query, from_user_id, reply_content))
            if reply_content:
                Session.save_session(query, reply_content, from_user_id)
            return reply_content

        elif context.get('type', None) == 'IMAGE_CREATE':
            return self.create_img(query, 0)

    def reply_text(self, query, user_id, retry_count=0):
        try:
            response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",   # name of the chat model
                messages=query,
                temperature=0.9,         # in [0, 1]; larger values make replies less deterministic
                max_tokens=1200,         # maximum number of tokens in the reply
                top_p=1,
                frequency_penalty=0.0,   # in [-2, 2]; larger values favor new content
                presence_penalty=0.0,    # in [-2, 2]; larger values favor new topics
            )
            # res_content = response.choices[0]['text'].strip().replace('<|endoftext|>', '')
            log.info(response.choices[0]['message']['content'])
            # log.info("[OPEN_AI] reply={}".format(res_content))
            return response.choices[0]['message']['content']
        except openai.error.RateLimitError as e:
            # rate limit exception
            log.warn(e)
            if retry_count < 1:
                time.sleep(5)
                log.warn("[OPEN_AI] RateLimit exceed, retry #{}".format(retry_count+1))
                return self.reply_text(query, user_id, retry_count+1)
            else:
                return "提问太快啦,请休息一下再问我吧"  # "you're asking too fast, please take a break and ask me later"
        except Exception as e:
            # unknown exception
            log.exception(e)
            Session.clear_session(user_id)
            return "请再问我一次吧"  # "please ask me again"


    def reply_text_stream(self, query, new_query, user_id, retry_count=0):
        # Note: the streaming path is commented out in reply() above; this method still uses the Completion API.
        try:
            res = openai.Completion.create(
                model="text-davinci-003",  # name of the completion model
                prompt=new_query,
                temperature=0.9,           # in [0, 1]; larger values make replies less deterministic
                max_tokens=4096,           # maximum number of tokens in the reply
                top_p=1,
                frequency_penalty=0.0,     # in [-2, 2]; larger values favor new content
                presence_penalty=0.0,      # in [-2, 2]; larger values favor new topics
                stop=["\n\n\n"],
                stream=True
            )
            return self._process_reply_stream(query, res, user_id)

        except openai.error.RateLimitError as e:
            # rate limit exception
            log.warn(e)
            if retry_count < 1:
                time.sleep(5)
                log.warn("[OPEN_AI] RateLimit exceed, retry #{}".format(retry_count+1))
                return self.reply_text(query, user_id, retry_count+1)
            else:
                return "提问太快啦,请休息一下再问我吧"  # "you're asking too fast, please take a break and ask me later"
        except Exception as e:
            # unknown exception
            log.exception(e)
            Session.clear_session(user_id)
            return "请再问我一次吧"  # "please ask me again"


    def _process_reply_stream(
            self,
            query: str,
            reply: dict,
            user_id: str
    ) -> str:
        full_response = ""
        for response in reply:
            if response.get("choices") is None or len(response["choices"]) == 0:
                raise Exception("OpenAI API returned no choices")
            if response["choices"][0].get("finish_details") is not None:
                break
            if response["choices"][0].get("text") is None:
                raise Exception("OpenAI API returned no text")
            if response["choices"][0]["text"] == "<|endoftext|>":
                break
            yield response["choices"][0]["text"]
            full_response += response["choices"][0]["text"]
        if query and full_response:
            Session.save_session(query, full_response, user_id)


    def create_img(self, query, retry_count=0):
        try:
            log.info("[OPEN_AI] image_query={}".format(query))
            response = openai.Image.create(
                prompt=query,    # image description
                n=1,             # number of images generated per request
                size="256x256"   # image size; one of 256x256, 512x512, 1024x1024
            )
            image_url = response['data'][0]['url']
            log.info("[OPEN_AI] image_url={}".format(image_url))
            return image_url
        except openai.error.RateLimitError as e:
            log.warn(e)
            if retry_count < 1:
                time.sleep(5)
                log.warn("[OPEN_AI] ImgCreate RateLimit exceed, retry #{}".format(retry_count+1))
                return self.create_img(query, retry_count+1)  # retry image creation
            else:
                return "提问太快啦,请休息一下再问我吧"  # "you're asking too fast, please take a break and ask me later"
        except Exception as e:
            log.exception(e)
            return None


class Session(object):
    @staticmethod
    def build_session_query(query, user_id):
        '''
        build query with conversation history
        e.g. [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Who won the world series in 2020?"},
            {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
            {"role": "user", "content": "Where was it played?"}
        ]
        :param query: query content
        :param user_id: from user id
        :return: query content with conversation history
        '''
        session = user_session.get(user_id, [])
        if len(session) == 0:
            system_prompt = model_conf(const.OPEN_AI).get("character_desc", "")
            system_item = {'role': 'system', 'content': system_prompt}
            session.append(system_item)
            user_session[user_id] = session
        user_item = {'role': 'user', 'content': query}
        session.append(user_item)
        return session

    @staticmethod
    def save_session(query, answer, user_id):
        session = user_session.get(user_id)
        if session:
            # append the assistant's answer to the conversation
            gpt_item = {'role': 'assistant', 'content': answer}
            session.append(gpt_item)

    @staticmethod
    def clear_session(user_id):
        user_session[user_id] = []

8 changes: 7 additions & 1 deletion model/model_factory.py
@@ -12,8 +12,14 @@ def create_bot(model_type):
"""

if model_type == const.OPEN_AI:
# OpenAI 官方对话模型API
# OpenAI 官方对话模型API (gpt-3.0)
from model.openai.open_ai_model import OpenAIModel
return OpenAIModel()

elif model_type == const.CHATGPT:
# ChatGPT API (gpt-3.5-turbo)
from model.chatgpt.chatgpt_model import ChatGPTModel
return ChatGPTModel()

raise RuntimeError
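Putting the factory change together with the new `CHATGPT` constant and the `type` field in `config.json`, model selection would look roughly like this (a sketch assuming `create_bot` is the module-level function shown in the hunk above):

```python
from common import const
from model.model_factory import create_bot

bot = create_bot(const.CHATGPT)    # "chatgpt" -> ChatGPTModel (gpt-3.5-turbo)
# bot = create_bot(const.OPEN_AI)  # "openai"  -> OpenAIModel (text-davinci-003)
```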
