diff --git a/.babelrc b/.babelrc
new file mode 100644
index 00000000000..53e4d9b2403
--- /dev/null
+++ b/.babelrc
@@ -0,0 +1,14 @@
+{
+  "presets": [
+    [
+      "next/babel",
+      {
+        "preset-env": {
+          "targets": {
+            "browsers": ["> 0.25%, not dead"]
+          }
+        }
+      }
+    ]
+  ]
+}
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000000..95ed9e268e4
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,97 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+# Runtime data
+pids
+*.pid
+*.seed
+*.pid.lock
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+*.lcov
+
+# nyc test coverage
+.nyc_output
+
+# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
+.grunt
+
+# Node.js dependencies
+/node_modules
+/jspm_packages
+
+# TypeScript v1 declaration files
+typings
+
+# Optional npm cache directory
+.npm
+
+# Optional eslint cache
+.eslintcache
+
+# Optional REPL history
+.node_repl_history
+
+# Output of 'npm pack'
+*.tgz
+
+# Yarn Integrity file
+.yarn-integrity
+
+# dotenv environment variable files
+.env
+.env.test
+
+# local env files
+.env*.local
+
+# Next.js build output
+.next
+out
+
+# Nuxt.js build output
+.nuxt
+dist
+
+# Gatsby files
+.cache/
+
+
+# Vuepress build output
+.vuepress/dist
+
+# Serverless directories
+.serverless/
+
+# FuseBox cache
+.fusebox/
+
+# DynamoDB Local files
+.dynamodb/
+
+# Temporary folders
+tmp
+temp
+
+# IDE and editor directories
+.idea
+.vscode
+*.swp
+*.swo
+*~
+
+# OS generated files
+.DS_Store
+Thumbs.db
+
+# secret key
+*.key
+*.key.pub
diff --git a/.env.template b/.env.template
index 0f4bf0e7c00..b2a0438d9d1 100644
--- a/.env.template
+++ b/.env.template
@@ -2,12 +2,22 @@
# Your openai api key. (required)
OPENAI_API_KEY=sk-xxxx

-# Access passsword, separated by comma. (optional)
+# Access password, separated by comma. (optional)
CODE=your-password

# You can start service behind a proxy
PROXY_URL=http://localhost:7890

+# (optional)
+# Default: Empty
+# Google Gemini Pro API key, set if you want to use the Google Gemini Pro API.
+GOOGLE_API_KEY=
+
+# (optional)
+# Default: https://generativelanguage.googleapis.com/
+# Google Gemini Pro API URL without pathname, set if you want to customize the Google Gemini Pro API URL.
+GOOGLE_URL=
+
# Override openai api request base url. (optional)
# Default: https://api.openai.com
# Examples: http://your-openai-proxy.com
@@ -15,9 +25,13 @@ BASE_URL=

# Specify OpenAI organization ID.(optional)
# Default: Empty
-# If you do not want users to input their own API key, set this value to 1.
OPENAI_ORG_ID=

+# (optional)
+# Default: Empty
+# If you do not want users to use GPT-4, set this value to 1.
+DISABLE_GPT4=
+
# (optional)
# Default: Empty
# If you do not want users to input their own API key, set this value to 1.
@@ -25,10 +39,25 @@ HIDE_USER_API_KEY=

# (optional)
# Default: Empty
-# If you do not want users to use GPT-4, set this value to 1.
-DISABLE_GPT4=
+# If you want users to be able to query their balance, set this value to 1.
+ENABLE_BALANCE_QUERY=

# (optional)
# Default: Empty
-# If you do not want users to query balance, set this value to 1.
-HIDE_BALANCE_QUERY=
\ No newline at end of file
+# If you want to disable parsing settings from the URL, set this value to 1.
+DISABLE_FAST_LINK=
+
+
+# anthropic claude Api Key. (optional)
+ANTHROPIC_API_KEY=
+
+### anthropic claude Api version. 
(optional) +ANTHROPIC_API_VERSION= + + + +### anthropic claude Api url (optional) +ANTHROPIC_URL= + +### (optional) +WHITE_WEBDEV_ENDPOINTS= \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 01fa35e8230..00000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: "[Bug] " -labels: '' -assignees: '' - ---- - -**Describe the bug** -A clear and concise description of what the bug is. - -**To Reproduce** -Steps to reproduce the behavior: -1. Go to '...' -2. Click on '....' -3. Scroll down to '....' -4. See error - -**Expected behavior** -A clear and concise description of what you expected to happen. - -**Screenshots** -If applicable, add screenshots to help explain your problem. - -**Deployment** -- [ ] Docker -- [ ] Vercel -- [ ] Server - -**Desktop (please complete the following information):** - - OS: [e.g. iOS] - - Browser [e.g. chrome, safari] - - Version [e.g. 22] - -**Smartphone (please complete the following information):** - - Device: [e.g. iPhone6] - - OS: [e.g. iOS8.1] - - Browser [e.g. stock browser, safari] - - Version [e.g. 22] - -**Additional Logs** -Add any logs about the problem here. diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 00000000000..bdba257d20a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,146 @@ +name: Bug report +description: Create a report to help us improve +title: "[Bug] " +labels: ["bug"] + +body: + - type: markdown + attributes: + value: "## Describe the bug" + - type: textarea + id: bug-description + attributes: + label: "Bug Description" + description: "A clear and concise description of what the bug is." + placeholder: "Explain the bug..." + validations: + required: true + + - type: markdown + attributes: + value: "## To Reproduce" + - type: textarea + id: steps-to-reproduce + attributes: + label: "Steps to Reproduce" + description: "Steps to reproduce the behavior:" + placeholder: | + 1. Go to '...' + 2. Click on '....' + 3. Scroll down to '....' + 4. See error + validations: + required: true + + - type: markdown + attributes: + value: "## Expected behavior" + - type: textarea + id: expected-behavior + attributes: + label: "Expected Behavior" + description: "A clear and concise description of what you expected to happen." + placeholder: "Describe what you expected to happen..." + validations: + required: true + + - type: markdown + attributes: + value: "## Screenshots" + - type: textarea + id: screenshots + attributes: + label: "Screenshots" + description: "If applicable, add screenshots to help explain your problem." + placeholder: "Paste your screenshots here or write 'N/A' if not applicable..." + validations: + required: false + + - type: markdown + attributes: + value: "## Deployment" + - type: checkboxes + id: deployment + attributes: + label: "Deployment Method" + description: "Please select the deployment method you are using." + options: + - label: "Docker" + - label: "Vercel" + - label: "Server" + + - type: markdown + attributes: + value: "## Desktop (please complete the following information):" + - type: input + id: desktop-os + attributes: + label: "Desktop OS" + description: "Your desktop operating system." 
+ placeholder: "e.g., Windows 10" + validations: + required: false + - type: input + id: desktop-browser + attributes: + label: "Desktop Browser" + description: "Your desktop browser." + placeholder: "e.g., Chrome, Safari" + validations: + required: false + - type: input + id: desktop-version + attributes: + label: "Desktop Browser Version" + description: "Version of your desktop browser." + placeholder: "e.g., 89.0" + validations: + required: false + + - type: markdown + attributes: + value: "## Smartphone (please complete the following information):" + - type: input + id: smartphone-device + attributes: + label: "Smartphone Device" + description: "Your smartphone device." + placeholder: "e.g., iPhone X" + validations: + required: false + - type: input + id: smartphone-os + attributes: + label: "Smartphone OS" + description: "Your smartphone operating system." + placeholder: "e.g., iOS 14.4" + validations: + required: false + - type: input + id: smartphone-browser + attributes: + label: "Smartphone Browser" + description: "Your smartphone browser." + placeholder: "e.g., Safari" + validations: + required: false + - type: input + id: smartphone-version + attributes: + label: "Smartphone Browser Version" + description: "Version of your smartphone browser." + placeholder: "e.g., 14" + validations: + required: false + + - type: markdown + attributes: + value: "## Additional Logs" + - type: textarea + id: additional-logs + attributes: + label: "Additional Logs" + description: "Add any logs about the problem here." + placeholder: "Paste any relevant logs here..." + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 25c36ab679f..00000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: "[Feature] " -labels: '' -assignees: '' - ---- - -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - -**Describe the solution you'd like** -A clear and concise description of what you want to happen. - -**Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. - -**Additional context** -Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 00000000000..49978133074 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,53 @@ +name: Feature request +description: Suggest an idea for this project +title: "[Feature Request]: " +labels: ["enhancement"] + +body: + - type: markdown + attributes: + value: "## Is your feature request related to a problem? Please describe." + - type: textarea + id: problem-description + attributes: + label: Problem Description + description: "A clear and concise description of what the problem is. Example: I'm always frustrated when [...]" + placeholder: "Explain the problem you are facing..." + validations: + required: true + + - type: markdown + attributes: + value: "## Describe the solution you'd like" + - type: textarea + id: desired-solution + attributes: + label: Solution Description + description: A clear and concise description of what you want to happen. + placeholder: "Describe the solution you'd like..." 
+ validations: + required: true + + - type: markdown + attributes: + value: "## Describe alternatives you've considered" + - type: textarea + id: alternatives-considered + attributes: + label: Alternatives Considered + description: A clear and concise description of any alternative solutions or features you've considered. + placeholder: "Describe any alternative solutions or features you've considered..." + validations: + required: false + + - type: markdown + attributes: + value: "## Additional context" + - type: textarea + id: additional-context + attributes: + label: Additional Context + description: Add any other context or screenshots about the feature request here. + placeholder: "Add any other context or screenshots about the feature request here..." + validations: + required: false diff --git "a/.github/ISSUE_TEMPLATE/\345\212\237\350\203\275\345\273\272\350\256\256.md" "b/.github/ISSUE_TEMPLATE/\345\212\237\350\203\275\345\273\272\350\256\256.md" deleted file mode 100644 index 3fc3d0769c2..00000000000 --- "a/.github/ISSUE_TEMPLATE/\345\212\237\350\203\275\345\273\272\350\256\256.md" +++ /dev/null @@ -1,24 +0,0 @@ ---- -name: 功能建议 -about: 请告诉我们你的灵光一闪 -title: "[Feature] " -labels: '' -assignees: '' - ---- - -> 为了提高交流效率,我们设立了官方 QQ 群和 QQ 频道,如果你在使用或者搭建过程中遇到了任何问题,请先第一时间加群或者频道咨询解决,除非是可以稳定复现的 Bug 或者较为有创意的功能建议,否则请不要随意往 Issue 区发送低质无意义帖子。 - -> [点击加入官方群聊](https://github.com/Yidadaa/ChatGPT-Next-Web/discussions/1724) - -**这个功能与现有的问题有关吗?** -如果有关,请在此列出链接或者描述问题。 - -**你想要什么功能或者有什么建议?** -尽管告诉我们。 - -**有没有可以参考的同类竞品?** -可以给出参考产品的链接或者截图。 - -**其他信息** -可以说说你的其他考虑。 diff --git "a/.github/ISSUE_TEMPLATE/\345\217\215\351\246\210\351\227\256\351\242\230.md" "b/.github/ISSUE_TEMPLATE/\345\217\215\351\246\210\351\227\256\351\242\230.md" deleted file mode 100644 index 270263f0626..00000000000 --- "a/.github/ISSUE_TEMPLATE/\345\217\215\351\246\210\351\227\256\351\242\230.md" +++ /dev/null @@ -1,36 +0,0 @@ ---- -name: 反馈问题 -about: 请告诉我们你遇到的问题 -title: "[Bug] " -labels: '' -assignees: '' - ---- - -> 为了提高交流效率,我们设立了官方 QQ 群和 QQ 频道,如果你在使用或者搭建过程中遇到了任何问题,请先第一时间加群或者频道咨询解决,除非是可以稳定复现的 Bug 或者较为有创意的功能建议,否则请不要随意往 Issue 区发送低质无意义帖子。 - -> [点击加入官方群聊](https://github.com/Yidadaa/ChatGPT-Next-Web/discussions/1724) - -**反馈须知** - -⚠️ 注意:不遵循此模板的任何帖子都会被立即关闭,如果没有提供下方的信息,我们无法定位你的问题。 - -请在下方中括号内输入 x 来表示你已经知晓相关内容。 -- [ ] 我确认已经在 [常见问题](https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/docs/faq-cn.md) 中搜索了此次反馈的问题,没有找到解答; -- [ ] 我确认已经在 [Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) 列表(包括已经 Close 的)中搜索了此次反馈的问题,没有找到解答。 -- [ ] 我确认已经在 [Vercel 使用教程](https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/docs/vercel-cn.md) 中搜索了此次反馈的问题,没有找到解答。 - -**描述问题** -请在此描述你遇到了什么问题。 - -**如何复现** -请告诉我们你是通过什么操作触发的该问题。 - -**截图** -请在此提供控制台截图、屏幕截图或者服务端的 log 截图。 - -**一些必要的信息** - - 系统:[比如 windows 10/ macos 12/ linux / android 11 / ios 16] - - 浏览器: [比如 chrome, safari] - - 版本: [填写设置页面的版本号] - - 部署方式:[比如 vercel、docker 或者服务器部署] diff --git a/.github/workflows/app.yml b/.github/workflows/app.yml index 234338dd4fe..7e74cf04595 100644 --- a/.github/workflows/app.yml +++ b/.github/workflows/app.yml @@ -9,7 +9,7 @@ jobs: create-release: permissions: contents: write - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest outputs: release_id: ${{ steps.create-release.outputs.result }} @@ -18,7 +18,7 @@ jobs: - name: setup node uses: actions/setup-node@v3 with: - node-version: 16 + node-version: 18 - name: get version run: echo "PACKAGE_VERSION=$(node -p "require('./src-tauri/tauri.conf.json').package.version")" >> $GITHUB_ENV - name: create release @@ -39,19 +39,34 @@ jobs: 
strategy: fail-fast: false matrix: - platform: [macos-latest, ubuntu-20.04, windows-latest] + config: + - os: ubuntu-latest + arch: x86_64 + rust_target: x86_64-unknown-linux-gnu + - os: macos-latest + arch: aarch64 + rust_target: x86_64-apple-darwin,aarch64-apple-darwin + - os: windows-latest + arch: x86_64 + rust_target: x86_64-pc-windows-msvc - runs-on: ${{ matrix.platform }} + runs-on: ${{ matrix.config.os }} steps: - uses: actions/checkout@v3 - name: setup node uses: actions/setup-node@v3 with: - node-version: 16 + node-version: 18 + cache: 'yarn' - name: install Rust stable uses: dtolnay/rust-toolchain@stable + with: + targets: ${{ matrix.config.rust_target }} + - uses: Swatinem/rust-cache@v2 + with: + key: ${{ matrix.config.os }} - name: install dependencies (ubuntu only) - if: matrix.platform == 'ubuntu-20.04' + if: matrix.config.os == 'ubuntu-latest' run: | sudo apt-get update sudo apt-get install -y libgtk-3-dev libwebkit2gtk-4.0-dev libappindicator3-dev librsvg2-dev patchelf @@ -62,13 +77,20 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} TAURI_PRIVATE_KEY: ${{ secrets.TAURI_PRIVATE_KEY }} TAURI_KEY_PASSWORD: ${{ secrets.TAURI_KEY_PASSWORD }} + APPLE_CERTIFICATE: ${{ secrets.APPLE_CERTIFICATE }} + APPLE_CERTIFICATE_PASSWORD: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }} + APPLE_SIGNING_IDENTITY: ${{ secrets.APPLE_SIGNING_IDENTITY }} + APPLE_ID: ${{ secrets.APPLE_ID }} + APPLE_PASSWORD: ${{ secrets.APPLE_PASSWORD }} + APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }} with: releaseId: ${{ needs.create-release.outputs.release_id }} + args: ${{ matrix.config.os == 'macos-latest' && '--target universal-apple-darwin' || '' }} publish-release: permissions: contents: write - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest needs: [create-release, build-tauri] steps: diff --git a/.github/workflows/deploy_preview.yml b/.github/workflows/deploy_preview.yml new file mode 100644 index 00000000000..bdbb78c27c5 --- /dev/null +++ b/.github/workflows/deploy_preview.yml @@ -0,0 +1,84 @@ +name: VercelPreviewDeployment + +on: + pull_request_target: + types: + - opened + - synchronize + - reopened + +env: + VERCEL_TEAM: ${{ secrets.VERCEL_TEAM }} + VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN }} + VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }} + VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }} + VERCEL_PR_DOMAIN_SUFFIX: ${{ secrets.VERCEL_PR_DOMAIN_SUFFIX }} + +permissions: + contents: read + statuses: write + pull-requests: write + +jobs: + deploy-preview: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: Extract branch name + shell: bash + run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> "$GITHUB_OUTPUT" + id: extract_branch + + - name: Hash branch name + uses: pplanel/hash-calculator-action@v1.3.1 + id: hash_branch + with: + input: ${{ steps.extract_branch.outputs.branch }} + method: MD5 + + - name: Set Environment Variables + id: set_env + if: github.event_name == 'pull_request_target' + run: | + echo "VERCEL_ALIAS_DOMAIN=${{ github.event.pull_request.number }}-${{ github.workflow }}.${VERCEL_PR_DOMAIN_SUFFIX}" >> $GITHUB_OUTPUT + + - name: Install Vercel CLI + run: npm install --global vercel@latest + + - name: Cache dependencies + uses: actions/cache@v2 + id: cache-npm + with: + path: ~/.npm + key: npm-${{ hashFiles('package-lock.json') }} + restore-keys: npm- + + - name: Pull Vercel Environment Information + run: vercel pull --yes --environment=preview --token=${VERCEL_TOKEN} + + - name: Deploy Project 
Artifacts to Vercel + id: vercel + env: + META_TAG: ${{ steps.hash_branch.outputs.digest }}-${{ github.run_number }}-${{ github.run_attempt}} + run: | + set -e + vercel pull --yes --environment=preview --token=${VERCEL_TOKEN} + vercel build --token=${VERCEL_TOKEN} + vercel deploy --prebuilt --archive=tgz --token=${VERCEL_TOKEN} --meta base_hash=${{ env.META_TAG }} + + DEFAULT_URL=$(vercel ls --token=${VERCEL_TOKEN} --meta base_hash=${{ env.META_TAG }}) + ALIAS_URL=$(vercel alias set ${DEFAULT_URL} ${{ steps.set_env.outputs.VERCEL_ALIAS_DOMAIN }} --token=${VERCEL_TOKEN} --scope ${VERCEL_TEAM}| awk '{print $3}') + + echo "New preview URL: ${DEFAULT_URL}" + echo "New alias URL: ${ALIAS_URL}" + echo "VERCEL_URL=${ALIAS_URL}" >> "$GITHUB_OUTPUT" + + - uses: mshick/add-pr-comment@v2 + with: + message: | + Your build has completed! + + [Preview deployment](${{ steps.vercel.outputs.VERCEL_URL }}) diff --git a/.github/workflows/remove_deploy_preview.yml b/.github/workflows/remove_deploy_preview.yml new file mode 100644 index 00000000000..4846cda2d6a --- /dev/null +++ b/.github/workflows/remove_deploy_preview.yml @@ -0,0 +1,40 @@ +name: Removedeploypreview + +permissions: + contents: read + statuses: write + pull-requests: write + +env: + VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN }} + VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }} + VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }} + +on: + pull_request_target: + types: + - closed + +jobs: + delete-deployments: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Extract branch name + shell: bash + run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT + id: extract_branch + + - name: Hash branch name + uses: pplanel/hash-calculator-action@v1.3.1 + id: hash_branch + with: + input: ${{ steps.extract_branch.outputs.branch }} + method: MD5 + + - name: Call the delete-deployment-preview.sh script + env: + META_TAG: ${{ steps.hash_branch.outputs.digest }} + run: | + bash ./scripts/delete-deployment-preview.sh diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml index ebf5587d07c..e04e30adbd6 100644 --- a/.github/workflows/sync.yml +++ b/.github/workflows/sync.yml @@ -24,7 +24,7 @@ jobs: id: sync uses: aormsby/Fork-Sync-With-Upstream-action@v3.4 with: - upstream_sync_repo: Yidadaa/ChatGPT-Next-Web + upstream_sync_repo: ChatGPTNextWeb/ChatGPT-Next-Web upstream_sync_branch: main target_sync_branch: main target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set diff --git a/.gitignore b/.gitignore index b00b0e325a4..a24c6e047d5 100644 --- a/.gitignore +++ b/.gitignore @@ -43,4 +43,4 @@ dev .env *.key -*.key.pub \ No newline at end of file +*.key.pub diff --git a/Dockerfile b/Dockerfile index 720a0cfe959..ae9a17cddbd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,6 +16,7 @@ FROM base AS builder RUN apk update && apk add --no-cache git ENV OPENAI_API_KEY="" +ENV GOOGLE_API_KEY="" ENV CODE="" WORKDIR /app @@ -31,6 +32,7 @@ RUN apk add proxychains-ng ENV PROXY_URL="" ENV OPENAI_API_KEY="" +ENV GOOGLE_API_KEY="" ENV CODE="" COPY --from=builder /app/public ./public @@ -41,22 +43,22 @@ COPY --from=builder /app/.next/server ./.next/server EXPOSE 3000 CMD if [ -n "$PROXY_URL" ]; then \ - export HOSTNAME="127.0.0.1"; \ - protocol=$(echo $PROXY_URL | cut -d: -f1); \ - host=$(echo $PROXY_URL | cut -d/ -f3 | cut -d: -f1); \ - port=$(echo $PROXY_URL | cut -d: -f3); \ - conf=/etc/proxychains.conf; \ - echo "strict_chain" > $conf; \ - echo "proxy_dns" >> $conf; \ - echo 
"remote_dns_subnet 224" >> $conf; \ - echo "tcp_read_time_out 15000" >> $conf; \ - echo "tcp_connect_time_out 8000" >> $conf; \ - echo "localnet 127.0.0.0/255.0.0.0" >> $conf; \ - echo "localnet ::1/128" >> $conf; \ - echo "[ProxyList]" >> $conf; \ - echo "$protocol $host $port" >> $conf; \ - cat /etc/proxychains.conf; \ - proxychains -f $conf node server.js; \ + export HOSTNAME="0.0.0.0"; \ + protocol=$(echo $PROXY_URL | cut -d: -f1); \ + host=$(echo $PROXY_URL | cut -d/ -f3 | cut -d: -f1); \ + port=$(echo $PROXY_URL | cut -d: -f3); \ + conf=/etc/proxychains.conf; \ + echo "strict_chain" > $conf; \ + echo "proxy_dns" >> $conf; \ + echo "remote_dns_subnet 224" >> $conf; \ + echo "tcp_read_time_out 15000" >> $conf; \ + echo "tcp_connect_time_out 8000" >> $conf; \ + echo "localnet 127.0.0.0/255.0.0.0" >> $conf; \ + echo "localnet ::1/128" >> $conf; \ + echo "[ProxyList]" >> $conf; \ + echo "$protocol $host $port" >> $conf; \ + cat /etc/proxychains.conf; \ + proxychains -f $conf node server.js; \ else \ - node server.js; \ + node server.js; \ fi diff --git a/LICENSE b/LICENSE index 4f00efc8791..047f9431e7d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,75 +1,21 @@ -版权所有(c)<2023> - -反996许可证版本1.0 - -在符合下列条件的情况下, -特此免费向任何得到本授权作品的副本(包括源代码、文件和/或相关内容,以下统称为“授权作品” -)的个人和法人实体授权:被授权个人或法人实体有权以任何目的处置授权作品,包括但不限于使 -用、复制,修改,衍生利用、散布,发布和再许可: - - -1. 个人或法人实体必须在许可作品的每个再散布或衍生副本上包含以上版权声明和本许可证,不 - 得自行修改。 -2. 个人或法人实体必须严格遵守与个人实际所在地或个人出生地或归化地、或法人实体注册地或 - 经营地(以较严格者为准)的司法管辖区所有适用的与劳动和就业相关法律、法规、规则和 - 标准。如果该司法管辖区没有此类法律、法规、规章和标准或其法律、法规、规章和标准不可 - 执行,则个人或法人实体必须遵守国际劳工标准的核心公约。 -3. 个人或法人不得以任何方式诱导或强迫其全职或兼职员工或其独立承包人以口头或书面形式同 - 意直接或间接限制、削弱或放弃其所拥有的,受相关与劳动和就业有关的法律、法规、规则和 - 标准保护的权利或补救措施,无论该等书面或口头协议是否被该司法管辖区的法律所承认,该 - 等个人或法人实体也不得以任何方法限制其雇员或独立承包人向版权持有人或监督许可证合规 - 情况的有关当局报告或投诉上述违反许可证的行为的权利。 - -该授权作品是"按原样"提供,不做任何明示或暗示的保证,包括但不限于对适销性、特定用途适用 -性和非侵权性的保证。在任何情况下,无论是在合同诉讼、侵权诉讼或其他诉讼中,版权持有人均 -不承担因本软件或本软件的使用或其他交易而产生、引起或与之相关的任何索赔、损害或其他责任。 - - -------------------------- ENGLISH ------------------------------ - - -Copyright (c) <2023> - -Anti 996 License Version 1.0 (Draft) - -Permission is hereby granted to any individual or legal entity obtaining a copy -of this licensed work (including the source code, documentation and/or related -items, hereinafter collectively referred to as the "licensed work"), free of -charge, to deal with the licensed work for any purpose, including without -limitation, the rights to use, reproduce, modify, prepare derivative works of, -publish, distribute and sublicense the licensed work, subject to the following -conditions: - -1. The individual or the legal entity must conspicuously display, without - modification, this License on each redistributed or derivative copy of the - Licensed Work. - -2. The individual or the legal entity must strictly comply with all applicable - laws, regulations, rules and standards of the jurisdiction relating to - labor and employment where the individual is physically located or where - the individual was born or naturalized; or where the legal entity is - registered or is operating (whichever is stricter). In case that the - jurisdiction has no such laws, regulations, rules and standards or its - laws, regulations, rules and standards are unenforceable, the individual - or the legal entity are required to comply with Core International Labor - Standards. - -3. 
The individual or the legal entity shall not induce or force its - employee(s), whether full-time or part-time, or its independent - contractor(s), in any methods, to agree in oral or written form, - to directly or indirectly restrict, weaken or relinquish his or - her rights or remedies under such laws, regulations, rules and - standards relating to labor and employment as mentioned above, - no matter whether such written or oral agreement are enforceable - under the laws of the said jurisdiction, nor shall such individual - or the legal entity limit, in any methods, the rights of its employee(s) - or independent contractor(s) from reporting or complaining to the copyright - holder or relevant authorities monitoring the compliance of the license - about its violation(s) of the said license. - -THE LICENSED WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT -HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ANY WAY CONNECTION -WITH THE LICENSED WORK OR THE USE OR OTHER DEALINGS IN THE LICENSED WORK. \ No newline at end of file +MIT License + +Copyright (c) 2023-2024 Zhang Yifei + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index 91c857f1fd7..24967c16403 100644 --- a/README.md +++ b/README.md @@ -1,31 +1,33 @@
-icon +icon -

ChatGPT Next Web

+

NextChat (ChatGPT Next Web)

English / [简体中文](./README_CN.md) -One-Click to get well-designed cross-platform ChatGPT web UI. +One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 & Gemini Pro support. -一键免费部署你的跨平台私人 ChatGPT 应用。 +一键免费部署你的跨平台私人 ChatGPT 应用, 支持 GPT3, GPT4 & Gemini Pro 模型。 [![Web][Web-image]][web-url] [![Windows][Windows-image]][download-url] [![MacOS][MacOS-image]][download-url] [![Linux][Linux-image]][download-url] -[Web App](https://chatgpt.nextweb.fun/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Buy Me a Coffee](https://www.buymeacoffee.com/yidadaa) +[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Twitter](https://twitter.com/NextChatDev) -[网页版](https://chatgpt.nextweb.fun/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [QQ 群](https://github.com/Yidadaa/ChatGPT-Next-Web/discussions/1724) / [打赏开发者](https://user-images.githubusercontent.com/16968934/227772541-5bcd52d8-61b7-488c-a203-0330d8006e2b.jpg) +[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) -[web-url]: https://chatgpt.nextweb.fun +[web-url]: https://app.nextchat.dev/ [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases [Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge [Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows [MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple [Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu -[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web) +[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) + +[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/ZBUEFA) [![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) @@ -37,15 +39,15 @@ One-Click to get well-designed cross-platform ChatGPT web UI. - **Deploy for free with one-click** on Vercel in under 1 minute - Compact client (~5MB) on Linux/Windows/MacOS, [download it now](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) -- Fully compatible with self-deployed llms, recommended for use with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) or [LocalAI](https://github.com/go-skynet/LocalAI) -- Privacy first, all data stored locally in the browser +- Fully compatible with self-deployed LLMs, recommended for use with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) or [LocalAI](https://github.com/go-skynet/LocalAI) +- Privacy first, all data is stored locally in the browser - Markdown support: LaTex, mermaid, code highlight, etc. 
- Responsive design, dark mode and PWA
- Fast first screen loading speed (~100kb), support streaming response
- New in v2: create, share and debug your chat tools with prompt templates (mask)
- Awesome prompts powered by [awesome-chatgpt-prompts-zh](https://github.com/PlexPt/awesome-chatgpt-prompts-zh) and [awesome-chatgpt-prompts](https://github.com/f/awesome-chatgpt-prompts)
- Automatically compresses chat history to support long conversations while also saving your tokens
-- I18n: English, 简体中文, 繁体中文, 日本語, Français, Español, Italiano, Türkçe, Deutsch, Tiếng Việt, Русский, Čeština, 한국어
+- I18n: English, 简体中文, 繁体中文, 日本語, Français, Español, Italiano, Türkçe, Deutsch, Tiếng Việt, Русский, Čeština, 한국어, Indonesia

## Roadmap

@@ -59,9 +61,11 @@ One-Click to get well-designed cross-platform ChatGPT web UI.

## What's New

-- 🚀 v2.0 is released, now you can create prompt templates, turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/).
-- 🚀 v2.7 let's share conversations as image, or share to ShareGPT!
+- 🚀 v2.10.1 supports the Google Gemini Pro model.
+- 🚀 v2.9.11 you can use the Azure endpoint now.
- 🚀 v2.8 now we have a client that runs across all platforms!
+- 🚀 v2.7 lets you share conversations as images, or share them to ShareGPT!
+- 🚀 v2.0 is released; now you can create prompt templates and turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/).

## 主要功能

@@ -74,7 +78,7 @@ One-Click to get well-designed cross-platform ChatGPT web UI.

- 预制角色功能(面具),方便地创建、分享和调试你的个性化对话
- 海量的内置 prompt 列表,来自[中文](https://github.com/PlexPt/awesome-chatgpt-prompts-zh)和[英文](https://github.com/f/awesome-chatgpt-prompts)
- 自动压缩上下文聊天记录,在节省 Token 的同时支持超长对话
-- 多国语言支持:English, 简体中文, 繁体中文, 日本語, Español, Italiano, Türkçe, Deutsch, Tiếng Việt, Русский, Čeština
+- 多国语言支持:English, 简体中文, 繁体中文, 日本語, Español, Italiano, Türkçe, Deutsch, Tiếng Việt, Русский, Čeština, 한국어, Indonesia
- 拥有自己的域名?好上加好,绑定后即可在任何地方**无障碍**快速访问

## 开发计划

@@ -84,7 +88,7 @@ One-Click to get well-designed cross-platform ChatGPT web UI.

- [x] 预制角色:使用预制角色快速定制新对话 [#993](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/993)
- [x] 分享为图片,分享到 ShareGPT 链接 [#1741](https://github.com/Yidadaa/ChatGPT-Next-Web/pull/1741)
- [x] 使用 tauri 打包桌面应用
-- [x] 支持自部署的大语言模型:开箱即用 [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) ,服务端部署 [LocalAI 项目](https://github.com/go-skynet/LocalAI) llama / gpt4all / rwkv / vicuna / koala / gpt4all-j / cerebras / falcon / dolly 等等
+- [x] 支持自部署的大语言模型:开箱即用 [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) ,服务端部署 [LocalAI 项目](https://github.com/go-skynet/LocalAI) llama / gpt4all / rwkv / vicuna / koala / gpt4all-j / cerebras / falcon / dolly 等等,或者使用 [api-for-open-llm](https://github.com/xusenlinzy/api-for-open-llm)
- [ ] 插件机制,支持联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165)

## 最新动态
- 💡 想要更方便地随时随地使用本项目?可以试下这款桌面插件:https://github.com/mushan0x0/AI0x0.com
- 🚀 v2.7 现在可以将会话分享为图片了,也可以分享到 ShareGPT 的在线链接。
- 🚀 v2.8 发布了横跨 Linux/Windows/MacOS 的体积极小的客户端。
+- 🚀 v2.9.11 现在可以使用自定义 Azure 服务了。

## Get Started

@@ -135,7 +140,7 @@ After forking the project, due to the limitations imposed by GitHub, you need to

If you want to update instantly, you can check out the [GitHub documentation](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork) to learn how to synchronize a forked project with upstream code.

-You can star or watch this project or follow author to get release notifictions in time.
+You can star or watch this project, or follow the author, to get release notifications in time.

## Access Password

@@ -153,13 +158,13 @@ After adding or modifying this environment variable, please redeploy the project

> [简体中文 > 如何配置 api key、访问密码、接口代理](./README_CN.md#环境变量)

-### `OPENAI_API_KEY` (required)
+### `CODE` (optional)

-Your openai api key.
+Access password, separated by commas.

-### `CODE` (optional)
+### `OPENAI_API_KEY` (required)

-Access passsword, separated by comma.
+Your OpenAI API key; join multiple API keys with commas.

### `BASE_URL` (optional)

@@ -173,6 +178,69 @@ Override openai api request base url.

Specify OpenAI organization ID.

+### `AZURE_URL` (optional)
+
+> Example: https://{azure-resource-url}/openai/deployments/{deploy-name}
+> If you configure the deployment name in `CUSTOM_MODELS`, you can remove `{deploy-name}` from `AZURE_URL`.
+
+Azure deployment URL.
+
+### `AZURE_API_KEY` (optional)
+
+Azure API key.
+
+### `AZURE_API_VERSION` (optional)
+
+Azure API version; find it in the [Azure Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions).
+
+### `GOOGLE_API_KEY` (optional)
+
+Google Gemini Pro API key.
+
+### `GOOGLE_URL` (optional)
+
+Google Gemini Pro API URL.
+
+### `ANTHROPIC_API_KEY` (optional)
+
+Anthropic Claude API key.
+
+### `ANTHROPIC_API_VERSION` (optional)
+
+Anthropic Claude API version.
+
+### `ANTHROPIC_URL` (optional)
+
+Anthropic Claude API URL.
+
+### `BAIDU_API_KEY` (optional)
+
+Baidu API key.
+
+### `BAIDU_SECRET_KEY` (optional)
+
+Baidu secret key.
+
+### `BAIDU_URL` (optional)
+
+Baidu API URL.
+
+### `BYTEDANCE_API_KEY` (optional)
+
+ByteDance API key.
+
+### `BYTEDANCE_URL` (optional)
+
+ByteDance API URL.
+
+### `ALIBABA_API_KEY` (optional)
+
+Alibaba Cloud API key.
+
+### `ALIBABA_URL` (optional)
+
+Alibaba Cloud API URL.
+
### `HIDE_USER_API_KEY` (optional)

> Default: Empty

@@ -185,11 +253,47 @@ If you do not want users to input their own API key, set this value to 1.

If you do not want users to use GPT-4, set this value to 1.

-### `HIDE_BALANCE_QUERY` (optional)
+### `ENABLE_BALANCE_QUERY` (optional)
+
+> Default: Empty
+
+If you want users to be able to query their balance, set this value to 1.
+
+### `DISABLE_FAST_LINK` (optional)
+
+> Default: Empty
+
+If you want to disable parsing settings from the URL, set this value to 1.
+
+### `CUSTOM_MODELS` (optional)

> Default: Empty
+> Example: `+llama,+claude-2,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` means: add `llama` and `claude-2` to the model list, remove `gpt-3.5-turbo` from the list, and display `gpt-4-1106-preview` as `gpt-4-turbo`.

-If you do not want users to query balance, set this value to 1.
+To control custom models, use `+` to add a custom model, use `-` to hide a model, and use `name=displayName` to customize a model's display name; separate entries with commas.
+
+Use `-all` to disable all default models, and `+all` to enable all default models.
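+
+For instance, a combined value might look like the following sketch (an illustrative example only, reusing the model names from the examples above):
+
+```shell
+# Hypothetical CUSTOM_MODELS value: disable every default model, add two
+# custom models, and display gpt-4-1106-preview as gpt-4-turbo.
+CUSTOM_MODELS=-all,+llama,+claude-2,gpt-4-1106-preview=gpt-4-turbo
+```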
+
+For Azure: use `modelName@azure=deploymentName` to customize the model name and deployment name.
+> Example: `+gpt-3.5-turbo@azure=gpt35` will show the option `gpt35(Azure)` in the model list.
+
+For ByteDance: use `modelName@bytedance=deploymentName` to customize the model name and deployment name.
+> Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show the option `Doubao-lite-4k(ByteDance)` in the model list.
+
+### `DEFAULT_MODEL` (optional)
+
+Change the default model.
+
+### `WHITE_WEBDEV_ENDPOINTS` (optional)
+
+Use this option to allow additional WebDAV service endpoints. Format requirements:
+- Each address must be a complete endpoint
+> `https://xxxx/yyy`
+- Separate multiple addresses with commas
+
+### `DEFAULT_INPUT_TEMPLATE` (optional)
+
+Customize the default template used to initialize the User Input Preprocessing configuration item in Settings.

## Requirements

@@ -230,8 +334,8 @@ yarn dev

docker pull yidadaa/chatgpt-next-web

docker run -d -p 3000:3000 \
-   -e OPENAI_API_KEY="sk-xxxx" \
-   -e CODE="your-password" \
+   -e OPENAI_API_KEY=sk-xxxx \
+   -e CODE=your-password \
   yidadaa/chatgpt-next-web
```

You can start service behind a proxy:

```shell
docker run -d -p 3000:3000 \
-   -e OPENAI_API_KEY="sk-xxxx" \
-   -e CODE="your-password" \
-   -e PROXY_URL="http://localhost:7890" \
+   -e OPENAI_API_KEY=sk-xxxx \
+   -e CODE=your-password \
+   -e PROXY_URL=http://localhost:7890 \
   yidadaa/chatgpt-next-web
```

If your proxy needs password, use:

```shell
bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/scripts/setup.sh)
```

+## Synchronizing Chat Records (Upstash)
+
+| [简体中文](./docs/synchronise-chat-logs-cn.md) | [English](./docs/synchronise-chat-logs-en.md) | [Italiano](./docs/synchronise-chat-logs-es.md) | [日本語](./docs/synchronise-chat-logs-ja.md) | [한국어](./docs/synchronise-chat-logs-ko.md)
+
+## Documentation
+
+> Please go to the [docs](./docs) directory for more documentation.
+
+- [Deploy with Cloudflare (deprecated)](./docs/cloudflare-pages-en.md)
+- [Frequently Asked Questions](./docs/faq-en.md)
+- [How to add a new translation](./docs/translation.md)
+- [How to use Vercel (Chinese only)](./docs/vercel-cn.md)
+- [User Manual (Chinese only, WIP)](./docs/user-manual-cn.md)
+
## Screenshots

![Settings](./docs/images/settings.png)

![More](./docs/images/more.png)

+## Translation
+
+If you want to add a new translation, read this [document](./docs/translation.md).
+
## Donation

[Buy Me a Coffee](https://www.buymeacoffee.com/yidadaa)

@@ -294,11 +416,19 @@ bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/s

[@Sha1rholder](https://github.com/Sha1rholder)
[@AnsonHyq](https://github.com/AnsonHyq)
[@synwith](https://github.com/synwith)
+[@piksonGit](https://github.com/piksonGit)
+[@ouyangzhiping](https://github.com/ouyangzhiping)
+[@wenjiavv](https://github.com/wenjiavv)
+[@LeXwDeX](https://github.com/LeXwDeX)
+[@Licoy](https://github.com/Licoy)
+[@shangmin2009](https://github.com/shangmin2009)

-### Contributor
+### Contributors

-[Contributors](https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors)
+
+
+

## LICENSE

-[Anti 996 License](https://github.com/kattgu7/Anti-996-License/blob/master/LICENSE_CN_EN)
+[MIT](https://opensource.org/license/mit/)
diff --git a/README_CN.md b/README_CN.md
index 990b6442410..5400bb276fa 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -1,14 +1,16 @@
预览 -

ChatGPT Next Web

+

NextChat

-一键免费部署你的私人 ChatGPT 网页应用。
+一键免费部署你的私人 ChatGPT 网页应用,支持 GPT3, GPT4 & Gemini Pro 模型。

-[演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) / [QQ 群](https://user-images.githubusercontent.com/16968934/228190818-7dd00845-e9b9-4363-97e5-44c507ac76da.jpeg) / [打赏开发者](https://user-images.githubusercontent.com/16968934/227772541-5bcd52d8-61b7-488c-a203-0330d8006e2b.jpg) / [Donate](#捐赠-donate-usdt)
+[演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)

[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web)

+[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/ZBUEFA)
+
[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)

![主界面](./docs/images/cover.png)

@@ -19,7 +21,7 @@

1. 准备好你的 [OpenAI API Key](https://platform.openai.com/account/api-keys);
2. 点击右侧按钮开始部署:
-   [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web),直接使用 Github 账号登录即可,记得在环境变量页填入 API Key 和[页面访问密码](#配置页面访问密码) CODE;
+   [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web),直接使用 Github 账号登录即可,记得在环境变量页填入 API Key 和[页面访问密码](#配置页面访问密码) CODE;
3. 部署完毕后,即可开始使用;
4. (可选)[绑定自定义域名](https://vercel.com/docs/concepts/projects/domains/add-a-domain):Vercel 分配的域名 DNS 在某些区域被污染了,绑定自定义域名即可直连。

@@ -68,7 +70,7 @@ code1,code2,code3

### `OPENAI_API_KEY` (必填项)

-OpanAI 密钥,你在 openai 账户页面申请的 api key。
+OpenAI 密钥,你在 openai 账户页面申请的 api key,使用英文逗号隔开多个 key,这样可以随机轮询这些 key。

### `CODE` (可选)

@@ -90,6 +92,69 @@ OpenAI 接口代理 URL,如果你手动配置了 openai 接口代理,请填

指定 OpenAI 中的组织 ID。

+### `AZURE_URL` (可选)
+
+> 形如:https://{azure-resource-url}/openai/deployments/{deploy-name}
+> 如果你已经在`CUSTOM_MODELS`中参考`displayName`的方式配置了{deploy-name},那么可以从`AZURE_URL`中移除`{deploy-name}`

+Azure 部署地址。
+
+### `AZURE_API_KEY` (可选)
+
+Azure 密钥。
+
+### `AZURE_API_VERSION` (可选)
+
+Azure API 版本,你可以在这里找到:[Azure 文档](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)。
+
+### `GOOGLE_API_KEY` (可选)
+
+Google Gemini Pro 密钥.
+
+### `GOOGLE_URL` (可选)
+
+Google Gemini Pro API URL.
+
+### `ANTHROPIC_API_KEY` (可选)
+
+Anthropic Claude API Key.
+
+### `ANTHROPIC_API_VERSION` (可选)
+
+Anthropic Claude API version.
+
+### `ANTHROPIC_URL` (可选)
+
+Anthropic Claude API URL.
+
+### `BAIDU_API_KEY` (可选)
+
+Baidu API Key.
+
+### `BAIDU_SECRET_KEY` (可选)
+
+Baidu Secret Key.
+
+### `BAIDU_URL` (可选)
+
+Baidu API URL.
+
+### `BYTEDANCE_API_KEY` (可选)
+
+ByteDance API Key.
+
+### `BYTEDANCE_URL` (可选)
+
+ByteDance API URL.
+
+### `ALIBABA_API_KEY` (可选)
+
+阿里云(千问)API Key.
+
+### `ALIBABA_URL` (可选)
+
+阿里云(千问)API URL.
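+
+下面是一份 `.env` 配置示意(仅作示例:其中的资源名、部署名、密钥与版本号均为占位符,请替换为你自己的值,并以各服务商文档为准):
+
+```shell
+# Azure 配置示例(占位符)
+AZURE_URL=https://{azure-resource-url}/openai/deployments/{deploy-name}
+AZURE_API_KEY=your-azure-api-key
+AZURE_API_VERSION=2023-05-15
+
+# Google Gemini Pro 配置示例(占位符)
+GOOGLE_API_KEY=your-google-api-key
+```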
+ ### `HIDE_USER_API_KEY` (可选) 如果你不想让用户自行填入 API Key,将此环境变量设置为 1 即可。 @@ -98,9 +163,42 @@ OpenAI 接口代理 URL,如果你手动配置了 openai 接口代理,请填 如果你不想让用户使用 GPT-4,将此环境变量设置为 1 即可。 -### `HIDE_BALANCE_QUERY` (可选) +### `ENABLE_BALANCE_QUERY` (可选) + +如果你想启用余额查询功能,将此环境变量设置为 1 即可。 + +### `DISABLE_FAST_LINK` (可选) + +如果你想禁用从链接解析预制设置,将此环境变量设置为 1 即可。 + +### `WHITE_WEBDEV_ENDPOINTS` (可选) + +如果你想增加允许访问的webdav服务地址,可以使用该选项,格式要求: +- 每一个地址必须是一个完整的 endpoint +> `https://xxxx/xxx` +- 多个地址以`,`相连 -如果你不想让用户查询余额,将此环境变量设置为 1 即可。 +### `CUSTOM_MODELS` (可选) + +> 示例:`+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` 表示增加 `qwen-7b-chat` 和 `glm-6b` 到模型列表,而从列表中删除 `gpt-3.5-turbo`,并将 `gpt-4-1106-preview` 模型名字展示为 `gpt-4-turbo`。 +> 如果你想先禁用所有模型,再启用指定模型,可以使用 `-all,+gpt-3.5-turbo`,则表示仅启用 `gpt-3.5-turbo` + +用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。 + +在Azure的模式下,支持使用`modelName@azure=deploymentName`的方式配置模型名称和部署名称(deploy-name) +> 示例:`+gpt-3.5-turbo@azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项 + +在ByteDance的模式下,支持使用`modelName@bytedance=deploymentName`的方式配置模型名称和部署名称(deploy-name) +> 示例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx`这个配置会在模型列表显示一个`Doubao-lite-4k(ByteDance)`的选项 + + +### `DEFAULT_MODEL` (可选) + +更改默认模型 + +### `DEFAULT_INPUT_TEMPLATE` (可选) + +自定义默认的 template,用于初始化『设置』中的『用户输入预处理』配置项 ## 开发 @@ -114,7 +212,7 @@ OpenAI 接口代理 URL,如果你手动配置了 openai 接口代理,请填 OPENAI_API_KEY= # 中国大陆用户,可以使用本项目自带的代理进行开发,你也可以自由选择其他代理地址 -BASE_URL=https://chatgpt1.nextweb.fun/api/proxy +BASE_URL=https://b.nextweb.fun/api/proxy ``` ### 本地开发 @@ -135,8 +233,8 @@ BASE_URL=https://chatgpt1.nextweb.fun/api/proxy docker pull yidadaa/chatgpt-next-web docker run -d -p 3000:3000 \ - -e OPENAI_API_KEY="sk-xxxx" \ - -e CODE="页面访问密码" \ + -e OPENAI_API_KEY=sk-xxxx \ + -e CODE=页面访问密码 \ yidadaa/chatgpt-next-web ``` @@ -144,10 +242,10 @@ docker run -d -p 3000:3000 \ ```shell docker run -d -p 3000:3000 \ - -e OPENAI_API_KEY="sk-xxxx" \ - -e CODE="页面访问密码" \ + -e OPENAI_API_KEY=sk-xxxx \ + -e CODE=页面访问密码 \ --net=host \ - -e PROXY_URL="http://127.0.0.1:7890" \ + -e PROXY_URL=http://127.0.0.1:7890 \ yidadaa/chatgpt-next-web ``` @@ -179,8 +277,10 @@ bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/s [见项目贡献者列表](https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors) -## 开源协议 +### 相关项目 + +- [one-api](https://github.com/songquanpeng/one-api): 一站式大模型额度管理平台,支持市面上所有主流大语言模型 -> 反对 996,从我开始。 +## 开源协议 -[Anti 996 License](https://github.com/kattgu7/Anti-996-License/blob/master/LICENSE_CN_EN) +[MIT](https://opensource.org/license/mit/) diff --git a/README_ES.md b/README_ES.md deleted file mode 100644 index e9705e40206..00000000000 --- a/README_ES.md +++ /dev/null @@ -1,175 +0,0 @@ -
-预览 - -

ChatGPT Next Web

- -Implemente su aplicación web privada ChatGPT de forma gratuita con un solo clic. - -[Demo demo](https://chat-gpt-next-web.vercel.app/) / [Problemas de comentarios](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Únete a Discord](https://discord.gg/zrhvHCr79N) / [Grupo QQ](https://user-images.githubusercontent.com/16968934/228190818-7dd00845-e9b9-4363-97e5-44c507ac76da.jpeg) / [Desarrolladores de consejos](https://user-images.githubusercontent.com/16968934/227772541-5bcd52d8-61b7-488c-a203-0330d8006e2b.jpg) / [Donar](#捐赠-donate-usdt) - -[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web\&env=OPENAI_API_KEY\&env=CODE\&project-name=chatgpt-next-web\&repository-name=ChatGPT-Next-Web) - -[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) - -![主界面](./docs/images/cover.png) - -
- -## Comenzar - -1. Prepara el tuyo [Clave API OpenAI](https://platform.openai.com/account/api-keys); -2. Haga clic en el botón de la derecha para iniciar la implementación: - [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web\&env=OPENAI_API_KEY\&env=CODE\&project-name=chatgpt-next-web\&repository-name=ChatGPT-Next-Web), inicie sesión directamente con su cuenta de Github y recuerde completar la clave API y la suma en la página de variables de entorno[Contraseña de acceso a la página](#配置页面访问密码) CÓDIGO; -3. Una vez implementado, puede comenzar; -4. (Opcional)[Enlazar un nombre de dominio personalizado](https://vercel.com/docs/concepts/projects/domains/add-a-domain): El nombre de dominio DNS asignado por Vercel está contaminado en algunas regiones y puede conectarse directamente enlazando un nombre de dominio personalizado. - -## Manténgase actualizado - -Si sigue los pasos anteriores para implementar su proyecto con un solo clic, es posible que siempre diga "La actualización existe" porque Vercel creará un nuevo proyecto para usted de forma predeterminada en lugar de bifurcar el proyecto, lo que evitará que la actualización se detecte correctamente. -Le recomendamos que siga estos pasos para volver a implementar: - -* Eliminar el repositorio original; -* Utilice el botón de bifurcación en la esquina superior derecha de la página para bifurcar este proyecto; -* En Vercel, vuelva a seleccionar e implementar,[Echa un vistazo al tutorial detallado](./docs/vercel-cn.md#如何新建项目)。 - -### Activar actualizaciones automáticas - -> Si encuentra un error de ejecución de Upstream Sync, ¡Sync Fork manualmente una vez! - -Cuando bifurca el proyecto, debido a las limitaciones de Github, debe ir manualmente a la página Acciones de su proyecto bifurcado para habilitar Flujos de trabajo y habilitar Upstream Sync Action, después de habilitarlo, puede activar las actualizaciones automáticas cada hora: - -![自动更新](./docs/images/enable-actions.jpg) - -![启用自动更新](./docs/images/enable-actions-sync.jpg) - -### Actualizar el código manualmente - -Si desea que el manual se actualice inmediatamente, puede consultarlo [Documentación para Github](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork) Aprenda a sincronizar un proyecto bifurcado con código ascendente. - -Puede destacar / ver este proyecto o seguir al autor para recibir notificaciones de nuevas actualizaciones de funciones. - -## Configurar la contraseña de acceso a la página - -> Después de configurar la contraseña, el usuario debe completar manualmente el código de acceso en la página de configuración para chatear normalmente, de lo contrario, se solicitará el estado no autorizado a través de un mensaje. - -> **advertir**: Asegúrese de establecer el número de dígitos de la contraseña lo suficientemente largo, preferiblemente más de 7 dígitos, de lo contrario[Será volado](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/518)。 - -Este proyecto proporciona control de permisos limitado, agregue el nombre al nombre en la página Variables de entorno del Panel de control del proyecto Vercel `CODE` Variables de entorno con valores para contraseñas personalizadas separadas por comas: - - code1,code2,code3 - -Después de agregar o modificar la variable de entorno, por favor**Redesplegar**proyecto para poner en vigor los cambios. 
- -## Variable de entorno - -> La mayoría de los elementos de configuración de este proyecto se establecen a través de variables de entorno, tutorial:[Cómo modificar las variables de entorno de Vercel](./docs/vercel-cn.md)。 - -### `OPENAI_API_KEY` (Requerido) - -OpanAI key, la clave API que solicita en la página de su cuenta openai. - -### `CODE` (Opcional) - -Las contraseñas de acceso, opcionalmente, se pueden separar por comas. - -**advertir**: Si no completa este campo, cualquiera puede usar directamente su sitio web implementado, lo que puede hacer que su token se consuma rápidamente, se recomienda completar esta opción. - -### `BASE_URL` (Opcional) - -> Predeterminado: `https://api.openai.com` - -> Ejemplos: `http://your-openai-proxy.com` - -URL del proxy de interfaz OpenAI, complete esta opción si configuró manualmente el proxy de interfaz openAI. - -> Si encuentra problemas con el certificado SSL, establezca el `BASE_URL` El protocolo se establece en http. - -### `OPENAI_ORG_ID` (Opcional) - -Especifica el identificador de la organización en OpenAI. - -### `HIDE_USER_API_KEY` (Opcional) - -Si no desea que los usuarios rellenen la clave de API ellos mismos, establezca esta variable de entorno en 1. - -### `DISABLE_GPT4` (Opcional) - -Si no desea que los usuarios utilicen GPT-4, establezca esta variable de entorno en 1. - -### `HIDE_BALANCE_QUERY` (Opcional) - -Si no desea que los usuarios consulte el saldo, establezca esta variable de entorno en 1. - -## explotación - -> No se recomienda encarecidamente desarrollar o implementar localmente, debido a algunas razones técnicas, es difícil configurar el agente API de OpenAI localmente, a menos que pueda asegurarse de que puede conectarse directamente al servidor OpenAI. - -Haga clic en el botón de abajo para iniciar el desarrollo secundario: - -[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) - -Antes de empezar a escribir código, debe crear uno nuevo en la raíz del proyecto `.env.local` archivo, lleno de variables de entorno: - - OPENAI_API_KEY= - -### Desarrollo local - -1. Instale nodejs 18 e hilo, pregunte a ChatGPT para obtener más detalles; -2. ejecutar `yarn install && yarn dev` Enlatar. ⚠️ Nota: Este comando es solo para desarrollo local, no para implementación. -3. Úselo si desea implementar localmente `yarn install && yarn start` comando, puede cooperar con pm2 a daemon para evitar ser asesinado, pregunte a ChatGPT para obtener más detalles. - -## desplegar - -### Implementación de contenedores (recomendado) - -> La versión de Docker debe ser 20 o posterior, de lo contrario se indicará que no se puede encontrar la imagen. - -> ⚠️ Nota: Las versiones de Docker están de 1 a 2 días por detrás de la última versión la mayor parte del tiempo, por lo que es normal que sigas diciendo "La actualización existe" después de la implementación. - -```shell -docker pull yidadaa/chatgpt-next-web - -docker run -d -p 3000:3000 \ - -e OPENAI_API_KEY="sk-xxxx" \ - -e CODE="页面访问密码" \ - yidadaa/chatgpt-next-web -``` - -También puede especificar proxy: - -```shell -docker run -d -p 3000:3000 \ - -e OPENAI_API_KEY="sk-xxxx" \ - -e CODE="页面访问密码" \ - --net=host \ - -e PROXY_URL="http://127.0.0.1:7890" \ - yidadaa/chatgpt-next-web -``` - -Si necesita especificar otras variables de entorno, agréguelas usted mismo en el comando anterior `-e 环境变量=环境变量值` para especificar. 
- -### Implementación local - -Ejecute el siguiente comando en la consola: - -```shell -bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/scripts/setup.sh) -``` - -⚠️ Nota: Si tiene problemas durante la instalación, utilice la implementación de Docker. - -## Reconocimiento - -### donante - -> Ver versión en inglés. - -### Colaboradores - -[Ver la lista de colaboradores del proyecto](https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors) - -## Licencia de código abierto - -> Contra 996, empezando por mí. - -[Licencia Anti 996](https://github.com/kattgu7/Anti-996-License/blob/master/LICENSE_CN_EN) diff --git a/app/api/alibaba/[...path]/route.ts b/app/api/alibaba/[...path]/route.ts new file mode 100644 index 00000000000..c97ce593473 --- /dev/null +++ b/app/api/alibaba/[...path]/route.ts @@ -0,0 +1,155 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + Alibaba, + ALIBABA_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; +import type { RequestPayload } from "@/app/client/platforms/openai"; + +const serverConfig = getServerSideConfig(); + +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Alibaba Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Qwen); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[Alibaba] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; + +async function request(req: NextRequest) { + const controller = new AbortController(); + + // alibaba use base url or just remove the path + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Alibaba, ""); + + let baseUrl = serverConfig.alibabaUrl || ALIBABA_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? "", + "X-DashScope-SSE": req.headers.get("X-DashScope-SSE") ?? 
"disable", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Alibaba as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[Alibaba] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/anthropic/[...path]/route.ts b/app/api/anthropic/[...path]/route.ts new file mode 100644 index 00000000000..20f8d52e062 --- /dev/null +++ b/app/api/anthropic/[...path]/route.ts @@ -0,0 +1,194 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + ANTHROPIC_BASE_URL, + Anthropic, + ApiPath, + DEFAULT_MODELS, + ServiceProvider, + ModelProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "../../auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; +import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; + +const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]); + +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Anthropic Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const subpath = params.path.join("/"); + + if (!ALLOWD_PATH.has(subpath)) { + console.log("[Anthropic Route] forbidden path ", subpath); + return NextResponse.json( + { + error: true, + msg: "you are not allowed to request " + subpath, + }, + { + status: 403, + }, + ); + } + + const authResult = auth(req, ModelProvider.Claude); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[Anthropic] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; + +const serverConfig = getServerSideConfig(); + +async function request(req: NextRequest) { + const controller = new AbortController(); + + let authHeaderName = "x-api-key"; + let authValue = + req.headers.get(authHeaderName) || + req.headers.get("Authorization")?.replaceAll("Bearer ", "").trim() || + serverConfig.anthropicApiKey || + ""; + + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Anthropic, ""); + 
+ let baseUrl = + serverConfig.anthropicUrl || serverConfig.baseUrl || ANTHROPIC_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + // try rebuild url, when using cloudflare ai gateway in server + const fetchUrl = cloudflareAIGatewayUrl(`${baseUrl}${path}`); + + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + "Cache-Control": "no-store", + [authHeaderName]: authValue, + "anthropic-version": + req.headers.get("anthropic-version") || + serverConfig.anthropicApiVersion || + Anthropic.Vision, + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Anthropic as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[Anthropic] filter`, e); + } + } + // console.log("[Anthropic request]", fetchOptions.headers, req.method); + try { + const res = await fetch(fetchUrl, fetchOptions); + + // console.log( + // "[Anthropic response]", + // res.status, + // " ", + // res.headers, + // res.url, + // ); + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/auth.ts b/app/api/auth.ts index e0453b2b47f..e3b88702e66 100644 --- a/app/api/auth.ts +++ b/app/api/auth.ts @@ -1,7 +1,7 @@ import { NextRequest } from "next/server"; import { getServerSideConfig } from "../config/server"; import md5 from "spark-md5"; -import { ACCESS_CODE_PREFIX } from "../constant"; +import { ACCESS_CODE_PREFIX, ModelProvider } from "../constant"; function getIP(req: NextRequest) { let ip = req.ip ?? req.headers.get("x-real-ip"); @@ -16,19 +16,19 @@ function getIP(req: NextRequest) { function parseApiKey(bearToken: string) { const token = bearToken.trim().replaceAll("Bearer ", "").trim(); - const isOpenAiKey = !token.startsWith(ACCESS_CODE_PREFIX); + const isApiKey = !token.startsWith(ACCESS_CODE_PREFIX); return { - accessCode: isOpenAiKey ? "" : token.slice(ACCESS_CODE_PREFIX.length), - apiKey: isOpenAiKey ? token : "", + accessCode: isApiKey ? "" : token.slice(ACCESS_CODE_PREFIX.length), + apiKey: isApiKey ? token : "", }; } -export function auth(req: NextRequest) { +export function auth(req: NextRequest, modelProvider: ModelProvider) { const authToken = req.headers.get("Authorization") ?? 
""; // check if it is openai api key or user token - const { accessCode, apiKey: token } = parseApiKey(authToken); + const { accessCode, apiKey } = parseApiKey(authToken); const hashedCode = md5.hash(accessCode ?? "").trim(); @@ -39,19 +39,61 @@ export function auth(req: NextRequest) { console.log("[User IP] ", getIP(req)); console.log("[Time] ", new Date().toLocaleString()); - if (serverConfig.needCode && !serverConfig.codes.has(hashedCode) && !token) { + if (serverConfig.needCode && !serverConfig.codes.has(hashedCode) && !apiKey) { return { error: true, msg: !accessCode ? "empty access code" : "wrong access code", }; } + if (serverConfig.hideUserApiKey && !!apiKey) { + return { + error: true, + msg: "you are not allowed to access with your own api key", + }; + } + // if user does not provide an api key, inject system api key - if (!token) { - const apiKey = serverConfig.apiKey; - if (apiKey) { + if (!apiKey) { + const serverConfig = getServerSideConfig(); + + // const systemApiKey = + // modelProvider === ModelProvider.GeminiPro + // ? serverConfig.googleApiKey + // : serverConfig.isAzure + // ? serverConfig.azureApiKey + // : serverConfig.apiKey; + + let systemApiKey: string | undefined; + + switch (modelProvider) { + case ModelProvider.GeminiPro: + systemApiKey = serverConfig.googleApiKey; + break; + case ModelProvider.Claude: + systemApiKey = serverConfig.anthropicApiKey; + break; + case ModelProvider.Doubao: + systemApiKey = serverConfig.bytedanceApiKey; + break; + case ModelProvider.Ernie: + systemApiKey = serverConfig.baiduApiKey; + break; + case ModelProvider.Qwen: + systemApiKey = serverConfig.alibabaApiKey; + break; + case ModelProvider.GPT: + default: + if (req.nextUrl.pathname.includes("azure/deployments")) { + systemApiKey = serverConfig.azureApiKey; + } else { + systemApiKey = serverConfig.apiKey; + } + } + + if (systemApiKey) { console.log("[Auth] use system api key"); - req.headers.set("Authorization", `Bearer ${apiKey}`); + req.headers.set("Authorization", `Bearer ${systemApiKey}`); } else { console.log("[Auth] admin did not provide an api key"); } diff --git a/app/api/azure/[...path]/route.ts b/app/api/azure/[...path]/route.ts new file mode 100644 index 00000000000..4a17de0c8ab --- /dev/null +++ b/app/api/azure/[...path]/route.ts @@ -0,0 +1,57 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { ModelProvider } from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "../../auth"; +import { requestOpenai } from "../../common"; + +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Azure Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const subpath = params.path.join("/"); + + const authResult = auth(req, ModelProvider.GPT); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + return await requestOpenai(req); + } catch (e) { + console.error("[Azure] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; diff --git a/app/api/baidu/[...path]/route.ts 
b/app/api/baidu/[...path]/route.ts new file mode 100644 index 00000000000..94c9963c7e9 --- /dev/null +++ b/app/api/baidu/[...path]/route.ts @@ -0,0 +1,169 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + BAIDU_BASE_URL, + ApiPath, + ModelProvider, + BAIDU_OATUH_URL, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; +import { getAccessToken } from "@/app/utils/baidu"; + +const serverConfig = getServerSideConfig(); + +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Baidu Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Ernie); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + if (!serverConfig.baiduApiKey || !serverConfig.baiduSecretKey) { + return NextResponse.json( + { + error: true, + message: `missing BAIDU_API_KEY or BAIDU_SECRET_KEY in server env vars`, + }, + { + status: 401, + }, + ); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[Baidu] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; + +async function request(req: NextRequest) { + const controller = new AbortController(); + + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Baidu, ""); + + let baseUrl = serverConfig.baiduUrl || BAIDU_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const { access_token } = await getAccessToken( + serverConfig.baiduApiKey as string, + serverConfig.baiduSecretKey as string, + ); + const fetchUrl = `${baseUrl}${path}?access_token=${access_token}`; + + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Baidu as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[Baidu] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable 
nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/bytedance/[...path]/route.ts b/app/api/bytedance/[...path]/route.ts new file mode 100644 index 00000000000..336c837f037 --- /dev/null +++ b/app/api/bytedance/[...path]/route.ts @@ -0,0 +1,153 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + BYTEDANCE_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; + +const serverConfig = getServerSideConfig(); + +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[ByteDance Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Doubao); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[ByteDance] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; + +async function request(req: NextRequest) { + const controller = new AbortController(); + + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.ByteDance, ""); + + let baseUrl = serverConfig.bytedanceUrl || BYTEDANCE_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? 
"", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.ByteDance as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[ByteDance] filter`, e); + } + } + + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/common.ts b/app/api/common.ts index 22bd5d4a409..24453dd9635 100644 --- a/app/api/common.ts +++ b/app/api/common.ts @@ -1,66 +1,146 @@ import { NextRequest, NextResponse } from "next/server"; +import { getServerSideConfig } from "../config/server"; +import { + DEFAULT_MODELS, + OPENAI_BASE_URL, + GEMINI_BASE_URL, + ServiceProvider, +} from "../constant"; +import { isModelAvailableInServer } from "../utils/model"; +import { cloudflareAIGatewayUrl } from "../utils/cloudflare"; -export const OPENAI_URL = "api.openai.com"; -const DEFAULT_PROTOCOL = "https"; -const PROTOCOL = process.env.PROTOCOL ?? DEFAULT_PROTOCOL; -const BASE_URL = process.env.BASE_URL ?? OPENAI_URL; -const DISABLE_GPT4 = !!process.env.DISABLE_GPT4; +const serverConfig = getServerSideConfig(); export async function requestOpenai(req: NextRequest) { const controller = new AbortController(); - const authValue = req.headers.get("Authorization") ?? ""; - const openaiPath = `${req.nextUrl.pathname}${req.nextUrl.search}`.replaceAll( + + const isAzure = req.nextUrl.pathname.includes("azure/deployments"); + + var authValue, + authHeaderName = ""; + if (isAzure) { + authValue = + req.headers + .get("Authorization") + ?.trim() + .replaceAll("Bearer ", "") + .trim() ?? ""; + + authHeaderName = "api-key"; + } else { + authValue = req.headers.get("Authorization") ?? ""; + authHeaderName = "Authorization"; + } + + let path = `${req.nextUrl.pathname}${req.nextUrl.search}`.replaceAll( "/api/openai/", "", ); - let baseUrl = BASE_URL; + let baseUrl = + (isAzure ? 
serverConfig.azureUrl : serverConfig.baseUrl) || OPENAI_BASE_URL; if (!baseUrl.startsWith("http")) { - baseUrl = `${PROTOCOL}://${baseUrl}`; + baseUrl = `https://${baseUrl}`; } - console.log("[Proxy] ", openaiPath); + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); console.log("[Base Url]", baseUrl); - if (process.env.OPENAI_ORG_ID) { - console.log("[Org ID]", process.env.OPENAI_ORG_ID); - } + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); - const timeoutId = setTimeout(() => { - controller.abort(); - }, 10 * 60 * 1000); + if (isAzure) { + const azureApiVersion = + req?.nextUrl?.searchParams?.get("api-version") || + serverConfig.azureApiVersion; + baseUrl = baseUrl.split("/deployments").shift() as string; + path = `${req.nextUrl.pathname.replaceAll( + "/api/azure/", + "", + )}?api-version=${azureApiVersion}`; - const fetchUrl = `${baseUrl}/${openaiPath}`; + // Forward compatibility: + // if display_name(deployment_name) not set, and '{deploy-id}' in AZURE_URL + // then using default '{deploy-id}' + if (serverConfig.customModels && serverConfig.azureUrl) { + const modelName = path.split("/")[1]; + let realDeployName = ""; + serverConfig.customModels + .split(",") + .filter((v) => !!v && !v.startsWith("-") && v.includes(modelName)) + .forEach((m) => { + const [fullName, displayName] = m.split("="); + const [_, providerName] = fullName.split("@"); + if (providerName === "azure" && !displayName) { + const [_, deployId] = (serverConfig?.azureUrl ?? "").split( + "deployments/", + ); + if (deployId) { + realDeployName = deployId; + } + } + }); + if (realDeployName) { + console.log("[Replace with DeployId", realDeployName); + path = path.replaceAll(modelName, realDeployName); + } + } + } + + const fetchUrl = cloudflareAIGatewayUrl(`${baseUrl}/${path}`); + console.log("fetchUrl", fetchUrl); const fetchOptions: RequestInit = { headers: { "Content-Type": "application/json", - Authorization: authValue, - ...(process.env.OPENAI_ORG_ID && { - "OpenAI-Organization": process.env.OPENAI_ORG_ID, + "Cache-Control": "no-store", + [authHeaderName]: authValue, + ...(serverConfig.openaiOrgId && { + "OpenAI-Organization": serverConfig.openaiOrgId, }), }, - cache: "no-store", method: req.method, body: req.body, + // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body + redirect: "manual", // @ts-ignore duplex: "half", signal: controller.signal, }; // #1815 try to refuse gpt4 request - if (DISABLE_GPT4 && req.body) { + if (serverConfig.customModels && req.body) { try { const clonedBody = await req.text(); fetchOptions.body = clonedBody; - const jsonBody = JSON.parse(clonedBody); + const jsonBody = JSON.parse(clonedBody) as { model?: string }; - if ((jsonBody?.model ?? 
"").includes("gpt-4")) { + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.OpenAI as string, + ) || + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Azure as string, + ) + ) { return NextResponse.json( { error: true, - message: "you are not allowed to use gpt-4 model", + message: `you are not allowed to use ${jsonBody?.model} model`, }, { status: 403, @@ -75,13 +155,35 @@ export async function requestOpenai(req: NextRequest) { try { const res = await fetch(fetchUrl, fetchOptions); + // Extract the OpenAI-Organization header from the response + const openaiOrganizationHeader = res.headers.get("OpenAI-Organization"); + + // Check if serverConfig.openaiOrgId is defined and not an empty string + if (serverConfig.openaiOrgId && serverConfig.openaiOrgId.trim() !== "") { + // If openaiOrganizationHeader is present, log it; otherwise, log that the header is not present + console.log("[Org ID]", openaiOrganizationHeader); + } else { + console.log("[Org ID] is not set up."); + } + // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); newHeaders.delete("www-authenticate"); - - // to disbale ngnix buffering + // to disable nginx buffering newHeaders.set("X-Accel-Buffering", "no"); + // Conditionally delete the OpenAI-Organization header from the response if [Org ID] is undefined or empty (not setup in ENV) + // Also, this is to prevent the header from being sent to the client + if (!serverConfig.openaiOrgId || serverConfig.openaiOrgId.trim() === "") { + newHeaders.delete("OpenAI-Organization"); + } + + // The latest version of the OpenAI API forced the content-encoding to be "br" in json response + // So if the streaming is disabled, we need to remove the content-encoding header + // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header + // The browser will try to decode the response with brotli and fail + newHeaders.delete("content-encoding"); + return new Response(res.body, { status: res.status, statusText: res.statusText, diff --git a/app/api/config/route.ts b/app/api/config/route.ts index 6b95655885a..b0d9da03103 100644 --- a/app/api/config/route.ts +++ b/app/api/config/route.ts @@ -4,13 +4,16 @@ import { getServerSideConfig } from "../../config/server"; const serverConfig = getServerSideConfig(); -// Danger! Don not write any secret value here! +// Danger! Do not hard code any secret value here! // 警告!不要在这里写入任何敏感信息! 
const DANGER_CONFIG = { needCode: serverConfig.needCode, hideUserApiKey: serverConfig.hideUserApiKey, - enableGPT4: serverConfig.enableGPT4, + disableGPT4: serverConfig.disableGPT4, hideBalanceQuery: serverConfig.hideBalanceQuery, + disableFastLink: serverConfig.disableFastLink, + customModels: serverConfig.customModels, + defaultModel: serverConfig.defaultModel, }; declare global { diff --git a/app/api/google/[...path]/route.ts b/app/api/google/[...path]/route.ts new file mode 100644 index 00000000000..81e50538a56 --- /dev/null +++ b/app/api/google/[...path]/route.ts @@ -0,0 +1,118 @@ +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "../../auth"; +import { getServerSideConfig } from "@/app/config/server"; +import { GEMINI_BASE_URL, Google, ModelProvider } from "@/app/constant"; + +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Google Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const controller = new AbortController(); + + const serverConfig = getServerSideConfig(); + + let baseUrl = serverConfig.googleUrl || GEMINI_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + let path = `${req.nextUrl.pathname}`.replaceAll("/api/google/", ""); + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const authResult = auth(req, ModelProvider.GeminiPro); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + const bearToken = req.headers.get("Authorization") ?? ""; + const token = bearToken.trim().replaceAll("Bearer ", "").trim(); + + const key = token ? token : serverConfig.googleApiKey; + + if (!key) { + return NextResponse.json( + { + error: true, + message: `missing GOOGLE_API_KEY in server env vars`, + }, + { + status: 401, + }, + ); + } + + const fetchUrl = `${baseUrl}/${path}?key=${key}${ + req?.nextUrl?.searchParams?.get("alt") == "sse" ? 
"&alt=sse" : "" + }`; + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + "Cache-Control": "no-store", + }, + method: req.method, + body: req.body, + // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + try { + const res = await fetch(fetchUrl, fetchOptions); + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "bom1", + "cle1", + "cpt1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; diff --git a/app/api/openai/[...path]/route.ts b/app/api/openai/[...path]/route.ts index 36f92d0ff53..77059c151fc 100644 --- a/app/api/openai/[...path]/route.ts +++ b/app/api/openai/[...path]/route.ts @@ -1,4 +1,6 @@ -import { OpenaiPath } from "@/app/constant"; +import { type OpenAIListModelResponse } from "@/app/client/platforms/openai"; +import { getServerSideConfig } from "@/app/config/server"; +import { ModelProvider, OpenaiPath } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "../../auth"; @@ -6,6 +8,18 @@ import { requestOpenai } from "../../common"; const ALLOWD_PATH = new Set(Object.values(OpenaiPath)); +function getModels(remoteModelRes: OpenAIListModelResponse) { + const config = getServerSideConfig(); + + if (config.disableGPT4) { + remoteModelRes.data = remoteModelRes.data.filter( + (m) => !m.id.startsWith("gpt-4"), + ); + } + + return remoteModelRes; +} + async function handle( req: NextRequest, { params }: { params: { path: string[] } }, @@ -31,7 +45,7 @@ async function handle( ); } - const authResult = auth(req); + const authResult = auth(req, ModelProvider.GPT); if (authResult.error) { return NextResponse.json(authResult, { status: 401, @@ -39,7 +53,18 @@ async function handle( } try { - return await requestOpenai(req); + const response = await requestOpenai(req); + + // list models + if (subpath === OpenaiPath.ListModelPath && response.status === 200) { + const resJson = (await response.json()) as OpenAIListModelResponse; + const availableModels = getModels(resJson); + return NextResponse.json(availableModels, { + status: response.status, + }); + } + + return response; } catch (e) { console.error("[OpenAI] ", e); return NextResponse.json(prettyObject(e)); @@ -50,3 +75,22 @@ export const GET = handle; export const POST = handle; export const runtime = "edge"; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; diff --git a/app/api/upstash/[action]/[...key]/route.ts b/app/api/upstash/[action]/[...key]/route.ts new file mode 100644 index 00000000000..fcfef471862 --- /dev/null +++ b/app/api/upstash/[action]/[...key]/route.ts @@ -0,0 +1,73 @@ +import { NextRequest, NextResponse } from "next/server"; + +async function handle( + req: NextRequest, + { params }: { 
params: { action: string; key: string[] } }, +) { + const requestUrl = new URL(req.url); + const endpoint = requestUrl.searchParams.get("endpoint"); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + const [...key] = params.key; + // only allow to request to *.upstash.io + if (!endpoint || !new URL(endpoint).hostname.endsWith(".upstash.io")) { + return NextResponse.json( + { + error: true, + msg: "you are not allowed to request " + params.key.join("/"), + }, + { + status: 403, + }, + ); + } + + // only allow upstash get and set method + if (params.action !== "get" && params.action !== "set") { + console.log("[Upstash Route] forbidden action ", params.action); + return NextResponse.json( + { + error: true, + msg: "you are not allowed to request " + params.action, + }, + { + status: 403, + }, + ); + } + + const targetUrl = `${endpoint}/${params.action}/${params.key.join("/")}`; + + const method = req.method; + const shouldNotHaveBody = ["get", "head"].includes( + method?.toLowerCase() ?? "", + ); + + const fetchOptions: RequestInit = { + headers: { + authorization: req.headers.get("authorization") ?? "", + }, + body: shouldNotHaveBody ? null : req.body, + method, + // @ts-ignore + duplex: "half", + }; + + console.log("[Upstash Proxy]", targetUrl, fetchOptions); + const fetchResult = await fetch(targetUrl, fetchOptions); + + console.log("[Any Proxy]", targetUrl, { + status: fetchResult.status, + statusText: fetchResult.statusText, + }); + + return fetchResult; +} + +export const POST = handle; +export const GET = handle; +export const OPTIONS = handle; + +export const runtime = "edge"; diff --git a/app/api/webdav/[...path]/route.ts b/app/api/webdav/[...path]/route.ts new file mode 100644 index 00000000000..01286fc1bf9 --- /dev/null +++ b/app/api/webdav/[...path]/route.ts @@ -0,0 +1,158 @@ +import { NextRequest, NextResponse } from "next/server"; +import { STORAGE_KEY, internalAllowedWebDavEndpoints } from "../../../constant"; +import { getServerSideConfig } from "@/app/config/server"; + +const config = getServerSideConfig(); + +const mergedAllowedWebDavEndpoints = [ + ...internalAllowedWebDavEndpoints, + ...config.allowedWebDevEndpoints, +].filter((domain) => Boolean(domain.trim())); + +const normalizeUrl = (url: string) => { + try { + return new URL(url); + } catch (err) { + return null; + } +}; + +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + const folder = STORAGE_KEY; + const fileName = `${folder}/backup.json`; + + const requestUrl = new URL(req.url); + let endpoint = requestUrl.searchParams.get("endpoint"); + + // Validate the endpoint to prevent potential SSRF attacks + if ( + !endpoint || + !mergedAllowedWebDavEndpoints.some((allowedEndpoint) => { + const normalizedAllowedEndpoint = normalizeUrl(allowedEndpoint); + const normalizedEndpoint = normalizeUrl(endpoint as string); + + return normalizedEndpoint && + normalizedEndpoint.hostname === normalizedAllowedEndpoint?.hostname && + normalizedEndpoint.pathname.startsWith(normalizedAllowedEndpoint.pathname); + }) + ) { + return NextResponse.json( + { + error: true, + msg: "Invalid endpoint", + }, + { + status: 400, + }, + ); + } + + if (!endpoint?.endsWith("/")) { + endpoint += "/"; + } + + const endpointPath = params.path.join("/"); + const targetPath = `${endpoint}${endpointPath}`; + + // only allow MKCOL, GET, PUT + if (req.method !== 
"MKCOL" && req.method !== "GET" && req.method !== "PUT") { + return NextResponse.json( + { + error: true, + msg: "you are not allowed to request " + targetPath, + }, + { + status: 403, + }, + ); + } + + // for MKCOL request, only allow request ${folder} + if (req.method === "MKCOL" && !targetPath.endsWith(folder)) { + return NextResponse.json( + { + error: true, + msg: "you are not allowed to request " + targetPath, + }, + { + status: 403, + }, + ); + } + + // for GET request, only allow request ending with fileName + if (req.method === "GET" && !targetPath.endsWith(fileName)) { + return NextResponse.json( + { + error: true, + msg: "you are not allowed to request " + targetPath, + }, + { + status: 403, + }, + ); + } + + // for PUT request, only allow request ending with fileName + if (req.method === "PUT" && !targetPath.endsWith(fileName)) { + return NextResponse.json( + { + error: true, + msg: "you are not allowed to request " + targetPath, + }, + { + status: 403, + }, + ); + } + + const targetUrl = targetPath; + + const method = req.method; + const shouldNotHaveBody = ["get", "head"].includes( + method?.toLowerCase() ?? "", + ); + + const fetchOptions: RequestInit = { + headers: { + authorization: req.headers.get("authorization") ?? "", + }, + body: shouldNotHaveBody ? null : req.body, + redirect: "manual", + method, + // @ts-ignore + duplex: "half", + }; + + let fetchResult; + + try { + fetchResult = await fetch(targetUrl, fetchOptions); + } finally { + console.log( + "[Any Proxy]", + targetUrl, + { + method: req.method, + }, + { + status: fetchResult?.status, + statusText: fetchResult?.statusText, + }, + ); + } + + return fetchResult; +} + +export const PUT = handle; +export const GET = handle; +export const OPTIONS = handle; + +export const runtime = "edge"; diff --git a/app/client/api.ts b/app/client/api.ts index a8960ff5129..c0c71480cd0 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -1,7 +1,17 @@ import { getClientConfig } from "../config/client"; -import { ACCESS_CODE_PREFIX } from "../constant"; -import { ChatMessage, ModelType, useAccessStore } from "../store"; +import { + ACCESS_CODE_PREFIX, + Azure, + ModelProvider, + ServiceProvider, +} from "../constant"; +import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store"; import { ChatGPTApi } from "./platforms/openai"; +import { GeminiProApi } from "./platforms/google"; +import { ClaudeApi } from "./platforms/anthropic"; +import { ErnieApi } from "./platforms/baidu"; +import { DoubaoApi } from "./platforms/bytedance"; +import { QwenApi } from "./platforms/alibaba"; export const ROLES = ["system", "user", "assistant"] as const; export type MessageRole = (typeof ROLES)[number]; @@ -9,13 +19,22 @@ export type MessageRole = (typeof ROLES)[number]; export const Models = ["gpt-3.5-turbo", "gpt-4"] as const; export type ChatModel = ModelType; +export interface MultimodalContent { + type: "text" | "image_url"; + text?: string; + image_url?: { + url: string; + }; +} + export interface RequestMessage { role: MessageRole; - content: string; + content: string | MultimodalContent[]; } export interface LLMConfig { model: string; + providerName?: string; temperature?: number; top_p?: number; stream?: boolean; @@ -38,9 +57,23 @@ export interface LLMUsage { total: number; } +export interface LLMModel { + name: string; + displayName?: string; + available: boolean; + provider: LLMModelProvider; +} + +export interface LLMModelProvider { + id: string; + providerName: string; + providerType: string; +} + export abstract 
class LLMApi { abstract chat(options: ChatOptions): Promise; abstract usage(): Promise; + abstract models(): Promise; } type ProviderName = "openai" | "azure" | "claude" | "palm"; @@ -67,8 +100,26 @@ interface ChatProvider { export class ClientApi { public llm: LLMApi; - constructor() { - this.llm = new ChatGPTApi(); + constructor(provider: ModelProvider = ModelProvider.GPT) { + switch (provider) { + case ModelProvider.GeminiPro: + this.llm = new GeminiProApi(); + break; + case ModelProvider.Claude: + this.llm = new ClaudeApi(); + break; + case ModelProvider.Ernie: + this.llm = new ErnieApi(); + break; + case ModelProvider.Doubao: + this.llm = new DoubaoApi(); + break; + case ModelProvider.Qwen: + this.llm = new QwenApi(); + break; + default: + this.llm = new ChatGPTApi(); + } } config() {} @@ -87,13 +138,13 @@ export class ClientApi { { from: "human", value: - "Share from [ChatGPT Next Web]: https://github.com/Yidadaa/ChatGPT-Next-Web", + "Share from [NextChat]: https://github.com/Yidadaa/ChatGPT-Next-Web", }, ]); // 敬告二开开发者们,为了开源大模型的发展,请不要修改上述消息,此消息用于后续数据清洗使用 // Please do not modify this message - console.log("[Share]", msgs); + console.log("[Share]", messages, msgs); const clientConfig = getClientConfig(); const proxyUrl = "/sharegpt"; const rawUrl = "https://sharegpt.com/api/conversations"; @@ -117,29 +168,102 @@ export class ClientApi { } } -export const api = new ClientApi(); - export function getHeaders() { const accessStore = useAccessStore.getState(); - let headers: Record = { + const chatStore = useChatStore.getState(); + const headers: Record = { "Content-Type": "application/json", - "x-requested-with": "XMLHttpRequest", + Accept: "application/json", }; - const makeBearer = (token: string) => `Bearer ${token.trim()}`; - const validString = (x: string) => x && x.length > 0; - - // use user's api key first - if (validString(accessStore.token)) { - headers.Authorization = makeBearer(accessStore.token); - } else if ( - accessStore.enabledAccessControl() && - validString(accessStore.accessCode) - ) { - headers.Authorization = makeBearer( + const clientConfig = getClientConfig(); + + function getConfig() { + const modelConfig = chatStore.currentSession().mask.modelConfig; + const isGoogle = modelConfig.providerName == ServiceProvider.Google; + const isAzure = modelConfig.providerName === ServiceProvider.Azure; + const isAnthropic = modelConfig.providerName === ServiceProvider.Anthropic; + const isBaidu = modelConfig.providerName == ServiceProvider.Baidu; + const isByteDance = modelConfig.providerName === ServiceProvider.ByteDance; + const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba; + const isEnabledAccessControl = accessStore.enabledAccessControl(); + const apiKey = isGoogle + ? accessStore.googleApiKey + : isAzure + ? accessStore.azureApiKey + : isAnthropic + ? accessStore.anthropicApiKey + : isByteDance + ? accessStore.bytedanceApiKey + : isAlibaba + ? accessStore.alibabaApiKey + : accessStore.openaiApiKey; + return { + isGoogle, + isAzure, + isAnthropic, + isBaidu, + isByteDance, + isAlibaba, + apiKey, + isEnabledAccessControl, + }; + } + + function getAuthHeader(): string { + return isAzure ? "api-key" : isAnthropic ? "x-api-key" : "Authorization"; + } + + function getBearerToken(apiKey: string, noBearer: boolean = false): string { + return validString(apiKey) + ? `${noBearer ? 
"" : "Bearer "}${apiKey.trim()}` + : ""; + } + + function validString(x: string): boolean { + return x?.length > 0; + } + const { + isGoogle, + isAzure, + isAnthropic, + isBaidu, + apiKey, + isEnabledAccessControl, + } = getConfig(); + // when using google api in app, not set auth header + if (isGoogle && clientConfig?.isApp) return headers; + // when using baidu api in app, not set auth header + if (isBaidu && clientConfig?.isApp) return headers; + + const authHeader = getAuthHeader(); + + const bearerToken = getBearerToken(apiKey, isAzure || isAnthropic); + + if (bearerToken) { + headers[authHeader] = bearerToken; + } else if (isEnabledAccessControl && validString(accessStore.accessCode)) { + headers["Authorization"] = getBearerToken( ACCESS_CODE_PREFIX + accessStore.accessCode, ); } return headers; } + +export function getClientApi(provider: ServiceProvider): ClientApi { + switch (provider) { + case ServiceProvider.Google: + return new ClientApi(ModelProvider.GeminiPro); + case ServiceProvider.Anthropic: + return new ClientApi(ModelProvider.Claude); + case ServiceProvider.Baidu: + return new ClientApi(ModelProvider.Ernie); + case ServiceProvider.ByteDance: + return new ClientApi(ModelProvider.Doubao); + case ServiceProvider.Alibaba: + return new ClientApi(ModelProvider.Qwen); + default: + return new ClientApi(ModelProvider.GPT); + } +} diff --git a/app/client/controller.ts b/app/client/controller.ts index 86cb99e7fee..a2e00173dd0 100644 --- a/app/client/controller.ts +++ b/app/client/controller.ts @@ -3,17 +3,17 @@ export const ChatControllerPool = { controllers: {} as Record, addController( - sessionIndex: number, - messageId: number, + sessionId: string, + messageId: string, controller: AbortController, ) { - const key = this.key(sessionIndex, messageId); + const key = this.key(sessionId, messageId); this.controllers[key] = controller; return key; }, - stop(sessionIndex: number, messageId: number) { - const key = this.key(sessionIndex, messageId); + stop(sessionId: string, messageId: string) { + const key = this.key(sessionId, messageId); const controller = this.controllers[key]; controller?.abort(); }, @@ -26,12 +26,12 @@ export const ChatControllerPool = { return Object.values(this.controllers).length > 0; }, - remove(sessionIndex: number, messageId: number) { - const key = this.key(sessionIndex, messageId); + remove(sessionId: string, messageId: string) { + const key = this.key(sessionId, messageId); delete this.controllers[key]; }, - key(sessionIndex: number, messageIndex: number) { - return `${sessionIndex},${messageIndex}`; + key(sessionId: string, messageIndex: string) { + return `${sessionId},${messageIndex}`; }, }; diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts new file mode 100644 index 00000000000..723ba774b8e --- /dev/null +++ b/app/client/platforms/alibaba.ts @@ -0,0 +1,268 @@ +"use client"; +import { + ApiPath, + Alibaba, + ALIBABA_BASE_URL, + REQUEST_TIMEOUT_MS, +} from "@/app/constant"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; + +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + MultimodalContent, +} from "../api"; +import Locale from "../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent, isVisionModel } from "@/app/utils"; + +export interface OpenAIListModelResponse { + object: string; + data: 
Array<{ + id: string; + object: string; + root: string; + }>; +} + +interface RequestInput { + messages: { + role: "system" | "user" | "assistant"; + content: string | MultimodalContent[]; + }[]; +} +interface RequestParam { + result_format: string; + incremental_output?: boolean; + temperature: number; + repetition_penalty?: number; + top_p: number; + max_tokens?: number; +} +interface RequestPayload { + model: string; + input: RequestInput; + parameters: RequestParam; +} + +export class QwenApi implements LLMApi { + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.alibabaUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + baseUrl = isApp ? ALIBABA_BASE_URL : ApiPath.Alibaba; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Alibaba)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res?.output?.choices?.at(0)?.message?.content ?? ""; + } + + async chat(options: ChatOptions) { + const messages = options.messages.map((v) => ({ + role: v.role, + content: getMessageTextContent(v), + })); + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + }, + }; + + const shouldStream = !!options.config.stream; + const requestPayload: RequestPayload = { + model: modelConfig.model, + input: { + messages, + }, + parameters: { + result_format: "message", + incremental_output: shouldStream, + temperature: modelConfig.temperature, + // max_tokens: modelConfig.max_tokens, + top_p: modelConfig.top_p === 1 ? 0.99 : modelConfig.top_p, // qwen top_p is should be < 1 + }, + }; + + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(Alibaba.ChatPath); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: { + ...getHeaders(), + "X-DashScope-SSE": shouldStream ? 
"enable" : "disable", + }, + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + let responseText = ""; + let remainText = ""; + let finished = false; + + // animate response to make it looks smooth + function animateResponseText() { + if (finished || controller.signal.aborted) { + responseText += remainText; + console.log("[Response Animation] finished"); + if (responseText?.length === 0) { + options.onError?.(new Error("empty response from server")); + } + return; + } + + if (remainText.length > 0) { + const fetchCount = Math.max(1, Math.round(remainText.length / 60)); + const fetchText = remainText.slice(0, fetchCount); + responseText += fetchText; + remainText = remainText.slice(fetchCount); + options.onUpdate?.(responseText, fetchText); + } + + requestAnimationFrame(animateResponseText); + } + + // start animaion + animateResponseText(); + + const finish = () => { + if (!finished) { + finished = true; + options.onFinish(responseText + remainText); + } + }; + + controller.signal.onabort = finish; + + fetchEventSource(chatPath, { + ...chatPayload, + async onopen(res) { + clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log( + "[Alibaba] request response content type: ", + contentType, + ); + + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [responseText]; + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); + } + + if (extraInfo) { + responseTexts.push(extraInfo); + } + + responseText = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const choices = json.output.choices as Array<{ + message: { content: string }; + }>; + const delta = choices[0]?.message?.content; + if (delta) { + remainText += delta; + } + } catch (e) { + console.error("[Request] parse error", text, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} +export { Alibaba }; diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts new file mode 100644 index 00000000000..bf8faf83763 --- /dev/null +++ b/app/client/platforms/anthropic.ts @@ -0,0 +1,392 @@ +import { ACCESS_CODE_PREFIX, Anthropic, ApiPath } from "@/app/constant"; +import { ChatOptions, getHeaders, LLMApi, MultimodalContent } from "../api"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { getClientConfig } from "@/app/config/client"; +import { DEFAULT_API_HOST } from "@/app/constant"; +import { 
RequestMessage } from "@/app/typing"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; + +import Locale from "../../locales"; +import { prettyObject } from "@/app/utils/format"; +import { getMessageTextContent, isVisionModel } from "@/app/utils"; +import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; + +export type MultiBlockContent = { + type: "image" | "text"; + source?: { + type: string; + media_type: string; + data: string; + }; + text?: string; +}; + +export type AnthropicMessage = { + role: (typeof ClaudeMapper)[keyof typeof ClaudeMapper]; + content: string | MultiBlockContent[]; +}; + +export interface AnthropicChatRequest { + model: string; // The model that will complete your prompt. + messages: AnthropicMessage[]; // The prompt that you want Claude to complete. + max_tokens: number; // The maximum number of tokens to generate before stopping. + stop_sequences?: string[]; // Sequences that will cause the model to stop generating completion text. + temperature?: number; // Amount of randomness injected into the response. + top_p?: number; // Use nucleus sampling. + top_k?: number; // Only sample from the top K options for each subsequent token. + metadata?: object; // An object describing metadata about the request. + stream?: boolean; // Whether to incrementally stream the response using server-sent events. +} + +export interface ChatRequest { + model: string; // The model that will complete your prompt. + prompt: string; // The prompt that you want Claude to complete. + max_tokens_to_sample: number; // The maximum number of tokens to generate before stopping. + stop_sequences?: string[]; // Sequences that will cause the model to stop generating completion text. + temperature?: number; // Amount of randomness injected into the response. + top_p?: number; // Use nucleus sampling. + top_k?: number; // Only sample from the top K options for each subsequent token. + metadata?: object; // An object describing metadata about the request. + stream?: boolean; // Whether to incrementally stream the response using server-sent events. 
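+  // (These fields mirror Anthropic's legacy Text Completions request shape;
+  // the Messages-style AnthropicChatRequest above is what ClaudeApi.chat()
+  // actually sends.)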
+} + +export interface ChatResponse { + completion: string; + stop_reason: "stop_sequence" | "max_tokens"; + model: string; +} + +export type ChatStreamResponse = ChatResponse & { + stop?: string; + log_id: string; +}; + +const ClaudeMapper = { + assistant: "assistant", + user: "user", + system: "user", +} as const; + +const keys = ["claude-2, claude-instant-1"]; + +export class ClaudeApi implements LLMApi { + extractMessage(res: any) { + console.log("[Response] claude response: ", res); + + return res?.content?.[0]?.text; + } + async chat(options: ChatOptions): Promise { + const visionModel = isVisionModel(options.config.model); + + const accessStore = useAccessStore.getState(); + + const shouldStream = !!options.config.stream; + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + }, + }; + + const messages = [...options.messages]; + + const keys = ["system", "user"]; + + // roles must alternate between "user" and "assistant" in claude, so add a fake assistant message between two user messages + for (let i = 0; i < messages.length - 1; i++) { + const message = messages[i]; + const nextMessage = messages[i + 1]; + + if (keys.includes(message.role) && keys.includes(nextMessage.role)) { + messages[i] = [ + message, + { + role: "assistant", + content: ";", + }, + ] as any; + } + } + + const prompt = messages + .flat() + .filter((v) => { + if (!v.content) return false; + if (typeof v.content === "string" && !v.content.trim()) return false; + return true; + }) + .map((v) => { + const { role, content } = v; + const insideRole = ClaudeMapper[role] ?? "user"; + + if (!visionModel || typeof content === "string") { + return { + role: insideRole, + content: getMessageTextContent(v), + }; + } + return { + role: insideRole, + content: content + .filter((v) => v.image_url || v.text) + .map(({ type, text, image_url }) => { + if (type === "text") { + return { + type, + text: text!, + }; + } + const { url = "" } = image_url || {}; + const colonIndex = url.indexOf(":"); + const semicolonIndex = url.indexOf(";"); + const comma = url.indexOf(","); + + const mimeType = url.slice(colonIndex + 1, semicolonIndex); + const encodeType = url.slice(semicolonIndex + 1, comma); + const data = url.slice(comma + 1); + + return { + type: "image" as const, + source: { + type: encodeType, + media_type: mimeType, + data, + }, + }; + }), + }; + }); + + if (prompt[0]?.role === "assistant") { + prompt.unshift({ + role: "user", + content: ";", + }); + } + + const requestBody: AnthropicChatRequest = { + messages: prompt, + stream: shouldStream, + + model: modelConfig.model, + max_tokens: modelConfig.max_tokens, + temperature: modelConfig.temperature, + top_p: modelConfig.top_p, + // top_k: modelConfig.top_k, + top_k: 5, + }; + + const path = this.path(Anthropic.ChatPath); + + const controller = new AbortController(); + options.onController?.(controller); + + const payload = { + method: "POST", + body: JSON.stringify(requestBody), + signal: controller.signal, + headers: { + ...getHeaders(), // get common headers + "anthropic-version": accessStore.anthropicApiVersion, + // do not send `anthropicApiKey` in browser!!! 
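+        // (if the user supplies no key, the server-side /api/anthropic route
+        // injects the system ANTHROPIC_API_KEY through auth() instead)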
+ // Authorization: getAuthKey(accessStore.anthropicApiKey), + }, + }; + + if (shouldStream) { + try { + const context = { + text: "", + finished: false, + }; + + const finish = () => { + if (!context.finished) { + options.onFinish(context.text); + context.finished = true; + } + }; + + controller.signal.onabort = finish; + fetchEventSource(path, { + ...payload, + async onopen(res) { + const contentType = res.headers.get("content-type"); + console.log("response content type: ", contentType); + + if (contentType?.startsWith("text/plain")) { + context.text = await res.clone().text(); + return finish(); + } + + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [context.text]; + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); + } + + if (extraInfo) { + responseTexts.push(extraInfo); + } + + context.text = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + let chunkJson: + | undefined + | { + type: "content_block_delta" | "content_block_stop"; + delta?: { + type: "text_delta"; + text: string; + }; + index: number; + }; + try { + chunkJson = JSON.parse(msg.data); + } catch (e) { + console.error("[Response] parse error", msg.data); + } + + if (!chunkJson || chunkJson.type === "content_block_stop") { + return finish(); + } + + const { delta } = chunkJson; + if (delta?.text) { + context.text += delta.text; + options.onUpdate?.(context.text, delta.text); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } catch (e) { + console.error("failed to chat", e); + options.onError?.(e as Error); + } + } else { + try { + controller.signal.onabort = () => options.onFinish(""); + + const res = await fetch(path, payload); + const resJson = await res.json(); + + const message = this.extractMessage(resJson); + options.onFinish(message); + } catch (e) { + console.error("failed to chat", e); + options.onError?.(e as Error); + } + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + async models() { + // const provider = { + // id: "anthropic", + // providerName: "Anthropic", + // providerType: "anthropic", + // }; + + return [ + // { + // name: "claude-instant-1.2", + // available: true, + // provider, + // }, + // { + // name: "claude-2.0", + // available: true, + // provider, + // }, + // { + // name: "claude-2.1", + // available: true, + // provider, + // }, + // { + // name: "claude-3-opus-20240229", + // available: true, + // provider, + // }, + // { + // name: "claude-3-sonnet-20240229", + // available: true, + // provider, + // }, + // { + // name: "claude-3-haiku-20240307", + // available: true, + // provider, + // }, + ]; + } + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl: string = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.anthropicUrl; + } + + // if endpoint is empty, use default endpoint + if (baseUrl.trim().length === 0) { + const isApp = !!getClientConfig()?.isApp; + + baseUrl = isApp + ? 
DEFAULT_API_HOST + "/api/proxy/anthropic" + : ApiPath.Anthropic; + } + + if (!baseUrl.startsWith("http") && !baseUrl.startsWith("/api")) { + baseUrl = "https://" + baseUrl; + } + + baseUrl = trimEnd(baseUrl, "/"); + + // try rebuild url, when using cloudflare ai gateway in client + return cloudflareAIGatewayUrl(`${baseUrl}/${path}`); + } +} + +function trimEnd(s: string, end = " ") { + if (end.length === 0) return s; + + while (s.endsWith(end)) { + s = s.slice(0, -end.length); + } + + return s; +} diff --git a/app/client/platforms/baidu.ts b/app/client/platforms/baidu.ts new file mode 100644 index 00000000000..188b78bf963 --- /dev/null +++ b/app/client/platforms/baidu.ts @@ -0,0 +1,273 @@ +"use client"; +import { + ApiPath, + Baidu, + BAIDU_BASE_URL, + REQUEST_TIMEOUT_MS, +} from "@/app/constant"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { getAccessToken } from "@/app/utils/baidu"; + +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + MultimodalContent, +} from "../api"; +import Locale from "../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent } from "@/app/utils"; + +export interface OpenAIListModelResponse { + object: string; + data: Array<{ + id: string; + object: string; + root: string; + }>; +} + +interface RequestPayload { + messages: { + role: "system" | "user" | "assistant"; + content: string | MultimodalContent[]; + }[]; + stream?: boolean; + model: string; + temperature: number; + presence_penalty: number; + frequency_penalty: number; + top_p: number; + max_tokens?: number; +} + +export class ErnieApi implements LLMApi { + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.baiduUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + // do not use proxy for baidubce api + baseUrl = isApp ? 
BAIDU_BASE_URL : ApiPath.Baidu; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Baidu)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + async chat(options: ChatOptions) { + const messages = options.messages.map((v) => ({ + role: v.role, + content: getMessageTextContent(v), + })); + + // "error_code": 336006, "error_msg": "the length of messages must be an odd number", + if (messages.length % 2 === 0) { + messages.unshift({ + role: "user", + content: " ", + }); + } + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + }, + }; + + const shouldStream = !!options.config.stream; + const requestPayload: RequestPayload = { + messages, + stream: shouldStream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + }; + + console.log("[Request] Baidu payload: ", requestPayload); + + const controller = new AbortController(); + options.onController?.(controller); + + try { + let chatPath = this.path(Baidu.ChatPath(modelConfig.model)); + + // getAccessToken can not run in browser, because cors error + if (!!getClientConfig()?.isApp) { + const accessStore = useAccessStore.getState(); + if (accessStore.useCustomConfig) { + if (accessStore.isValidBaidu()) { + const { access_token } = await getAccessToken( + accessStore.baiduApiKey, + accessStore.baiduSecretKey, + ); + chatPath = `${chatPath}${ + chatPath.includes("?") ? "&" : "?" 
+ }access_token=${access_token}`; + } + } + } + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + let responseText = ""; + let remainText = ""; + let finished = false; + + // animate response to make it looks smooth + function animateResponseText() { + if (finished || controller.signal.aborted) { + responseText += remainText; + console.log("[Response Animation] finished"); + if (responseText?.length === 0) { + options.onError?.(new Error("empty response from server")); + } + return; + } + + if (remainText.length > 0) { + const fetchCount = Math.max(1, Math.round(remainText.length / 60)); + const fetchText = remainText.slice(0, fetchCount); + responseText += fetchText; + remainText = remainText.slice(fetchCount); + options.onUpdate?.(responseText, fetchText); + } + + requestAnimationFrame(animateResponseText); + } + + // start animaion + animateResponseText(); + + const finish = () => { + if (!finished) { + finished = true; + options.onFinish(responseText + remainText); + } + }; + + controller.signal.onabort = finish; + + fetchEventSource(chatPath, { + ...chatPayload, + async onopen(res) { + clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log("[Baidu] request response content type: ", contentType); + + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [responseText]; + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); + } + + if (extraInfo) { + responseTexts.push(extraInfo); + } + + responseText = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const delta = json?.result; + if (delta) { + remainText += delta; + } + } catch (e) { + console.error("[Request] parse error", text, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = resJson?.result; + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} +export { Baidu }; diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts new file mode 100644 index 00000000000..7677cafe12b --- /dev/null +++ b/app/client/platforms/bytedance.ts @@ -0,0 +1,255 @@ +"use client"; +import { + ApiPath, + ByteDance, + BYTEDANCE_BASE_URL, + REQUEST_TIMEOUT_MS, +} from "@/app/constant"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; + +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + MultimodalContent, +} from "../api"; +import Locale from 
"../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent } from "@/app/utils"; + +export interface OpenAIListModelResponse { + object: string; + data: Array<{ + id: string; + object: string; + root: string; + }>; +} + +interface RequestPayload { + messages: { + role: "system" | "user" | "assistant"; + content: string | MultimodalContent[]; + }[]; + stream?: boolean; + model: string; + temperature: number; + presence_penalty: number; + frequency_penalty: number; + top_p: number; + max_tokens?: number; +} + +export class DoubaoApi implements LLMApi { + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.bytedanceUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + baseUrl = isApp ? BYTEDANCE_BASE_URL : ApiPath.ByteDance; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.ByteDance)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res.choices?.at(0)?.message?.content ?? ""; + } + + async chat(options: ChatOptions) { + const messages = options.messages.map((v) => ({ + role: v.role, + content: getMessageTextContent(v), + })); + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + }, + }; + + const shouldStream = !!options.config.stream; + const requestPayload: RequestPayload = { + messages, + stream: shouldStream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + }; + + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(ByteDance.ChatPath); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + let responseText = ""; + let remainText = ""; + let finished = false; + + // animate response to make it looks smooth + function animateResponseText() { + if (finished || controller.signal.aborted) { + responseText += remainText; + console.log("[Response Animation] finished"); + if (responseText?.length === 0) { + options.onError?.(new Error("empty response from server")); + } + return; + } + + if (remainText.length > 0) { + const fetchCount = Math.max(1, Math.round(remainText.length / 60)); + const fetchText = remainText.slice(0, fetchCount); + responseText += fetchText; + remainText = remainText.slice(fetchCount); + options.onUpdate?.(responseText, fetchText); + } + + requestAnimationFrame(animateResponseText); + } + + // start animaion + animateResponseText(); + + const finish = () => { + if (!finished) { + finished = true; + options.onFinish(responseText + remainText); + } + }; + + controller.signal.onabort = finish; + + fetchEventSource(chatPath, { + ...chatPayload, + async 
onopen(res) { + clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log( + "[ByteDance] request response content type: ", + contentType, + ); + + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [responseText]; + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); + } + + if (extraInfo) { + responseTexts.push(extraInfo); + } + + responseText = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { content: string }; + }>; + const delta = choices[0]?.delta?.content; + if (delta) { + remainText += delta; + } + } catch (e) { + console.error("[Request] parse error", text, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} +export { ByteDance }; diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts new file mode 100644 index 00000000000..6054c7a476e --- /dev/null +++ b/app/client/platforms/google.ts @@ -0,0 +1,298 @@ +import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant"; +import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { getClientConfig } from "@/app/config/client"; +import { DEFAULT_API_HOST } from "@/app/constant"; +import Locale from "../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import { + getMessageTextContent, + getMessageImages, + isVisionModel, +} from "@/app/utils"; + +export class GeminiProApi implements LLMApi { + extractMessage(res: any) { + console.log("[Response] gemini-pro response: ", res); + + return ( + res?.candidates?.at(0)?.content?.parts.at(0)?.text || + res?.error?.message || + "" + ); + } + async chat(options: ChatOptions): Promise { + const apiClient = this; + let multimodal = false; + const messages = options.messages.map((v) => { + let parts: any[] = [{ text: getMessageTextContent(v) }]; + if (isVisionModel(options.config.model)) { + const images = getMessageImages(v); + if (images.length > 0) { + multimodal = true; + parts = parts.concat( + images.map((image) => { + const imageType = image.split(";")[0].split(":")[1]; + const imageData = image.split(",")[1]; + return { + inline_data: { + mime_type: imageType, + data: imageData, + }, + }; + }), + ); + } + } + return { + role: v.role.replace("assistant", "model").replace("system", "user"), + parts: parts, + }; + }); + + // 
google requires that role in neighboring messages must not be the same + for (let i = 0; i < messages.length - 1; ) { + // Check if current and next item both have the role "model" + if (messages[i].role === messages[i + 1].role) { + // Concatenate the 'parts' of the current and next item + messages[i].parts = messages[i].parts.concat(messages[i + 1].parts); + // Remove the next item + messages.splice(i + 1, 1); + } else { + // Move to the next item + i++; + } + } + // if (visionModel && messages.length > 1) { + // options.onError?.(new Error("Multiturn chat is not enabled for models/gemini-pro-vision")); + // } + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + }, + }; + const requestPayload = { + contents: messages, + generationConfig: { + // stopSequences: [ + // "Title" + // ], + temperature: modelConfig.temperature, + maxOutputTokens: modelConfig.max_tokens, + topP: modelConfig.top_p, + // "topK": modelConfig.top_k, + }, + safetySettings: [ + { + category: "HARM_CATEGORY_HARASSMENT", + threshold: "BLOCK_ONLY_HIGH", + }, + { + category: "HARM_CATEGORY_HATE_SPEECH", + threshold: "BLOCK_ONLY_HIGH", + }, + { + category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", + threshold: "BLOCK_ONLY_HIGH", + }, + { + category: "HARM_CATEGORY_DANGEROUS_CONTENT", + threshold: "BLOCK_ONLY_HIGH", + }, + ], + }; + + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.googleUrl; + } + + const isApp = !!getClientConfig()?.isApp; + + let shouldStream = !!options.config.stream; + const controller = new AbortController(); + options.onController?.(controller); + try { + if (!baseUrl && isApp) { + baseUrl = DEFAULT_API_HOST + "/api/proxy/google/"; + } + baseUrl = `${baseUrl}/${Google.ChatPath(modelConfig.model)}`.replaceAll( + "//", + "/", + ); + if (isApp) { + baseUrl += `?key=${accessStore.googleApiKey}`; + } + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + let responseText = ""; + let remainText = ""; + let finished = false; + + const finish = () => { + if (!finished) { + finished = true; + options.onFinish(responseText + remainText); + } + }; + + // animate response to make it looks smooth + function animateResponseText() { + if (finished || controller.signal.aborted) { + responseText += remainText; + finish(); + return; + } + + if (remainText.length > 0) { + const fetchCount = Math.max(1, Math.round(remainText.length / 60)); + const fetchText = remainText.slice(0, fetchCount); + responseText += fetchText; + remainText = remainText.slice(fetchCount); + options.onUpdate?.(responseText, fetchText); + } + + requestAnimationFrame(animateResponseText); + } + + // start animaion + animateResponseText(); + + controller.signal.onabort = finish; + + // https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/Streaming_REST.ipynb + const chatPath = + baseUrl.replace("generateContent", "streamGenerateContent") + + (baseUrl.indexOf("?") > -1 ? 
"&alt=sse" : "?alt=sse"); + fetchEventSource(chatPath, { + ...chatPayload, + async onopen(res) { + clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log( + "[Gemini] request response content type: ", + contentType, + ); + + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [responseText]; + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); + } + + if (extraInfo) { + responseTexts.push(extraInfo); + } + + responseText = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const delta = apiClient.extractMessage(json); + + if (delta) { + remainText += delta; + } + + const blockReason = json?.promptFeedback?.blockReason; + if (blockReason) { + // being blocked + console.log(`[Google] [Safety Ratings] result:`, blockReason); + } + } catch (e) { + console.error("[Request] parse error", text, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(baseUrl, chatPayload); + clearTimeout(requestTimeoutId); + const resJson = await res.json(); + if (resJson?.promptFeedback?.blockReason) { + // being blocked + options.onError?.( + new Error( + "Message is being blocked for reason: " + + resJson.promptFeedback.blockReason, + ), + ); + } + const message = apiClient.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + usage(): Promise { + throw new Error("Method not implemented."); + } + async models(): Promise { + return []; + } + path(path: string): string { + return "/api/google/" + path; + } +} + +function ensureProperEnding(str: string) { + if (str.startsWith("[") && !str.endsWith("]")) { + return str + "]"; + } + return str; +} diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 79d485562bb..98851c224c1 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -1,28 +1,102 @@ +"use client"; +// azure and openai, using same models. so using same LLMApi. 
import { + ApiPath, DEFAULT_API_HOST, + DEFAULT_MODELS, OpenaiPath, + Azure, REQUEST_TIMEOUT_MS, + ServiceProvider, } from "@/app/constant"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { collectModelsWithDefaultModel } from "@/app/utils/model"; +import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; -import { ChatOptions, getHeaders, LLMApi, LLMUsage } from "../api"; +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + LLMUsage, + MultimodalContent, +} from "../api"; import Locale from "../../locales"; import { EventStreamContentType, fetchEventSource, } from "@fortaine/fetch-event-source"; import { prettyObject } from "@/app/utils/format"; +import { getClientConfig } from "@/app/config/client"; +import { + getMessageTextContent, + getMessageImages, + isVisionModel, +} from "@/app/utils"; + +export interface OpenAIListModelResponse { + object: string; + data: Array<{ + id: string; + object: string; + root: string; + }>; +} + +export interface RequestPayload { + messages: { + role: "system" | "user" | "assistant"; + content: string | MultimodalContent[]; + }[]; + stream?: boolean; + model: string; + temperature: number; + presence_penalty: number; + frequency_penalty: number; + top_p: number; + max_tokens?: number; +} export class ChatGPTApi implements LLMApi { + private disableListModels = true; + path(path: string): string { - let openaiUrl = useAccessStore.getState().openaiUrl; - if (openaiUrl.length === 0) { - openaiUrl = DEFAULT_API_HOST; + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + const isAzure = path.includes("deployments"); + if (accessStore.useCustomConfig) { + if (isAzure && !accessStore.isValidAzure()) { + throw Error( + "incomplete azure config, please check it in your settings page", + ); + } + + baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl; } - if (openaiUrl.endsWith("/")) { - openaiUrl = openaiUrl.slice(0, openaiUrl.length - 1); + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + const apiPath = isAzure ? ApiPath.Azure : ApiPath.OpenAI; + baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); } - return [openaiUrl, path].join("/"); + if ( + !baseUrl.startsWith("http") && + !isAzure && + !baseUrl.startsWith(ApiPath.OpenAI) + ) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + // try rebuild url, when using cloudflare ai gateway in client + return cloudflareAIGatewayUrl([baseUrl, path].join("/")); } extractMessage(res: any) { @@ -30,9 +104,10 @@ export class ChatGPTApi implements LLMApi { } async chat(options: ChatOptions) { + const visionModel = isVisionModel(options.config.model); const messages = options.messages.map((v) => ({ role: v.role, - content: v.content, + content: visionModel ? 
v.content : getMessageTextContent(v), })); const modelConfig = { @@ -40,18 +115,27 @@ export class ChatGPTApi implements LLMApi { ...useChatStore.getState().currentSession().mask.modelConfig, ...{ model: options.config.model, + providerName: options.config.providerName, }, }; - const requestPayload = { + const requestPayload: RequestPayload = { messages, stream: options.config.stream, model: modelConfig.model, temperature: modelConfig.temperature, presence_penalty: modelConfig.presence_penalty, frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + // max_tokens: Math.max(modelConfig.max_tokens, 1024), + // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. }; + // add max_tokens to vision model + if (visionModel && modelConfig.model.includes("preview")) { + requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000); + } + console.log("[Request] openai payload: ", requestPayload); const shouldStream = !!options.config.stream; @@ -59,7 +143,35 @@ export class ChatGPTApi implements LLMApi { options.onController?.(controller); try { - const chatPath = this.path(OpenaiPath.ChatPath); + let chatPath = ""; + if (modelConfig.providerName === ServiceProvider.Azure) { + // find model, and get displayName as deployName + const { models: configModels, customModels: configCustomModels } = + useAppConfig.getState(); + const { + defaultModel, + customModels: accessCustomModels, + useCustomConfig, + } = useAccessStore.getState(); + const models = collectModelsWithDefaultModel( + configModels, + [configCustomModels, accessCustomModels].join(","), + defaultModel, + ); + const model = models.find( + (model) => + model.name === modelConfig.model && + model?.provider?.providerName === ServiceProvider.Azure, + ); + chatPath = this.path( + Azure.ChatPath( + (model?.displayName ?? model?.name) as string, + useCustomConfig ? 
useAccessStore.getState().azureApiVersion : "", + ), + ); + } else { + chatPath = this.path(OpenaiPath.ChatPath); + } const chatPayload = { method: "POST", body: JSON.stringify(requestPayload), @@ -75,12 +187,38 @@ export class ChatGPTApi implements LLMApi { if (shouldStream) { let responseText = ""; + let remainText = ""; let finished = false; + // animate response to make it looks smooth + function animateResponseText() { + if (finished || controller.signal.aborted) { + responseText += remainText; + console.log("[Response Animation] finished"); + if (responseText?.length === 0) { + options.onError?.(new Error("empty response from server")); + } + return; + } + + if (remainText.length > 0) { + const fetchCount = Math.max(1, Math.round(remainText.length / 60)); + const fetchText = remainText.slice(0, fetchCount); + responseText += fetchText; + remainText = remainText.slice(fetchCount); + options.onUpdate?.(responseText, fetchText); + } + + requestAnimationFrame(animateResponseText); + } + + // start animaion + animateResponseText(); + const finish = () => { if (!finished) { - options.onFinish(responseText); finished = true; + options.onFinish(responseText + remainText); } }; @@ -135,10 +273,27 @@ export class ChatGPTApi implements LLMApi { const text = msg.data; try { const json = JSON.parse(text); - const delta = json.choices[0].delta.content; + const choices = json.choices as Array<{ + delta: { content: string }; + }>; + const delta = choices[0]?.delta?.content; + const textmoderation = json?.prompt_filter_results; + if (delta) { - responseText += delta; - options.onUpdate?.(responseText, delta); + remainText += delta; + } + + if ( + textmoderation && + textmoderation.length > 0 && + ServiceProvider.Azure + ) { + const contentFilterResults = + textmoderation[0]?.content_filter_results; + console.log( + `[${ServiceProvider.Azure}] [Text Moderation] flagged categories result:`, + contentFilterResults, + ); } } catch (e) { console.error("[Request] parse error", text, msg); @@ -162,7 +317,7 @@ export class ChatGPTApi implements LLMApi { options.onFinish(message); } } catch (e) { - console.log("[Request] failed to make a chat reqeust", e); + console.log("[Request] failed to make a chat request", e); options.onError?.(e as Error); } } @@ -231,5 +386,36 @@ export class ChatGPTApi implements LLMApi { total: total.hard_limit_usd, } as LLMUsage; } + + async models(): Promise { + if (this.disableListModels) { + return DEFAULT_MODELS.slice(); + } + + const res = await fetch(this.path(OpenaiPath.ListModelPath), { + method: "GET", + headers: { + ...getHeaders(), + }, + }); + + const resJson = (await res.json()) as OpenAIListModelResponse; + const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-")); + console.log("[Models]", chatModels); + + if (!chatModels) { + return []; + } + + return chatModels.map((m) => ({ + name: m.id, + available: true, + provider: { + id: "openai", + providerName: "OpenAI", + providerType: "openai", + }, + })); + } } export { OpenaiPath }; diff --git a/app/command.ts b/app/command.ts index ba3bb65389e..e515e5f0bb4 100644 --- a/app/command.ts +++ b/app/command.ts @@ -1,3 +1,4 @@ +import { useEffect } from "react"; import { useSearchParams } from "react-router-dom"; import Locale from "./locales"; @@ -6,26 +7,29 @@ interface Commands { fill?: Command; submit?: Command; mask?: Command; + code?: Command; + settings?: Command; } export function useCommand(commands: Commands = {}) { const [searchParams, setSearchParams] = useSearchParams(); - if (commands === 
undefined) return; + useEffect(() => { + let shouldUpdate = false; + searchParams.forEach((param, name) => { + const commandName = name as keyof Commands; + if (typeof commands[commandName] === "function") { + commands[commandName]!(param); + searchParams.delete(name); + shouldUpdate = true; + } + }); - let shouldUpdate = false; - searchParams.forEach((param, name) => { - const commandName = name as keyof Commands; - if (typeof commands[commandName] === "function") { - commands[commandName]!(param); - searchParams.delete(name); - shouldUpdate = true; + if (shouldUpdate) { + setSearchParams(searchParams); } - }); - - if (shouldUpdate) { - setSearchParams(searchParams); - } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [searchParams, commands]); } interface ChatCommands { diff --git a/app/components/auth.tsx b/app/components/auth.tsx index de0df454283..57118349bac 100644 --- a/app/components/auth.tsx +++ b/app/components/auth.tsx @@ -7,12 +7,28 @@ import { useAccessStore } from "../store"; import Locale from "../locales"; import BotIcon from "../icons/bot.svg"; +import { useEffect } from "react"; +import { getClientConfig } from "../config/client"; export function AuthPage() { const navigate = useNavigate(); - const access = useAccessStore(); + const accessStore = useAccessStore(); const goHome = () => navigate(Path.Home); + const goChat = () => navigate(Path.Chat); + const resetAccessCode = () => { + accessStore.update((access) => { + access.openaiApiKey = ""; + access.accessCode = ""; + }); + }; // Reset access code to empty string + + useEffect(() => { + if (getClientConfig()?.isApp) { + navigate(Path.Settings); + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []); return (
@@ -27,19 +43,54 @@ export function AuthPage() { className={styles["auth-input"]} type="password" placeholder={Locale.Auth.Input} - value={access.accessCode} + value={accessStore.accessCode} onChange={(e) => { - access.updateCode(e.currentTarget.value); + accessStore.update( + (access) => (access.accessCode = e.currentTarget.value), + ); }} /> + {!accessStore.hideUserApiKey ? ( + <> +
{Locale.Auth.SubTips}
+ { + accessStore.update( + (access) => (access.openaiApiKey = e.currentTarget.value), + ); + }} + /> + { + accessStore.update( + (access) => (access.googleApiKey = e.currentTarget.value), + ); + }} + /> + + ) : null}
+ { + resetAccessCode(); + goHome(); + }} /> -
); diff --git a/app/components/chat-list.tsx b/app/components/chat-list.tsx index a6143f3246a..7ef6e7b8337 100644 --- a/app/components/chat-list.tsx +++ b/app/components/chat-list.tsx @@ -12,12 +12,13 @@ import { import { useChatStore } from "../store"; import Locale from "../locales"; -import { Link, useNavigate } from "react-router-dom"; +import { Link, useLocation, useNavigate } from "react-router-dom"; import { Path } from "../constant"; import { MaskAvatar } from "./mask"; import { Mask } from "../store/mask"; import { useRef, useEffect } from "react"; import { showConfirm } from "./ui-lib"; +import { useMobileScreen } from "../utils"; export function ChatItem(props: { onClick?: () => void; @@ -26,7 +27,7 @@ export function ChatItem(props: { count: number; time: string; selected: boolean; - id: number; + id: string; index: number; narrow?: boolean; mask: Mask; @@ -39,12 +40,16 @@ export function ChatItem(props: { }); } }, [props.selected]); + + const { pathname: currentPath } = useLocation(); return ( {(provided) => (
{ @@ -60,7 +65,10 @@ export function ChatItem(props: { {props.narrow ? (
- +
{props.count} @@ -80,7 +88,11 @@ export function ChatItem(props: {
{ + props.onDelete?.(); + e.preventDefault(); + e.stopPropagation(); + }} >
@@ -101,6 +113,7 @@ export function ChatList(props: { narrow?: boolean }) { ); const chatStore = useChatStore(); const navigate = useNavigate(); + const isMobileScreen = useMobileScreen(); const onDragEnd: OnDragEndResponder = (result) => { const { destination, source } = result; @@ -142,7 +155,7 @@ export function ChatList(props: { narrow?: boolean }) { }} onDelete={async () => { if ( - !props.narrow || + (!props.narrow && !isMobileScreen) || (await showConfirm(Locale.Home.DeleteChat)) ) { chatStore.deleteSession(i); diff --git a/app/components/chat.module.scss b/app/components/chat.module.scss index fa3a1cf2e5c..e7619e92b89 100644 --- a/app/components/chat.module.scss +++ b/app/components/chat.module.scss @@ -1,5 +1,47 @@ @import "../styles/animation.scss"; +.attach-images { + position: absolute; + left: 30px; + bottom: 32px; + display: flex; +} + +.attach-image { + cursor: default; + width: 64px; + height: 64px; + border: rgba($color: #888, $alpha: 0.2) 1px solid; + border-radius: 5px; + margin-right: 10px; + background-size: cover; + background-position: center; + background-color: var(--white); + + .attach-image-mask { + width: 100%; + height: 100%; + opacity: 0; + transition: all ease 0.2s; + } + + .attach-image-mask:hover { + opacity: 1; + } + + .delete-image { + width: 24px; + height: 24px; + cursor: pointer; + display: flex; + align-items: center; + justify-content: center; + border-radius: 5px; + float: right; + background-color: var(--white); + } +} + .chat-input-actions { display: flex; flex-wrap: wrap; @@ -14,10 +56,11 @@ padding: 4px 10px; animation: slide-in ease 0.3s; box-shadow: var(--card-shadow); - transition: all ease 0.3s; + transition: width ease 0.3s; align-items: center; height: 16px; width: var(--icon-width); + overflow: hidden; &:not(:last-child) { margin-right: 5px; @@ -29,14 +72,16 @@ opacity: 0; transform: translateX(-5px); transition: all ease 0.3s; - transition-delay: 0.1s; pointer-events: none; } &:hover { + --delay: 0.5s; width: var(--full-width); + transition-delay: var(--delay); .text { + transition-delay: var(--delay); opacity: 1; transform: translate(0); } @@ -95,11 +140,41 @@ } .context-prompt { + .context-prompt-insert { + display: flex; + justify-content: center; + padding: 4px; + opacity: 0.2; + transition: all ease 0.3s; + background-color: rgba(0, 0, 0, 0); + cursor: pointer; + border-radius: 4px; + margin-top: 4px; + margin-bottom: 4px; + + &:hover { + opacity: 1; + background-color: rgba(0, 0, 0, 0.05); + } + } + .context-prompt-row { display: flex; justify-content: center; width: 100%; - margin-bottom: 10px; + + &:hover { + .context-drag { + opacity: 1; + } + } + + .context-drag { + display: flex; + align-items: center; + opacity: 0.5; + transition: all ease 0.3s; + } .context-role { margin-right: 10px; @@ -156,12 +231,10 @@ animation: slide-in ease 0.3s; - $linear: linear-gradient( - to right, - rgba(0, 0, 0, 0), - rgba(0, 0, 0, 1), - rgba(0, 0, 0, 0) - ); + $linear: linear-gradient(to right, + rgba(0, 0, 0, 0), + rgba(0, 0, 0, 1), + rgba(0, 0, 0, 0)); mask-image: $linear; @mixin show { @@ -240,24 +313,39 @@ &:last-child { animation: slide-in ease 0.3s; } +} - &:hover { - .chat-message-actions { - opacity: 1; - transform: translateY(0px); - max-width: 100%; - height: 40px; - } +.chat-message-user { + display: flex; + flex-direction: row-reverse; - .chat-message-action-date { - opacity: 0.2; - } + .chat-message-header { + flex-direction: row-reverse; } } -.chat-message-user { +.chat-message-header { + margin-top: 20px; display: flex; - 
flex-direction: row-reverse; + align-items: center; + + .chat-message-actions { + display: flex; + box-sizing: border-box; + font-size: 12px; + align-items: flex-end; + justify-content: space-between; + transition: all ease 0.3s; + transform: scale(0.9) translateY(5px); + margin: 0 10px; + opacity: 0; + pointer-events: none; + + .chat-input-actions { + display: flex; + flex-wrap: nowrap; + } + } } .chat-message-container { @@ -270,15 +358,20 @@ .chat-message-edit { opacity: 0.9; } + + .chat-message-actions { + opacity: 1; + pointer-events: all; + transform: scale(1) translateY(0); + } } } -.chat-message-user > .chat-message-container { +.chat-message-user>.chat-message-container { align-items: flex-end; } .chat-message-avatar { - margin-top: 20px; position: relative; .chat-message-edit { @@ -296,6 +389,15 @@ padding: 7px; } } + + /* Specific styles for iOS devices */ + @media screen and (max-device-width: 812px) and (-webkit-min-device-pixel-ratio: 2) { + @supports (-webkit-touch-callout: none) { + .chat-message-edit { + top: -8%; + } + } + } } .chat-message-status { @@ -318,26 +420,63 @@ border: var(--border-in-light); position: relative; transition: all ease 0.3s; +} - .chat-message-actions { - display: flex; - box-sizing: border-box; - font-size: 12px; - align-items: flex-end; - justify-content: space-between; - transition: all ease 0.3s 0.15s; - transform: translateX(-5px) scale(0.9) translateY(30px); - opacity: 0; - height: 0; - max-width: 0; - position: absolute; - left: 0; - z-index: 2; +.chat-message-item-image { + width: 100%; + margin-top: 10px; +} - .chat-input-actions { - display: flex; - flex-wrap: nowrap; - } +.chat-message-item-images { + width: 100%; + display: grid; + justify-content: left; + grid-gap: 10px; + grid-template-columns: repeat(var(--image-count), auto); + margin-top: 10px; +} + +.chat-message-item-image-multi { + object-fit: cover; + background-size: cover; + background-position: center; + background-repeat: no-repeat; +} + +.chat-message-item-image, +.chat-message-item-image-multi { + box-sizing: border-box; + border-radius: 10px; + border: rgba($color: #888, $alpha: 0.2) 1px solid; +} + + +@media only screen and (max-width: 600px) { + $calc-image-width: calc(100vw/3*2/var(--image-count)); + + .chat-message-item-image-multi { + width: $calc-image-width; + height: $calc-image-width; + } + + .chat-message-item-image { + max-width: calc(100vw/3*2); + } +} + +@media screen and (min-width: 600px) { + $max-image-width: calc(calc(1200px - var(--sidebar-width))/3*2/var(--image-count)); + $image-width: calc(calc(var(--window-width) - var(--sidebar-width))/3*2/var(--image-count)); + + .chat-message-item-image-multi { + width: $image-width; + height: $image-width; + max-width: $max-image-width; + max-height: $max-image-width; + } + + .chat-message-item-image { + max-width: calc(calc(1200px - var(--sidebar-width))/3*2); } } @@ -355,7 +494,7 @@ z-index: 1; } -.chat-message-user > .chat-message-container > .chat-message-item { +.chat-message-user>.chat-message-container>.chat-message-item { background-color: var(--second); &:hover { @@ -420,6 +559,7 @@ @include single-line(); } + .hint-content { font-size: 12px; @@ -434,15 +574,26 @@ } .chat-input-panel-inner { + cursor: text; display: flex; flex: 1; + border-radius: 10px; + border: var(--border-in-light); +} + +.chat-input-panel-inner-attach { + padding-bottom: 80px; +} + +.chat-input-panel-inner:has(.chat-input:focus) { + border: 1px solid var(--primary); } .chat-input { height: 100%; width: 100%; border-radius: 10px; - 
border: var(--border-in-light); + border: none; box-shadow: 0 -2px 5px rgba(0, 0, 0, 0.03); background-color: var(--white); color: var(--black); @@ -454,9 +605,7 @@ min-height: 68px; } -.chat-input:focus { - border: 1px solid var(--primary); -} +.chat-input:focus {} .chat-input-send { background-color: var(--primary); @@ -475,4 +624,4 @@ .chat-input-send { bottom: 30px; } -} +} \ No newline at end of file diff --git a/app/components/chat.tsx b/app/components/chat.tsx index ff0bc5b347d..40e02cb57ac 100644 --- a/app/components/chat.tsx +++ b/app/components/chat.tsx @@ -5,6 +5,8 @@ import React, { useEffect, useMemo, useCallback, + Fragment, + RefObject, } from "react"; import SendWhiteIcon from "../icons/send-white.svg"; @@ -14,6 +16,7 @@ import ExportIcon from "../icons/share.svg"; import ReturnIcon from "../icons/return.svg"; import CopyIcon from "../icons/copy.svg"; import LoadingIcon from "../icons/three-dots.svg"; +import LoadingButtonIcon from "../icons/loading.svg"; import PromptIcon from "../icons/prompt.svg"; import MaskIcon from "../icons/mask.svg"; import MaxIcon from "../icons/max.svg"; @@ -24,6 +27,9 @@ import SettingsIcon from "../icons/chat-settings.svg"; import DeleteIcon from "../icons/clear.svg"; import PinIcon from "../icons/pin.svg"; import EditIcon from "../icons/rename.svg"; +import ConfirmIcon from "../icons/confirm.svg"; +import CancelIcon from "../icons/cancel.svg"; +import ImageIcon from "../icons/image.svg"; import LightIcon from "../icons/light.svg"; import DarkIcon from "../icons/dark.svg"; @@ -42,17 +48,21 @@ import { Theme, useAppConfig, DEFAULT_TOPIC, - ALL_MODELS, + ModelType, } from "../store"; import { copyToClipboard, - downloadAs, selectOrCopy, autoGrowTextArea, useMobileScreen, + getMessageTextContent, + getMessageImages, + isVisionModel, } from "../utils"; +import { compressImage } from "@/app/utils/chat"; + import dynamic from "next/dynamic"; import { ChatControllerPool } from "../client/controller"; @@ -62,16 +72,33 @@ import Locale from "../locales"; import { IconButton } from "./button"; import styles from "./chat.module.scss"; -import { ListItem, Modal, showConfirm, showPrompt, showToast } from "./ui-lib"; -import { useLocation, useNavigate } from "react-router-dom"; -import { LAST_INPUT_KEY, Path, REQUEST_TIMEOUT_MS } from "../constant"; +import { + List, + ListItem, + Modal, + Selector, + showConfirm, + showPrompt, + showToast, +} from "./ui-lib"; +import { useNavigate } from "react-router-dom"; +import { + CHAT_PAGE_SIZE, + LAST_INPUT_KEY, + Path, + REQUEST_TIMEOUT_MS, + UNFINISHED_INPUT, + ServiceProvider, +} from "../constant"; import { Avatar } from "./emoji"; -import { MaskAvatar, MaskConfig } from "./mask"; +import { ContextPrompts, MaskAvatar, MaskConfig } from "./mask"; import { useMaskStore } from "../store/mask"; import { ChatCommandPrefix, useChatCommand, useCommand } from "../command"; import { prettyObject } from "../utils/format"; import { ExportMessageModal } from "./exporter"; import { getClientConfig } from "../config/client"; +import { useAllModels } from "../utils/hooks"; +import { MultimodalContent } from "../client/api"; const Markdown = dynamic(async () => (await import("./markdown")).Markdown, { loading: () => , @@ -127,6 +154,7 @@ export function SessionConfigModel(props: { onClose: () => void }) { extraListItems={ session.mask.modelConfig.sendMemory ? 
( @@ -173,10 +201,31 @@ function PromptToast(props: { function useSubmitHandler() { const config = useAppConfig(); const submitKey = config.submitKey; + const isComposing = useRef(false); + + useEffect(() => { + const onCompositionStart = () => { + isComposing.current = true; + }; + const onCompositionEnd = () => { + isComposing.current = false; + }; + + window.addEventListener("compositionstart", onCompositionStart); + window.addEventListener("compositionend", onCompositionEnd); + + return () => { + window.removeEventListener("compositionstart", onCompositionStart); + window.removeEventListener("compositionend", onCompositionEnd); + }; + }, []); const shouldSubmit = (e: React.KeyboardEvent) => { + // Fix Chinese input method "Enter" on Safari + if (e.keyCode == 229) return false; if (e.key !== "Enter") return false; - if (e.key === "Enter" && e.nativeEvent.isComposing) return false; + if (e.key === "Enter" && (e.nativeEvent.isComposing || isComposing.current)) + return false; return ( (config.submitKey === SubmitKey.AltEnter && e.altKey) || (config.submitKey === SubmitKey.CtrlEnter && e.ctrlKey) || @@ -196,9 +245,11 @@ function useSubmitHandler() { }; } +export type RenderPompt = Pick; + export function PromptHints(props: { - prompts: Prompt[]; - onPromptSelect: (prompt: Prompt) => void; + prompts: RenderPompt[]; + onPromptSelect: (prompt: RenderPompt) => void; }) { const noPrompts = props.prompts.length === 0; const [selectIndex, setSelectIndex] = useState(0); @@ -336,35 +387,47 @@ function ChatAction(props: { ); } -function useScrollToBottom() { +function useScrollToBottom( + scrollRef: RefObject, + detach: boolean = false, +) { // for auto-scroll - const scrollRef = useRef(null); + const [autoScroll, setAutoScroll] = useState(true); - const scrollToBottom = useCallback(() => { + function scrollDomToBottom() { const dom = scrollRef.current; if (dom) { - requestAnimationFrame(() => dom.scrollTo(0, dom.scrollHeight)); + requestAnimationFrame(() => { + setAutoScroll(true); + dom.scrollTo(0, dom.scrollHeight); + }); } - }, []); + } // auto scroll useEffect(() => { - autoScroll && scrollToBottom(); + if (autoScroll && !detach) { + scrollDomToBottom(); + } }); return { scrollRef, autoScroll, setAutoScroll, - scrollToBottom, + scrollDomToBottom, }; } export function ChatActions(props: { + uploadImage: () => void; + setAttachImages: (images: string[]) => void; + setUploading: (uploading: boolean) => void; showPromptModal: () => void; scrollToBottom: () => void; showPromptHints: () => void; hitBottom: boolean; + uploading: boolean; }) { const config = useAppConfig(); const navigate = useNavigate(); @@ -386,16 +449,61 @@ export function ChatActions(props: { // switch model const currentModel = chatStore.currentSession().mask.modelConfig.model; - function nextModel() { - const models = ALL_MODELS.filter((m) => m.available).map((m) => m.name); - const modelIndex = models.indexOf(currentModel); - const nextIndex = (modelIndex + 1) % models.length; - const nextModel = models[nextIndex]; - chatStore.updateCurrentSession((session) => { - session.mask.modelConfig.model = nextModel; - session.mask.syncGlobalConfig = false; - }); - } + const currentProviderName = + chatStore.currentSession().mask.modelConfig?.providerName || + ServiceProvider.OpenAI; + const allModels = useAllModels(); + const models = useMemo(() => { + const filteredModels = allModels.filter((m) => m.available); + const defaultModel = filteredModels.find((m) => m.isDefault); + + if (defaultModel) { + const arr = [ + defaultModel, + 
...filteredModels.filter((m) => m !== defaultModel), + ]; + return arr; + } else { + return filteredModels; + } + }, [allModels]); + const currentModelName = useMemo(() => { + const model = models.find( + (m) => + m.name == currentModel && + m?.provider?.providerName == currentProviderName, + ); + return model?.displayName ?? ""; + }, [models, currentModel, currentProviderName]); + const [showModelSelector, setShowModelSelector] = useState(false); + const [showUploadImage, setShowUploadImage] = useState(false); + + useEffect(() => { + const show = isVisionModel(currentModel); + setShowUploadImage(show); + if (!show) { + props.setAttachImages([]); + props.setUploading(false); + } + + // if current model is not available + // switch to first available model + const isUnavaliableModel = !models.some((m) => m.name === currentModel); + if (isUnavaliableModel && models.length > 0) { + // show next model to default model if exist + let nextModel = models.find((model) => model.isDefault) || models[0]; + chatStore.updateCurrentSession((session) => { + session.mask.modelConfig.model = nextModel.name; + session.mask.modelConfig.providerName = nextModel?.provider + ?.providerName as ServiceProvider; + }); + showToast( + nextModel?.provider?.providerName == "ByteDance" + ? nextModel.displayName + : nextModel.name, + ); + } + }, [chatStore, currentModel, models]); return (
@@ -421,6 +529,13 @@ export function ChatActions(props: { /> )} + {showUploadImage && ( + : } + /> + )} setShowModelSelector(true)} + text={currentModelName} icon={} /> + + {showModelSelector && ( + ({ + title: `${m.displayName}${ + m?.provider?.providerName + ? "(" + m?.provider?.providerName + ")" + : "" + }`, + value: `${m.name}@${m?.provider?.providerName}`, + }))} + onClose={() => setShowModelSelector(false)} + onSelection={(s) => { + if (s.length === 0) return; + const [model, providerName] = s[0].split("@"); + chatStore.updateCurrentSession((session) => { + session.mask.modelConfig.model = model as ModelType; + session.mask.modelConfig.providerName = + providerName as ServiceProvider; + session.mask.syncGlobalConfig = false; + }); + if (providerName == "ByteDance") { + const selectedModel = models.find( + (m) => + m.name == model && m?.provider?.providerName == providerName, + ); + showToast(selectedModel?.displayName ?? ""); + } else { + showToast(model); + } + }} + /> + )}
); } -export function Chat() { +export function EditMessageModal(props: { onClose: () => void }) { + const chatStore = useChatStore(); + const session = chatStore.currentSession(); + const [messages, setMessages] = useState(session.messages.slice()); + + return ( +
+ } + key="cancel" + onClick={() => { + props.onClose(); + }} + />, + } + key="ok" + onClick={() => { + chatStore.updateCurrentSession( + (session) => (session.messages = messages), + ); + props.onClose(); + }} + />, + ]} + > + + + + chatStore.updateCurrentSession( + (session) => (session.topic = e.currentTarget.value), + ) + } + > + + + { + const newMessages = messages.slice(); + updater(newMessages); + setMessages(newMessages); + }} + /> + +
+ ); +} + +export function DeleteImageButton(props: { deleteImage: () => void }) { + return ( +
+ +
+ ); +} + +function _Chat() { type RenderMessage = ChatMessage & { preview?: boolean }; const chatStore = useChatStore(); - const [session, sessionIndex] = useChatStore((state) => [ - state.currentSession(), - state.currentSessionIndex, - ]); + const session = chatStore.currentSession(); const config = useAppConfig(); const fontSize = config.fontSize; @@ -492,19 +708,26 @@ export function Chat() { const [userInput, setUserInput] = useState(""); const [isLoading, setIsLoading] = useState(false); const { submitKey, shouldSubmit } = useSubmitHandler(); - const { scrollRef, setAutoScroll, scrollToBottom } = useScrollToBottom(); + const scrollRef = useRef(null); + const isScrolledToBottom = scrollRef?.current + ? Math.abs( + scrollRef.current.scrollHeight - + (scrollRef.current.scrollTop + scrollRef.current.clientHeight), + ) <= 1 + : false; + const { setAutoScroll, scrollDomToBottom } = useScrollToBottom( + scrollRef, + isScrolledToBottom, + ); const [hitBottom, setHitBottom] = useState(true); const isMobileScreen = useMobileScreen(); const navigate = useNavigate(); - - const onChatBodyScroll = (e: HTMLElement) => { - const isTouchBottom = e.scrollTop + e.clientHeight >= e.scrollHeight - 10; - setHitBottom(isTouchBottom); - }; + const [attachImages, setAttachImages] = useState([]); + const [uploading, setUploading] = useState(false); // prompt hints const promptStore = usePromptStore(); - const [promptHints, setPromptHints] = useState([]); + const [promptHints, setPromptHints] = useState([]); const onSearch = useDebouncedCallback( (text: string) => { const matchedPrompts = promptStore.search(text); @@ -578,7 +801,10 @@ export function Chat() { return; } setIsLoading(true); - chatStore.onUserInput(userInput).then(() => setIsLoading(false)); + chatStore + .onUserInput(userInput, attachImages) + .then(() => setIsLoading(false)); + setAttachImages([]); localStorage.setItem(LAST_INPUT_KEY, userInput); setUserInput(""); setPromptHints([]); @@ -586,7 +812,7 @@ export function Chat() { setAutoScroll(true); }; - const onPromptSelect = (prompt: Prompt) => { + const onPromptSelect = (prompt: RenderPompt) => { setTimeout(() => { setPromptHints([]); @@ -604,8 +830,8 @@ export function Chat() { }; // stop response - const onUserStop = (messageId: number) => { - ChatControllerPool.stop(sessionIndex, messageId); + const onUserStop = (messageId: string) => { + ChatControllerPool.stop(session.id, messageId); }; useEffect(() => { @@ -656,63 +882,85 @@ export function Chat() { }; const onRightClick = (e: any, message: ChatMessage) => { // copy to clipboard - if (selectOrCopy(e.currentTarget, message.content)) { + if (selectOrCopy(e.currentTarget, getMessageTextContent(message))) { if (userInput.length === 0) { - setUserInput(message.content); + setUserInput(getMessageTextContent(message)); } e.preventDefault(); } }; - const findLastUserIndex = (messageId: number) => { - // find last user input message and resend - let lastUserMessageIndex: number | null = null; - for (let i = 0; i < session.messages.length; i += 1) { - const message = session.messages[i]; - if (message.id === messageId) { - break; - } - if (message.role === "user") { - lastUserMessageIndex = i; - } - } + const deleteMessage = (msgId?: string) => { + chatStore.updateCurrentSession( + (session) => + (session.messages = session.messages.filter((m) => m.id !== msgId)), + ); + }; - return lastUserMessageIndex; + const onDelete = (msgId: string) => { + deleteMessage(msgId); }; - const deleteMessage = (userIndex: number) => { - 
chatStore.updateCurrentSession((session) => - session.messages.splice(userIndex, 2), + const onResend = (message: ChatMessage) => { + // when it is resending a message + // 1. for a user's message, find the next bot response + // 2. for a bot's message, find the last user's input + // 3. delete original user input and bot's message + // 4. resend the user's input + + const resendingIndex = session.messages.findIndex( + (m) => m.id === message.id, ); - }; - const onDelete = (botMessageId: number) => { - const userIndex = findLastUserIndex(botMessageId); - if (userIndex === null) return; - deleteMessage(userIndex); - }; + if (resendingIndex < 0 || resendingIndex >= session.messages.length) { + console.error("[Chat] failed to find resending message", message); + return; + } + + let userMessage: ChatMessage | undefined; + let botMessage: ChatMessage | undefined; + + if (message.role === "assistant") { + // if it is resending a bot's message, find the user input for it + botMessage = message; + for (let i = resendingIndex; i >= 0; i -= 1) { + if (session.messages[i].role === "user") { + userMessage = session.messages[i]; + break; + } + } + } else if (message.role === "user") { + // if it is resending a user's input, find the bot's response + userMessage = message; + for (let i = resendingIndex; i < session.messages.length; i += 1) { + if (session.messages[i].role === "assistant") { + botMessage = session.messages[i]; + break; + } + } + } + + if (userMessage === undefined) { + console.error("[Chat] failed to resend", message); + return; + } - const onResend = (botMessageId: number) => { - // find last user input message and resend - const userIndex = findLastUserIndex(botMessageId); - if (userIndex === null) return; + // delete the original messages + deleteMessage(userMessage.id); + deleteMessage(botMessage?.id); + // resend the message setIsLoading(true); - const content = session.messages[userIndex].content; - deleteMessage(userIndex); - chatStore.onUserInput(content).then(() => setIsLoading(false)); + const textContent = getMessageTextContent(userMessage); + const images = getMessageImages(userMessage); + chatStore.onUserInput(textContent, images).then(() => setIsLoading(false)); inputRef.current?.focus(); }; - const onPinMessage = (botMessage: ChatMessage) => { - if (!botMessage.id) return; - const userMessageIndex = findLastUserIndex(botMessage.id); - if (userMessageIndex === null) return; - - const userMessage = session.messages[userMessageIndex]; + const onPinMessage = (message: ChatMessage) => { chatStore.updateCurrentSession((session) => - session.mask.context.push(userMessage, botMessage), + session.mask.context.push(message), ); showToast(Locale.Chat.Actions.PinToastContent, { @@ -723,10 +971,9 @@ export function Chat() { }); }; - const context: RenderMessage[] = session.mask.hideContext - ? [] - : session.mask.context.slice(); - + const context: RenderMessage[] = useMemo(() => { + return session.mask.hideContext ? [] : session.mask.context.slice(); + }, [session.mask.context, session.mask.hideContext]); const accessStore = useAccessStore(); if ( @@ -740,60 +987,98 @@ export function Chat() { context.push(copiedHello); } + // preview messages + const renderMessages = useMemo(() => { + return context + .concat(session.messages as RenderMessage[]) + .concat( + isLoading + ? [ + { + ...createMessage({ + role: "assistant", + content: "……", + }), + preview: true, + }, + ] + : [], + ) + .concat( + userInput.length > 0 && config.sendPreviewBubble + ? 
+  // preview messages
+  const renderMessages = useMemo(() => {
+    return context
+      .concat(session.messages as RenderMessage[])
+      .concat(
+        isLoading
+          ? [
+              {
+                ...createMessage({
+                  role: "assistant",
+                  content: "……",
+                }),
+                preview: true,
+              },
+            ]
+          : [],
+      )
+      .concat(
+        userInput.length > 0 && config.sendPreviewBubble
+          ? [
+              {
+                ...createMessage({
+                  role: "user",
+                  content: userInput,
+                }),
+                preview: true,
+              },
+            ]
+          : [],
+      );
+  }, [
+    config.sendPreviewBubble,
+    context,
+    isLoading,
+    session.messages,
+    userInput,
+  ]);
+
+  const [msgRenderIndex, _setMsgRenderIndex] = useState(
+    Math.max(0, renderMessages.length - CHAT_PAGE_SIZE),
+  );
+  function setMsgRenderIndex(newIndex: number) {
+    newIndex = Math.min(renderMessages.length - CHAT_PAGE_SIZE, newIndex);
+    newIndex = Math.max(0, newIndex);
+    _setMsgRenderIndex(newIndex);
+  }
+
+  const messages = useMemo(() => {
+    const endRenderIndex = Math.min(
+      msgRenderIndex + 3 * CHAT_PAGE_SIZE,
+      renderMessages.length,
+    );
+    return renderMessages.slice(msgRenderIndex, endRenderIndex);
+  }, [msgRenderIndex, renderMessages]);
+
+  const onChatBodyScroll = (e: HTMLElement) => {
+    const bottomHeight = e.scrollTop + e.clientHeight;
+    const edgeThreshold = e.clientHeight;
+
+    const isTouchTopEdge = e.scrollTop <= edgeThreshold;
+    const isTouchBottomEdge = bottomHeight >= e.scrollHeight - edgeThreshold;
+    const isHitBottom =
+      bottomHeight >= e.scrollHeight - (isMobileScreen ? 4 : 10);
+
+    const prevPageMsgIndex = msgRenderIndex - CHAT_PAGE_SIZE;
+    const nextPageMsgIndex = msgRenderIndex + CHAT_PAGE_SIZE;
+
+    if (isTouchTopEdge && !isTouchBottomEdge) {
+      setMsgRenderIndex(prevPageMsgIndex);
+    } else if (isTouchBottomEdge) {
+      setMsgRenderIndex(nextPageMsgIndex);
+    }
+
+    setHitBottom(isHitBottom);
+    setAutoScroll(isHitBottom);
+  };
+
+  function scrollToBottom() {
+    setMsgRenderIndex(renderMessages.length - CHAT_PAGE_SIZE);
+    scrollDomToBottom();
+  }
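+
+  // note: at most 3 * CHAT_PAGE_SIZE messages are mounted at a time; hitting
+  // the top or bottom edge slides msgRenderIndex back or forward one page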
 
   // clear context index = context length + index in messages
   const clearContextIndex =
     (session.clearContextIndex ?? -1) >= 0
-      ? session.clearContextIndex! + context.length
+      ? session.clearContextIndex! + context.length - msgRenderIndex
       : -1;
 
-  // preview messages
-  const messages = context
-    .concat(session.messages as RenderMessage[])
-    .concat(
-      isLoading
-        ? [
-            {
-              ...createMessage({
-                role: "assistant",
-                content: "……",
-              }),
-              preview: true,
-            },
-          ]
-        : [],
-    )
-    .concat(
-      userInput.length > 0 && config.sendPreviewBubble
-        ? [
-            {
-              ...createMessage({
-                role: "user",
-                content: userInput,
-              }),
-              preview: true,
-            },
-          ]
-        : [],
-    );
-
   const [showPromptModal, setShowPromptModal] = useState(false);
 
-  const renameSession = () => {
-    showPrompt(Locale.Chat.Rename, session.topic).then((newTopic) => {
-      if (newTopic && newTopic !== session.topic) {
-        chatStore.updateCurrentSession(
-          (session) => (session.topic = newTopic!),
-        );
-      }
-    });
-  };
-
   const clientConfig = useMemo(() => getClientConfig(), []);
-  const location = useLocation();
-  const isChat = location.pathname === Path.Chat;
-
-  const autoFocus = !isMobileScreen || isChat; // only focus in chat page
+
+  const autoFocus = !isMobileScreen; // wont auto focus on mobile screen
   const showMaxIcon = !isMobileScreen && !clientConfig?.isApp;
 
   useCommand({
@@ -801,8 +1086,157 @@ export function Chat() {
     submit: (text) => {
       doSubmit(text);
     },
+    code: (text) => {
+      if (accessStore.disableFastLink) return;
+      console.log("[Command] got code from url: ", text);
+      showConfirm(Locale.URLCommand.Code + `code = ${text}`).then((res) => {
+        if (res) {
+          accessStore.update((access) => (access.accessCode = text));
+        }
+      });
+    },
+    settings: (text) => {
+      if (accessStore.disableFastLink) return;
+
+      try {
+        const payload = JSON.parse(text) as {
+          key?: string;
+          url?: string;
+        };
+
+        console.log("[Command] got settings from url: ", payload);
+
+        if (payload.key || payload.url) {
+          showConfirm(
+            Locale.URLCommand.Settings +
+              `\n${JSON.stringify(payload, null, 4)}`,
+          ).then((res) => {
+            if (!res) return;
+            if (payload.key) {
+              accessStore.update(
+                (access) => (access.openaiApiKey = payload.key!),
+              );
+            }
+            if (payload.url) {
+              accessStore.update((access) => (access.openaiUrl = payload.url!));
+            }
+            accessStore.update((access) => (access.useCustomConfig = true));
+          });
+        }
+      } catch {
+        console.error("[Command] failed to get settings from url: ", text);
+      }
+    },
   });
 
+  // edit / insert message modal
+  const [isEditingMessage, setIsEditingMessage] = useState(false);
+
+  // remember unfinished input
+  useEffect(() => {
+    // try to load from local storage
+    const key = UNFINISHED_INPUT(session.id);
+    const mayBeUnfinishedInput = localStorage.getItem(key);
+    if (mayBeUnfinishedInput && userInput.length === 0) {
+      setUserInput(mayBeUnfinishedInput);
+      localStorage.removeItem(key);
+    }
+
+    // capture the DOM node once so the unmount cleanup can still read the
+    // latest draft value
+    const dom = inputRef.current;
+    return () => {
+      localStorage.setItem(key, dom?.value ?? "");
+    };
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, []);
+
+  const handlePaste = useCallback(
+    async (event: React.ClipboardEvent<HTMLTextAreaElement>) => {
+      const currentModel = chatStore.currentSession().mask.modelConfig.model;
+      // only intercept pasted files when the current model accepts images
+      if (!isVisionModel(currentModel)) {
+        return;
+      }
+      const items = (event.clipboardData || window.clipboardData).items;
+      for (const item of items) {
+        if (item.kind === "file" && item.type.startsWith("image/")) {
+          event.preventDefault();
+          const file = item.getAsFile();
+          if (file) {
+            const images: string[] = [];
+            images.push(...attachImages);
+            images.push(
+              ...(await new Promise<string[]>((res, rej) => {
+                setUploading(true);
+                const imagesData: string[] = [];
+                compressImage(file, 256 * 1024)
+                  .then((dataUrl) => {
+                    imagesData.push(dataUrl);
+                    setUploading(false);
+                    res(imagesData);
+                  })
+                  .catch((e) => {
+                    setUploading(false);
+                    rej(e);
+                  });
+              })),
+            );
+            const imagesLength = images.length;
+
+            // keep at most three attached images per message
+            if (imagesLength > 3) {
+              images.splice(3, imagesLength - 3);
+            }
+            setAttachImages(images);
+          }
+        }
+      }
+    },
+    [attachImages, chatStore],
+  );
+
+  async function uploadImage() {
+    const images: string[] = [];
+    images.push(...attachImages);
+
+    images.push(
+      ...(await new Promise<string[]>((res, rej) => {
+        const fileInput = document.createElement("input");
+        fileInput.type = "file";
+        fileInput.accept =
+          "image/png, image/jpeg, image/webp, image/heic, image/heif";
+        fileInput.multiple = true;
+        fileInput.onchange = (event: any) => {
+          setUploading(true);
+          const files = event.target.files;
+          const imagesData: string[] = [];
+          for (let i = 0; i < files.length; i++) {
+            const file = event.target.files[i];
+            compressImage(file, 256 * 1024)
+              .then((dataUrl) => {
+                imagesData.push(dataUrl);
+                if (
+                  imagesData.length === 3 ||
+                  imagesData.length === files.length
+                ) {
+                  setUploading(false);
+                  res(imagesData);
+                }
+              })
+              .catch((e) => {
+                setUploading(false);
+                rej(e);
+              });
+          }
+        };
+        fileInput.click();
+      })),
+    );
+
+    const imagesLength = images.length;
+    // keep at most three attached images per message
+    if (imagesLength > 3) {
+      images.splice(3, imagesLength - 3);
+    }
+    setAttachImages(images);
+  }
+
   return (
@@ -822,7 +1256,7 @@ export function Chat() {
         <div className={`window-header-title ${styles["chat-body-title"]}`}>
           <div
             className={`window-header-main-title ${styles["chat-body-main-title"]}`}
-            onClickCapture={renameSession}
+            onClickCapture={() => setIsEditingMessage(true)}
           >
             {!session.topic ? DEFAULT_TOPIC : session.topic}
           </div>
@@ -836,7 +1270,7 @@ export function Chat() {
             <IconButton
               icon={<RenameIcon />}
               bordered
-              onClick={renameSession}
+              onClick={() => setIsEditingMessage(true)}
             />
           </div>
         )}
@@ -877,7 +1311,6 @@ export function Chat() {
         ref={scrollRef}
         onScroll={(e) => onChatBodyScroll(e.currentTarget)}
         onMouseDown={() => inputRef.current?.blur()}
-        onWheel={(e) => setAutoScroll(hitBottom && e.deltaY > 0)}
         onTouchStart={() => {
           inputRef.current?.blur();
           setAutoScroll(false);
@@ -885,80 +1318,80 @@ export function Chat() {
         }}
       >
         {messages.map((message, i) => {
           const isUser = message.role === "user";
+          const isContext = i < context.length;
           const showActions =
-            !isUser &&
             i > 0 &&
-            !(message.preview || message.content.length === 0);
+            !(message.preview || message.content.length === 0) &&
+            !isContext;
           const showTyping = message.preview || message.streaming;
 
           const shouldShowClearContextDivider = i === clearContextIndex - 1;
 
           return (
-            <>
-              <div
-                className={
-                  isUser ? styles["chat-message-user"] : styles["chat-message"]
-                }
-              >
-                <div className={styles["chat-message-container"]}>
-                  <div className={styles["chat-message-avatar"]}>
-                    <div className={styles["chat-message-edit"]}>
-                      <IconButton
-                        icon={<EditIcon />}
-                        onClick={async () => {
-                          const newMessage = await showPrompt(
-                            Locale.Chat.Actions.Edit,
-                            message.content,
-                          );
-                          chatStore.updateCurrentSession((session) => {
-                            const m = session.messages.find(
-                              (m) => m.id === message.id,
-                            );
-                            if (m) {
-                              m.content = newMessage;
-                            }
-                          });
-                        }}
-                      ></IconButton>
-                    </div>
-                    {isUser ? (
-                      <Avatar avatar={config.avatar} />
-                    ) : (
-                      <MaskAvatar mask={session.mask} />
-                    )}
-                  </div>
-                  {showTyping && (
-                    <div className={styles["chat-message-status"]}>
-                      {Locale.Chat.Typing}
-                    </div>
-                  )}
+            <Fragment key={message.id}>
+              <div
+                className={
+                  isUser ? styles["chat-message-user"] : styles["chat-message"]
+                }
+              >
+                <div className={styles["chat-message-container"]}>
+                  <div className={styles["chat-message-header"]}>
+                    <div className={styles["chat-message-avatar"]}>
+                      <div className={styles["chat-message-edit"]}>
+                        <IconButton
+                          icon={<EditIcon />}
+                          onClick={async () => {
+                            const newMessage = await showPrompt(
+                              Locale.Chat.Actions.Edit,
+                              getMessageTextContent(message),
+                              10,
+                            );
+                            let newContent: string | MultimodalContent[] =
+                              newMessage;
+                            const images = getMessageImages(message);
+                            if (images.length > 0) {
+                              newContent = [{ type: "text", text: newMessage }];
+                              for (let i = 0; i < images.length; i++) {
+                                newContent.push({
+                                  type: "image_url",
+                                  image_url: {
+                                    url: images[i],
+                                  },
+                                });
+                              }
+                            }
+                            chatStore.updateCurrentSession((session) => {
+                              const m = session.mask.context
+                                .concat(session.messages)
+                                .find((m) => m.id === message.id);
+                              if (m) {
+                                m.content = newContent;
+                              }
+                            });
+                          }}
+                        ></IconButton>
+                      </div>
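+                      {/* system-role messages get a dedicated avatar; other
+                          bot replies reuse the mask avatar for their model */}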
+                      {isUser ? (
+                        <Avatar avatar={config.avatar} />
+                      ) : (
+                        <>
+                          {["system"].includes(message.role) ? (
+                            <Avatar avatar="2699-fe0f" />
+                          ) : (
+                            <MaskAvatar
+                              avatar={session.mask.avatar}
+                              model={
+                                message.model || session.mask.modelConfig.model
+                              }
+                            />
+                          )}
+                        </>
+                      )}
+                    </div>
-                  <div className={styles["chat-message-item"]}>
-                    <Markdown
-                      content={message.content}
-                      loading={
-                        (message.preview || message.content.length === 0) &&
-                        !isUser
-                      }
-                      onContextMenu={(e) => onRightClick(e, message)}
-                      onDoubleClickCapture={() => {
-                        if (!isMobileScreen) return;
-                        setUserInput(message.content);
-                      }}
-                      fontSize={fontSize}
-                      parentRef={scrollRef}
-                      defaultShow={i >= messages.length - 10}
-                    />
                     {showActions && (
-                      <div className={styles["chat-message-top-actions"]}>
+                      <div className={styles["chat-message-actions"]}>
+                        <div className={styles["chat-input-actions"]}>
                           {message.streaming ? (
                             <ChatAction
                               text={Locale.Chat.Actions.Stop}
                               icon={<StopIcon />}
                               onClick={() => onUserStop(message.id ?? i)}
                             />
                           ) : (
                             <>
                               <ChatAction
                                 text={Locale.Chat.Actions.Retry}
                                 icon={<ResetIcon />}
-                                onClick={() => onResend(message.id ?? i)}
+                                onClick={() => onResend(message)}
                               />
                               <ChatAction
                                 text={Locale.Chat.Actions.Delete}
                                 icon={<DeleteIcon />}
                                 onClick={() => onDelete(message.id ?? i)}
                               />
                               <ChatAction
                                 text={Locale.Chat.Actions.Pin}
                                 icon={<PinIcon />}
                                 onClick={() => onPinMessage(message)}
                               />
                               <ChatAction
                                 text={Locale.Chat.Actions.Copy}
                                 icon={<CopyIcon />}
-                                onClick={() => copyToClipboard(message.content)}
+                                onClick={() =>
+                                  copyToClipboard(
+                                    getMessageTextContent(message),
+                                  )
+                                }
                               />
                             </>
                           )}
@@ -995,16 +1432,69 @@ export function Chat() {
                         </div>
                       </div>
                     )}
                   </div>
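+                  {/* message body: typing indicator, markdown content, then
+                      any attached images */}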
-
-                  {showActions && (
-                    <div className={styles["chat-message-action-date"]}>
-                      {message.date.toLocaleString()}
-                    </div>
+                  {showTyping && (
+                    <div className={styles["chat-message-status"]}>
+                      {Locale.Chat.Typing}
+                    </div>
                   )}
+                  <div className={styles["chat-message-item"]}>
+                    <Markdown
+                      content={getMessageTextContent(message)}
+                      loading={
+                        (message.preview || message.streaming) &&
+                        message.content.length === 0 &&
+                        !isUser
+                      }
+                      onContextMenu={(e) => onRightClick(e, message)}
+                      onDoubleClickCapture={() => {
+                        if (!isMobileScreen) return;
+                        setUserInput(getMessageTextContent(message));
+                      }}
+                      fontSize={fontSize}
+                      parentRef={scrollRef}
+                      defaultShow={i >= messages.length - 6}
+                    />
+                    {getMessageImages(message).length == 1 && (
+                      <img
+                        className={styles["chat-message-item-image"]}
+                        src={getMessageImages(message)[0]}
+                        alt=""
+                      />
+                    )}
+                    {getMessageImages(message).length > 1 && (
+                      <div
+                        className={styles["chat-message-item-images"]}
+                        style={
+                          {
+                            "--image-count": getMessageImages(message).length,
+                          } as React.CSSProperties
+                        }
+                      >
+                        {getMessageImages(message).map((image, index) => {
+                          return (
+                            <img
+                              className={
+                                styles["chat-message-item-image-multi"]
+                              }
+                              key={index}
+                              src={image}
+                              alt=""
+                            />
+                          );
+                        })}
+                      </div>
+                    )}
+                  </div>
+
+                  <div className={styles["chat-message-action-date"]}>
+                    {isContext
+                      ? Locale.Chat.IsContext
+                      : message.date.toLocaleString()}
+                  </div>
                 </div>
               </div>
               {shouldShowClearContextDivider && <ClearContextDivider />}
-            </>
+            </Fragment>
           );
         })}
@@ -1013,9 +1503,13 @@ export function Chat() {
         <ChatActions
+          uploadImage={uploadImage}
+          setAttachImages={setAttachImages}
+          setUploading={setUploading}
           showPromptModal={() => setShowPromptModal(true)}
           scrollToBottom={scrollToBottom}
           hitBottom={hitBottom}
+          uploading={uploading}
           showPromptHints={() => {
             // Click again to close
             if (promptHints.length > 0) {
@@ -1028,22 +1522,54 @@ export function Chat() {
             onSearch("");
           }}
         />
-
+