diff --git a/.env.template b/.env.template
index b2a0438d9..25addf2b3 100644
--- a/.env.template
+++ b/.env.template
@@ -1,21 +1,20 @@
-
# Your openai api key. (required)
OPENAI_API_KEY=sk-xxxx

# Access password, separated by comma. (optional)
CODE=your-password

-# You can start service behind a proxy
+# You can start the service behind a proxy. (optional)
PROXY_URL=http://localhost:7890

# (optional)
# Default: Empty
-# Googel Gemini Pro API key, set if you want to use Google Gemini Pro API.
+# Google Gemini Pro API key, set if you want to use Google Gemini Pro API.
GOOGLE_API_KEY=

# (optional)
# Default: https://generativelanguage.googleapis.com/
-# Googel Gemini Pro API url without pathname, set if you want to customize Google Gemini Pro API url.
+# Google Gemini Pro API url without pathname, set if you want to customize Google Gemini Pro API url.
GOOGLE_URL=

# Override openai api request base url. (optional)
@@ -47,6 +46,15 @@ ENABLE_BALANCE_QUERY=
# If you want to disable parse settings from url, set this value to 1.
DISABLE_FAST_LINK=

+# (optional)
+# Default: Empty
+# To control custom models, use + to add a custom model, use - to hide a model, use name=displayName to customize model name, separated by commas.
+CUSTOM_MODELS=
+
+# (optional)
+# Default: Empty
+# Change the default model
+DEFAULT_MODEL=

# anthropic claude Api Key.(optional)
ANTHROPIC_API_KEY=
@@ -54,8 +62,6 @@ ANTHROPIC_API_KEY=
### anthropic claude Api version. (optional)
ANTHROPIC_API_VERSION=

-
-
### anthropic claude Api url (optional)
ANTHROPIC_URL=

diff --git a/.github/ISSUE_TEMPLATE/1_bug_report.yml b/.github/ISSUE_TEMPLATE/1_bug_report.yml
new file mode 100644
index 000000000..b576629e3
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/1_bug_report.yml
@@ -0,0 +1,80 @@
+name: '🐛 Bug Report'
+description: 'Report a bug'
+title: '[Bug] '
+labels: ['bug']
+body:
+  - type: dropdown
+    attributes:
+      label: '📦 Deployment Method'
+      multiple: true
+      options:
+        - 'Official installation package'
+        - 'Vercel'
+        - 'Zeabur'
+        - 'Sealos'
+        - 'Netlify'
+        - 'Docker'
+        - 'Other'
+    validations:
+      required: true
+  - type: input
+    attributes:
+      label: '📌 Version'
+    validations:
+      required: true
+
+  - type: dropdown
+    attributes:
+      label: '💻 Operating System'
+      multiple: true
+      options:
+        - 'Windows'
+        - 'macOS'
+        - 'Ubuntu'
+        - 'Other Linux'
+        - 'iOS'
+        - 'iPad OS'
+        - 'Android'
+        - 'Other'
+    validations:
+      required: true
+  - type: input
+    attributes:
+      label: '📌 System Version'
+    validations:
+      required: true
+  - type: dropdown
+    attributes:
+      label: '🌐 Browser'
+      multiple: true
+      options:
+        - 'Chrome'
+        - 'Edge'
+        - 'Safari'
+        - 'Firefox'
+        - 'Other'
+    validations:
+      required: true
+  - type: input
+    attributes:
+      label: '📌 Browser Version'
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: '🐛 Bug Description'
+      description: A clear and concise description of the bug. If you selected `Other` above, please also explain in detail.
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: '📷 Reproduction Steps'
+      description: A clear and concise description of how to reproduce the issue.
+  - type: textarea
+    attributes:
+      label: '🚦 Expected Behavior'
+      description: A clear and concise description of what you expected to happen.
+  - type: textarea
+    attributes:
+      label: '📝 Additional Information'
+      description: If your problem needs further explanation, or if the issue you're seeing cannot be reproduced in a gist, please add more information here.
\ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml b/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml new file mode 100644 index 000000000..1977237de --- /dev/null +++ b/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml @@ -0,0 +1,80 @@ +name: '🐛 反馈缺陷' +description: '反馈一个问题/缺陷' +title: '[Bug] ' +labels: ['bug'] +body: + - type: dropdown + attributes: + label: '📦 部署方式' + multiple: true + options: + - '官方安装包' + - 'Vercel' + - 'Zeabur' + - 'Sealos' + - 'Netlify' + - 'Docker' + - 'Other' + validations: + required: true + - type: input + attributes: + label: '📌 软件版本' + validations: + required: true + + - type: dropdown + attributes: + label: '💻 系统环境' + multiple: true + options: + - 'Windows' + - 'macOS' + - 'Ubuntu' + - 'Other Linux' + - 'iOS' + - 'iPad OS' + - 'Android' + - 'Other' + validations: + required: true + - type: input + attributes: + label: '📌 系统版本' + validations: + required: true + - type: dropdown + attributes: + label: '🌐 浏览器' + multiple: true + options: + - 'Chrome' + - 'Edge' + - 'Safari' + - 'Firefox' + - 'Other' + validations: + required: true + - type: input + attributes: + label: '📌 浏览器版本' + validations: + required: true + - type: textarea + attributes: + label: '🐛 问题描述' + description: 请提供一个清晰且简洁的问题描述,若上述选项为`Other`,也请详细说明。 + validations: + required: true + - type: textarea + attributes: + label: '📷 复现步骤' + description: 请提供一个清晰且简洁的描述,说明如何复现问题。 + - type: textarea + attributes: + label: '🚦 期望结果' + description: 请提供一个清晰且简洁的描述,说明您期望发生什么。 + - type: textarea + attributes: + label: '📝 补充信息' + description: 如果您的问题需要进一步说明,或者您遇到的问题无法在一个简单的示例中复现,请在这里添加更多信息。 \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/2_feature_request.yml b/.github/ISSUE_TEMPLATE/2_feature_request.yml new file mode 100644 index 000000000..8576e8a83 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/2_feature_request.yml @@ -0,0 +1,21 @@ +name: '🌠 Feature Request' +description: 'Suggest an idea' +title: '[Feature Request] ' +labels: ['enhancement'] +body: + - type: textarea + attributes: + label: '🥰 Feature Description' + description: Please add a clear and concise description of the problem you are seeking to solve with this feature request. + validations: + required: true + - type: textarea + attributes: + label: '🧐 Proposed Solution' + description: Describe the solution you'd like in a clear and concise manner. + validations: + required: true + - type: textarea + attributes: + label: '📝 Additional Information' + description: Add any other context about the problem here. 
\ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/2_feature_request_cn.yml b/.github/ISSUE_TEMPLATE/2_feature_request_cn.yml new file mode 100644 index 000000000..c7a3cc370 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/2_feature_request_cn.yml @@ -0,0 +1,21 @@ +name: '🌠 功能需求' +description: '提出需求或建议' +title: '[Feature Request] ' +labels: ['enhancement'] +body: + - type: textarea + attributes: + label: '🥰 需求描述' + description: 请添加一个清晰且简洁的问题描述,阐述您希望通过这个功能需求解决的问题。 + validations: + required: true + - type: textarea + attributes: + label: '🧐 解决方案' + description: 请清晰且简洁地描述您想要的解决方案。 + validations: + required: true + - type: textarea + attributes: + label: '📝 补充信息' + description: 在这里添加关于问题的任何其他背景信息。 \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml deleted file mode 100644 index bdba257d2..000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ /dev/null @@ -1,146 +0,0 @@ -name: Bug report -description: Create a report to help us improve -title: "[Bug] " -labels: ["bug"] - -body: - - type: markdown - attributes: - value: "## Describe the bug" - - type: textarea - id: bug-description - attributes: - label: "Bug Description" - description: "A clear and concise description of what the bug is." - placeholder: "Explain the bug..." - validations: - required: true - - - type: markdown - attributes: - value: "## To Reproduce" - - type: textarea - id: steps-to-reproduce - attributes: - label: "Steps to Reproduce" - description: "Steps to reproduce the behavior:" - placeholder: | - 1. Go to '...' - 2. Click on '....' - 3. Scroll down to '....' - 4. See error - validations: - required: true - - - type: markdown - attributes: - value: "## Expected behavior" - - type: textarea - id: expected-behavior - attributes: - label: "Expected Behavior" - description: "A clear and concise description of what you expected to happen." - placeholder: "Describe what you expected to happen..." - validations: - required: true - - - type: markdown - attributes: - value: "## Screenshots" - - type: textarea - id: screenshots - attributes: - label: "Screenshots" - description: "If applicable, add screenshots to help explain your problem." - placeholder: "Paste your screenshots here or write 'N/A' if not applicable..." - validations: - required: false - - - type: markdown - attributes: - value: "## Deployment" - - type: checkboxes - id: deployment - attributes: - label: "Deployment Method" - description: "Please select the deployment method you are using." - options: - - label: "Docker" - - label: "Vercel" - - label: "Server" - - - type: markdown - attributes: - value: "## Desktop (please complete the following information):" - - type: input - id: desktop-os - attributes: - label: "Desktop OS" - description: "Your desktop operating system." - placeholder: "e.g., Windows 10" - validations: - required: false - - type: input - id: desktop-browser - attributes: - label: "Desktop Browser" - description: "Your desktop browser." - placeholder: "e.g., Chrome, Safari" - validations: - required: false - - type: input - id: desktop-version - attributes: - label: "Desktop Browser Version" - description: "Version of your desktop browser." - placeholder: "e.g., 89.0" - validations: - required: false - - - type: markdown - attributes: - value: "## Smartphone (please complete the following information):" - - type: input - id: smartphone-device - attributes: - label: "Smartphone Device" - description: "Your smartphone device." 
- placeholder: "e.g., iPhone X" - validations: - required: false - - type: input - id: smartphone-os - attributes: - label: "Smartphone OS" - description: "Your smartphone operating system." - placeholder: "e.g., iOS 14.4" - validations: - required: false - - type: input - id: smartphone-browser - attributes: - label: "Smartphone Browser" - description: "Your smartphone browser." - placeholder: "e.g., Safari" - validations: - required: false - - type: input - id: smartphone-version - attributes: - label: "Smartphone Browser Version" - description: "Version of your smartphone browser." - placeholder: "e.g., 14" - validations: - required: false - - - type: markdown - attributes: - value: "## Additional Logs" - - type: textarea - id: additional-logs - attributes: - label: "Additional Logs" - description: "Add any logs about the problem here." - placeholder: "Paste any relevant logs here..." - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml deleted file mode 100644 index 499781330..000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ /dev/null @@ -1,53 +0,0 @@ -name: Feature request -description: Suggest an idea for this project -title: "[Feature Request]: " -labels: ["enhancement"] - -body: - - type: markdown - attributes: - value: "## Is your feature request related to a problem? Please describe." - - type: textarea - id: problem-description - attributes: - label: Problem Description - description: "A clear and concise description of what the problem is. Example: I'm always frustrated when [...]" - placeholder: "Explain the problem you are facing..." - validations: - required: true - - - type: markdown - attributes: - value: "## Describe the solution you'd like" - - type: textarea - id: desired-solution - attributes: - label: Solution Description - description: A clear and concise description of what you want to happen. - placeholder: "Describe the solution you'd like..." - validations: - required: true - - - type: markdown - attributes: - value: "## Describe alternatives you've considered" - - type: textarea - id: alternatives-considered - attributes: - label: Alternatives Considered - description: A clear and concise description of any alternative solutions or features you've considered. - placeholder: "Describe any alternative solutions or features you've considered..." - validations: - required: false - - - type: markdown - attributes: - value: "## Additional context" - - type: textarea - id: additional-context - attributes: - label: Additional Context - description: Add any other context or screenshots about the feature request here. - placeholder: "Add any other context or screenshots about the feature request here..." 
- validations: - required: false diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..3c4c90803 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,28 @@ +#### 💻 变更类型 | Change Type + + + +- [ ] feat +- [ ] fix +- [ ] refactor +- [ ] perf +- [ ] style +- [ ] test +- [ ] docs +- [ ] ci +- [ ] chore +- [ ] build + +#### 🔀 变更说明 | Description of Change + + + +#### 📝 补充信息 | Additional Information + + diff --git a/.gitignore b/.gitignore index b00b0e325..2ff556f64 100644 --- a/.gitignore +++ b/.gitignore @@ -43,4 +43,6 @@ dev .env *.key -*.key.pub \ No newline at end of file +*.key.pub + +masks.json diff --git a/Dockerfile b/Dockerfile index 436d39d82..ae9a17cdd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -43,7 +43,7 @@ COPY --from=builder /app/.next/server ./.next/server EXPOSE 3000 CMD if [ -n "$PROXY_URL" ]; then \ - export HOSTNAME="127.0.0.1"; \ + export HOSTNAME="0.0.0.0"; \ protocol=$(echo $PROXY_URL | cut -d: -f1); \ host=$(echo $PROXY_URL | cut -d/ -f3 | cut -d: -f1); \ port=$(echo $PROXY_URL | cut -d: -f3); \ diff --git a/LICENSE b/LICENSE index 542e91f4e..047f9431e 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2023 Zhang Yifei +Copyright (c) 2023-2024 Zhang Yifei Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index 633124ec7..c8b158956 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,8 @@
-icon + + + icon +

NextChat (ChatGPT Next Web)

@@ -14,27 +17,51 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4
[![MacOS][MacOS-image]][download-url]
[![Linux][Linux-image]][download-url]

-[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Twitter](https://twitter.com/NextChatDev)
+[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)

-[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues)
+[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues)

-[web-url]: https://chatgpt.nextweb.fun
+[web-url]: https://app.nextchat.dev/
[download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases
[Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge
[Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows
[MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple
[Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu

-[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat)
+[Deploy on Vercel](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)

-[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/ZBUEFA)
-
-[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
-
-![cover](./docs/images/cover.png)
+[](https://monica.im/?utm=nxcrp)
+## Enterprise Edition
+
+Meets your company's needs for private deployment and customization:
+- **Brand Customization**: Tailored VI/UI to seamlessly align with your corporate brand image.
+- **Resource Integration**: Unified configuration and management of dozens of AI resources by company administrators, ready for use by team members.
+- **Permission Control**: Clearly defined member permissions, resource permissions, and knowledge base permissions, all controlled via a corporate-grade Admin Panel.
+- **Knowledge Integration**: Combining your internal knowledge base with AI capabilities, making it more relevant to your company's specific business needs compared to general AI.
+- **Security Auditing**: Automatically intercept sensitive inquiries and trace all historical conversation records, ensuring AI adherence to corporate information security standards.
+- **Private Deployment**: Enterprise-level private deployment supporting various mainstream private cloud solutions, ensuring data security and privacy protection.
+- **Continuous Updates**: Ongoing updates and upgrades in cutting-edge capabilities like multimodal AI, ensuring consistent innovation and advancement.
+
+For enterprise inquiries, please contact: **business@nextchat.dev**
+
+## 企业版
+
+满足企业用户私有化部署和个性化定制需求:
+- **品牌定制**:企业量身定制 VI/UI,与企业品牌形象无缝契合
+- **资源集成**:由企业管理人员统一配置和管理数十种 AI 资源,团队成员开箱即用
+- **权限管理**:成员权限、资源权限、知识库权限层级分明,企业级 Admin Panel 统一控制
+- **知识接入**:企业内部知识库与 AI 能力相结合,比通用 AI 更贴近企业自身业务需求
+- **安全审计**:自动拦截敏感提问,支持追溯全部历史对话记录,让 AI 也能遵循企业信息安全规范
+- **私有部署**:企业级私有部署,支持各类主流私有云部署,确保数据安全和隐私保护
+- **持续更新**:提供多模态、智能体等前沿能力持续更新升级服务,常用常新、持续先进
+
+企业版咨询: **business@nextchat.dev**
+
+
+
## Features

- **Deploy for free with one-click** on Vercel in under 1 minute

@@ -49,6 +76,12 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4
- Automatically compresses chat history to support long conversations while also saving your tokens
- I18n: English, 简体中文, 繁体中文, 日本語, Français, Español, Italiano, Türkçe, Deutsch, Tiếng Việt, Русский, Čeština, 한국어, Indonesia
+
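Both `OPENAI_API_KEY` and `CODE` in the `.env.template` change above accept comma-separated lists, and the README_JA added later in this diff notes that a key is then picked at random from the pool. A minimal sketch of that pattern, with hypothetical helper names rather than the project's actual code:

```typescript
// Hypothetical helpers illustrating comma-separated env pools
// (OPENAI_API_KEY, CODE); not the project's actual implementation.

/** Split a comma-separated env var into trimmed, non-empty entries. */
function splitEnvList(value: string | undefined): string[] {
  return (value ?? "")
    .split(",")
    .map((v) => v.trim())
    .filter((v) => v.length > 0);
}

/** Pick one API key at random from the configured pool. */
function pickApiKey(pool: string[]): string | undefined {
  if (pool.length === 0) return undefined;
  return pool[Math.floor(Math.random() * pool.length)];
}

const keys = splitEnvList(process.env.OPENAI_API_KEY);
console.log(`[Key Pool] ${keys.length} key(s) configured`, pickApiKey(keys));
```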
+ +![主界面](./docs/images/cover.png) + +
+ ## Roadmap - [x] System Prompt: pin a user defined prompt as system prompt [#138](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138) @@ -57,10 +90,15 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 - [x] Share as image, share to ShareGPT [#1741](https://github.com/Yidadaa/ChatGPT-Next-Web/pull/1741) - [x] Desktop App with tauri - [x] Self-host Model: Fully compatible with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner), as well as server deployment of [LocalAI](https://github.com/go-skynet/LocalAI): llama/gpt4all/rwkv/vicuna/koala/gpt4all-j/cerebras/falcon/dolly etc. -- [ ] Plugins: support network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) +- [x] Artifacts: Easily preview, copy and share generated content/webpages through a separate window [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092) +- [x] Plugins: support network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) + - [x] network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) +- [ ] local knowledge base ## What's New +- 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins) +- 🚀 v2.14.0 Now supports Artifacts & SD - 🚀 v2.10.1 support Google Gemini Pro model. - 🚀 v2.9.11 you can use azure endpoint now. - 🚀 v2.8 now we have a client that runs across all platforms! @@ -89,15 +127,21 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 - [x] 分享为图片,分享到 ShareGPT 链接 [#1741](https://github.com/Yidadaa/ChatGPT-Next-Web/pull/1741) - [x] 使用 tauri 打包桌面应用 - [x] 支持自部署的大语言模型:开箱即用 [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) ,服务端部署 [LocalAI 项目](https://github.com/go-skynet/LocalAI) llama / gpt4all / rwkv / vicuna / koala / gpt4all-j / cerebras / falcon / dolly 等等,或者使用 [api-for-open-llm](https://github.com/xusenlinzy/api-for-open-llm) -- [ ] 插件机制,支持联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) +- [x] Artifacts: 通过独立窗口,轻松预览、复制和分享生成的内容/可交互网页 [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092) +- [x] 插件机制,支持`联网搜索`、`计算器`、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) + - [x] 支持联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) + - [ ] 本地知识库 ## 最新动态 +- 🚀 v2.15.0 现在支持插件功能了!了解更多:[NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins) +- 🚀 v2.14.0 现在支持 Artifacts & SD 了。 +- 🚀 v2.10.1 现在支持 Gemini Pro 模型。 +- 🚀 v2.9.11 现在可以使用自定义 Azure 服务了。 +- 🚀 v2.8 发布了横跨 Linux/Windows/MacOS 的体积极小的客户端。 +- 🚀 v2.7 现在可以将会话分享为图片了,也可以分享到 ShareGPT 的在线链接。 - 🚀 v2.0 已经发布,现在你可以使用面具功能快速创建预制对话了! 了解更多: [ChatGPT 提示词高阶技能:零次、一次和少样本提示](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138)。 - 💡 想要更方便地随时随地使用本项目?可以试下这款桌面插件:https://github.com/mushan0x0/AI0x0.com -- 🚀 v2.7 现在可以将会话分享为图片了,也可以分享到 ShareGPT 的在线链接。 -- 🚀 v2.8 发布了横跨 Linux/Windows/MacOS 的体积极小的客户端。 -- 🚀 v2.9.11 现在可以使用自定义 Azure 服务了。 ## Get Started @@ -180,7 +224,7 @@ Specify OpenAI organization ID. 
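The `CUSTOM_MODELS` grammar introduced in `.env.template` above, and documented in detail in the `CUSTOM_MODELS` section below, is compact enough to parse in a few lines. A rough sketch for illustration only; the project's real logic is `collectModelTable` / `isModelAvailableInServer` in `app/utils/model.ts`, both touched later in this diff:

```typescript
// Illustrative parser for the CUSTOM_MODELS syntax; hypothetical types
// and names, not the repo's actual collectModelTable implementation.

interface ModelRule {
  name: string;         // model name, e.g. "gpt-3.5-turbo"
  available: boolean;   // "+" adds/enables, "-" hides
  displayName?: string; // from "name=displayName"
  provider?: string;    // from "name@provider=deployment"
  deployment?: string;  // deployment id for "@azure=" / "@bytedance="
}

function parseCustomModels(config: string): ModelRule[] {
  return config
    .split(",")
    .map((v) => v.trim())
    .filter((v) => v.length > 0)
    .map((item) => {
      const available = !item.startsWith("-");
      const body = item.replace(/^[+-]/, "");
      // "modelName@provider=deploymentName" or "modelName=displayName"
      const [nameAndProvider, alias] = body.split("=");
      const [name, provider] = nameAndProvider.split("@");
      return {
        name,
        available,
        provider,
        deployment: provider ? alias : undefined,
        displayName: provider ? undefined : alias,
      };
    });
}

console.log(
  parseCustomModels("-all,+gpt-3.5-turbo@azure=gpt35,gpt-4-1106-preview=gpt-4-turbo"),
);
```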
### `AZURE_URL` (optional)

-> Example: https://{azure-resource-url}/openai/deployments/{deploy-name}
+> Example: https://{azure-resource-url}/openai

Azure deploy url.

@@ -212,6 +256,46 @@ anthropic claude Api version.

anthropic claude Api Url.

+### `BAIDU_API_KEY` (optional)
+
+Baidu Api Key.
+
+### `BAIDU_SECRET_KEY` (optional)
+
+Baidu Secret Key.
+
+### `BAIDU_URL` (optional)
+
+Baidu Api Url.
+
+### `BYTEDANCE_API_KEY` (optional)
+
+ByteDance Api Key.
+
+### `BYTEDANCE_URL` (optional)
+
+ByteDance Api Url.
+
+### `ALIBABA_API_KEY` (optional)
+
+Alibaba Cloud Api Key.
+
+### `ALIBABA_URL` (optional)
+
+Alibaba Cloud Api Url.
+
+### `IFLYTEK_URL` (optional)
+
+iflytek Api Url.
+
+### `IFLYTEK_API_KEY` (optional)
+
+iflytek Api Key.
+
+### `IFLYTEK_API_SECRET` (optional)
+
+iflytek Api Secret.
+
### `HIDE_USER_API_KEY` (optional)

> Default: Empty

@@ -245,13 +329,36 @@ To control custom models, use `+` to add a custom model, use `-` to hide a model

Use `-all` to disable all default models, `+all` to enable all default models.

-### `WHITE_WEBDEV_ENDPOINTS` (可选)
+For Azure: use `modelName@azure=deploymentName` to customize the model name and deployment name.
+> Example: `+gpt-3.5-turbo@azure=gpt35` will show option `gpt35(Azure)` in the model list.
+> If you can only use Azure models, `-all,+gpt-3.5-turbo@azure=gpt35` will make `gpt35(Azure)` the only option in the model list.
+
+For ByteDance: use `modelName@bytedance=deploymentName` to customize the model name and deployment name.
+> Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in the model list.
+
+### `DEFAULT_MODEL` (optional)
+
+Change the default model.
+
+### `WHITE_WEBDEV_ENDPOINTS` (optional)

You can use this option if you want to increase the number of webdav service addresses you are allowed to access, as required by the format:
- Each address must be a complete endpoint
> `https://xxxx/yyy`
- Multiple addresses are connected by ', '

+### `DEFAULT_INPUT_TEMPLATE` (optional)
+
+Customize the default template used to initialize the User Input Preprocessing configuration item in Settings.
+
+### `STABILITY_API_KEY` (optional)
+
+Stability API key.
+
+### `STABILITY_URL` (optional)
+
+Customize Stability API url.
+
## Requirements

NodeJS >= 18, Docker >= 20

diff --git a/README_CN.md b/README_CN.md
index 38ebf209d..0983323cc 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -1,22 +1,34 @@
-预览 + + + icon +

SoulShellGPT

一键免费部署你的私人 ChatGPT 网页应用,支持 GPT3, GPT4 & Gemini Pro 模型。

-[演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)
+[企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)

-[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web)
-
-[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/ZBUEFA)
-
-[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
-
-![主界面](./docs/images/cover.png)
+[Deploy on Vercel](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
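For the `modelName@azure=deploymentName` form documented in the `CUSTOM_MODELS` section above, the server has to map an incoming model name back to its Azure deployment before proxying the request. A condensed, simplified sketch of that mapping; the actual version this PR adds lives in `app/api/common.ts` further down:

```typescript
// Condensed sketch of the Azure deploy-name rewrite this PR adds to
// app/api/common.ts: when CUSTOM_MODELS contains "model@azure=deploy",
// requests for "model" are re-pointed at the "deploy" deployment.
// Simplified names; see the actual hunk further down in this diff.

function azureDeployName(customModels: string, modelName: string): string {
  for (const item of customModels.split(",")) {
    if (!item || item.startsWith("-")) continue; // hidden models can't match
    const [fullName, deployName] = item.split("=");
    const [name, provider] = fullName.replace(/^\+/, "").split("@");
    if (provider === "azure" && name === modelName && deployName) {
      return deployName;
    }
  }
  return modelName; // fall back to the model name itself
}

// "/gpt-3.5-turbo/chat/completions" -> "/gpt35/chat/completions"
const path = `/${azureDeployName(
  "+gpt-3.5-turbo@azure=gpt35",
  "gpt-3.5-turbo",
)}/chat/completions`;
console.log(path);
```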
+## 企业版
+
+满足您公司私有化部署和定制需求:
+- **品牌定制**:企业量身定制 VI/UI,与企业品牌形象无缝契合
+- **资源集成**:由企业管理人员统一配置和管理数十种 AI 资源,团队成员开箱即用
+- **权限管理**:成员权限、资源权限、知识库权限层级分明,企业级 Admin Panel 统一控制
+- **知识接入**:企业内部知识库与 AI 能力相结合,比通用 AI 更贴近企业自身业务需求
+- **安全审计**:自动拦截敏感提问,支持追溯全部历史对话记录,让 AI 也能遵循企业信息安全规范
+- **私有部署**:企业级私有部署,支持各类主流私有云部署,确保数据安全和隐私保护
+- **持续更新**:提供多模态、智能体等前沿能力持续更新升级服务,常用常新、持续先进
+
+企业版咨询: **business@nextchat.dev**
+
+
+
## 开始使用

1. 准备好你的 [OpenAI API Key](https://platform.openai.com/account/api-keys);
2. 点击右侧按钮开始部署,直接使用 GitHub 账号登录即可,记得在环境变量页填入 API Key 和页面访问密码 CODE;
3. 部署完毕后,即可开始使用;
4. (可选)[绑定自定义域名](https://vercel.com/docs/concepts/projects/domains/add-a-domain):Vercel 分配的域名 DNS 在某些区域被污染了,绑定自定义域名即可直连。
+
+ +![主界面](./docs/images/cover.png) + +
+ ## 保持更新 如果你按照上述步骤一键部署了自己的项目,可能会发现总是提示“存在更新”的问题,这是由于 Vercel 会默认为你创建一个新项目而不是 fork 本项目,这会导致无法正确地检测更新。 @@ -94,7 +112,7 @@ OpenAI 接口代理 URL,如果你手动配置了 openai 接口代理,请填 ### `AZURE_URL` (可选) -> 形如:https://{azure-resource-url}/openai/deployments/{deploy-name} +> 形如:https://{azure-resource-url}/openai Azure 部署地址。 @@ -106,26 +124,68 @@ Azure 密钥。 Azure Api 版本,你可以在这里找到:[Azure 文档](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)。 -### `GOOGLE_API_KEY` (optional) +### `GOOGLE_API_KEY` (可选) Google Gemini Pro 密钥. -### `GOOGLE_URL` (optional) +### `GOOGLE_URL` (可选) Google Gemini Pro Api Url. -### `ANTHROPIC_API_KEY` (optional) +### `ANTHROPIC_API_KEY` (可选) anthropic claude Api Key. -### `ANTHROPIC_API_VERSION` (optional) +### `ANTHROPIC_API_VERSION` (可选) anthropic claude Api version. -### `ANTHROPIC_URL` (optional) +### `ANTHROPIC_URL` (可选) anthropic claude Api Url. +### `BAIDU_API_KEY` (可选) + +Baidu Api Key. + +### `BAIDU_SECRET_KEY` (可选) + +Baidu Secret Key. + +### `BAIDU_URL` (可选) + +Baidu Api Url. + +### `BYTEDANCE_API_KEY` (可选) + +ByteDance Api Key. + +### `BYTEDANCE_URL` (可选) + +ByteDance Api Url. + +### `ALIBABA_API_KEY` (可选) + +阿里云(千问)Api Key. + +### `ALIBABA_URL` (可选) + +阿里云(千问)Api Url. + +### `IFLYTEK_URL` (可选) + +讯飞星火Api Url. + +### `IFLYTEK_API_KEY` (可选) + +讯飞星火Api Key. + +### `IFLYTEK_API_SECRET` (可选) + +讯飞星火Api Secret. + + + ### `HIDE_USER_API_KEY` (可选) 如果你不想让用户自行填入 API Key,将此环境变量设置为 1 即可。 @@ -156,6 +216,31 @@ anthropic claude Api Url. 用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。 +在Azure的模式下,支持使用`modelName@azure=deploymentName`的方式配置模型名称和部署名称(deploy-name) +> 示例:`+gpt-3.5-turbo@azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项。 +> 如果你只能使用Azure模式,那么设置 `-all,+gpt-3.5-turbo@azure=gpt35` 则可以让对话的默认使用 `gpt35(Azure)` + +在ByteDance的模式下,支持使用`modelName@bytedance=deploymentName`的方式配置模型名称和部署名称(deploy-name) +> 示例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx`这个配置会在模型列表显示一个`Doubao-lite-4k(ByteDance)`的选项 + + +### `DEFAULT_MODEL` (可选) + +更改默认模型 + +### `DEFAULT_INPUT_TEMPLATE` (可选) + +自定义默认的 template,用于初始化『设置』中的『用户输入预处理』配置项 + +### `STABILITY_API_KEY` (optional) + +Stability API密钥 + +### `STABILITY_URL` (optional) + +自定义的Stability API请求地址 + + ## 开发 点击下方按钮,开始二次开发: diff --git a/README_JA.md b/README_JA.md new file mode 100644 index 000000000..6b8caadae --- /dev/null +++ b/README_JA.md @@ -0,0 +1,310 @@ +
+プレビュー + +

NextChat

+
+ワンクリックで無料であなた専用の ChatGPT ウェブアプリをデプロイ。GPT3、GPT4 & Gemini Pro モデルをサポート。
+
+[企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N)
+
+[Vercelでデプロイ](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Zeaburでデプロイ](https://zeabur.com/templates/ZBUEFA) [Gitpodで開く](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
+
+
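The Baidu variables documented in the READMEs above work differently from the bearer-token providers: `BAIDU_API_KEY` and `BAIDU_SECRET_KEY` are first exchanged for an OAuth `access_token`, which `app/api/baidu.ts` (added later in this diff) appends to the request URL. A sketch of that exchange, using Baidu's public OAuth endpoint; the project's version is `getAccessToken` in `app/utils/baidu.ts`, which this sketch does not reproduce:

```typescript
// Sketch of the Baidu ERNIE OAuth token exchange that BAIDU_API_KEY /
// BAIDU_SECRET_KEY feed into. Endpoint per Baidu's public OAuth API;
// illustrative only, not the repo's getAccessToken implementation.

async function fetchBaiduAccessToken(
  apiKey: string,
  secretKey: string,
): Promise<string> {
  const url =
    "https://aip.baidubce.com/oauth/2.0/token" +
    `?grant_type=client_credentials&client_id=${apiKey}&client_secret=${secretKey}`;
  const res = await fetch(url, { method: "POST" });
  const json = (await res.json()) as { access_token?: string };
  if (!json.access_token) throw new Error("Baidu OAuth failed");
  // The chat request is then sent to `${baseUrl}${path}?access_token=...`,
  // which is what app/api/baidu.ts does further down in this diff.
  return json.access_token;
}
```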
+ +## 企業版 + +あなたの会社のプライベートデプロイとカスタマイズのニーズに応える +- **ブランドカスタマイズ**:企業向けに特別に設計された VI/UI、企業ブランドイメージとシームレスにマッチ +- **リソース統合**:企業管理者が数十種類のAIリソースを統一管理、チームメンバーはすぐに使用可能 +- **権限管理**:メンバーの権限、リソースの権限、ナレッジベースの権限を明確にし、企業レベルのAdmin Panelで統一管理 +- **知識の統合**:企業内部のナレッジベースとAI機能を結びつけ、汎用AIよりも企業自身の業務ニーズに近づける +- **セキュリティ監査**:機密質問を自動的にブロックし、すべての履歴対話を追跡可能にし、AIも企業の情報セキュリティ基準に従わせる +- **プライベートデプロイ**:企業レベルのプライベートデプロイ、主要なプライベートクラウドデプロイをサポートし、データのセキュリティとプライバシーを保護 +- **継続的な更新**:マルチモーダル、エージェントなどの最先端機能を継続的に更新し、常に最新であり続ける + +企業版のお問い合わせ: **business@nextchat.dev** + + +## 始めに + +1. [OpenAI API Key](https://platform.openai.com/account/api-keys)を準備する; +2. 右側のボタンをクリックしてデプロイを開始: + [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web) 、GitHubアカウントで直接ログインし、環境変数ページにAPI Keyと[ページアクセスパスワード](#設定ページアクセスパスワード) CODEを入力してください; +3. デプロイが完了したら、すぐに使用を開始できます; +4. (オプション)[カスタムドメインをバインド](https://vercel.com/docs/concepts/projects/domains/add-a-domain):Vercelが割り当てたドメインDNSは一部の地域で汚染されているため、カスタムドメインをバインドすると直接接続できます。 + +
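This PR also adds `app/api/artifacts/route.ts` (further down), which persists shared artifacts in Cloudflare KV under an md5 hash of the content. A client-side usage sketch against that route; the fetch wrappers are illustrative, only the response fields (`code`, `id`) come from the route's code:

```typescript
// Usage sketch for the artifacts endpoint added later in this diff
// (app/api/artifacts/route.ts): POST the content, get back an md5-based
// id, then GET it by id. Helper names are hypothetical.

async function saveArtifact(html: string): Promise<string> {
  const res = await fetch("/api/artifacts", { method: "POST", body: html });
  const json = (await res.json()) as { code?: number; id?: string };
  if (json.code !== 0 || !json.id) throw new Error("Save data error");
  return json.id; // md5 hash of the content, used as the share id
}

async function loadArtifact(id: string): Promise<string> {
  const res = await fetch(`/api/artifacts?id=${id}`, { method: "GET" });
  return res.text();
}
```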
+ +![メインインターフェース](./docs/images/cover.png) + +
+ + +## 更新を維持する + +もし上記の手順に従ってワンクリックでプロジェクトをデプロイした場合、「更新があります」というメッセージが常に表示されることがあります。これは、Vercel がデフォルトで新しいプロジェクトを作成するためで、本プロジェクトを fork していないことが原因です。そのため、正しく更新を検出できません。 + +以下の手順に従って再デプロイすることをお勧めします: + +- 元のリポジトリを削除する +- ページ右上の fork ボタンを使って、本プロジェクトを fork する +- Vercel で再度選択してデプロイする、[詳細な手順はこちらを参照してください](./docs/vercel-ja.md)。 + + +### 自動更新を開く + +> Upstream Sync の実行エラーが発生した場合は、手動で Sync Fork してください! + +プロジェクトを fork した後、GitHub の制限により、fork 後のプロジェクトの Actions ページで Workflows を手動で有効にし、Upstream Sync Action を有効にする必要があります。有効化後、毎時の定期自動更新が可能になります: + +![自動更新](./docs/images/enable-actions.jpg) + +![自動更新を有効にする](./docs/images/enable-actions-sync.jpg) + + +### 手動でコードを更新する + +手動で即座に更新したい場合は、[GitHub のドキュメント](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork)を参照して、fork したプロジェクトを上流のコードと同期する方法を確認してください。 + +このプロジェクトをスターまたはウォッチしたり、作者をフォローすることで、新機能の更新通知をすぐに受け取ることができます。 + + + +## ページアクセスパスワードを設定する + +> パスワードを設定すると、ユーザーは設定ページでアクセスコードを手動で入力しない限り、通常のチャットができず、未承認の状態であることを示すメッセージが表示されます。 + +> **警告**:パスワードの桁数は十分に長く設定してください。7桁以上が望ましいです。さもないと、[ブルートフォース攻撃を受ける可能性があります](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/518)。 + +このプロジェクトは限られた権限管理機能を提供しています。Vercel プロジェクトのコントロールパネルで、環境変数ページに `CODE` という名前の環境変数を追加し、値をカンマで区切ったカスタムパスワードに設定してください: + +``` +code1,code2,code3 +``` + +この環境変数を追加または変更した後、**プロジェクトを再デプロイ**して変更を有効にしてください。 + + +## 環境変数 + +> 本プロジェクトのほとんどの設定は環境変数で行います。チュートリアル:[Vercel の環境変数を変更する方法](./docs/vercel-ja.md)。 + +### `OPENAI_API_KEY` (必須) + +OpenAI の API キー。OpenAI アカウントページで申請したキーをカンマで区切って複数設定できます。これにより、ランダムにキーが選択されます。 + +### `CODE` (オプション) + +アクセスパスワード。カンマで区切って複数設定可能。 + +**警告**:この項目を設定しないと、誰でもデプロイしたウェブサイトを利用でき、トークンが急速に消耗する可能性があるため、設定をお勧めします。 + +### `BASE_URL` (オプション) + +> デフォルト: `https://api.openai.com` + +> 例: `http://your-openai-proxy.com` + +OpenAI API のプロキシ URL。手動で OpenAI API のプロキシを設定している場合はこのオプションを設定してください。 + +> SSL 証明書の問題がある場合は、`BASE_URL` のプロトコルを http に設定してください。 + +### `OPENAI_ORG_ID` (オプション) + +OpenAI の組織 ID を指定します。 + +### `AZURE_URL` (オプション) + +> 形式: https://{azure-resource-url}/openai/deployments/{deploy-name} +> `CUSTOM_MODELS` で `displayName` 形式で {deploy-name} を設定した場合、`AZURE_URL` から {deploy-name} を省略できます。 + +Azure のデプロイ URL。 + +### `AZURE_API_KEY` (オプション) + +Azure の API キー。 + +### `AZURE_API_VERSION` (オプション) + +Azure API バージョン。[Azure ドキュメント](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)で確認できます。 + +### `GOOGLE_API_KEY` (オプション) + +Google Gemini Pro API キー。 + +### `GOOGLE_URL` (オプション) + +Google Gemini Pro API の URL。 + +### `ANTHROPIC_API_KEY` (オプション) + +Anthropic Claude API キー。 + +### `ANTHROPIC_API_VERSION` (オプション) + +Anthropic Claude API バージョン。 + +### `ANTHROPIC_URL` (オプション) + +Anthropic Claude API の URL。 + +### `BAIDU_API_KEY` (オプション) + +Baidu API キー。 + +### `BAIDU_SECRET_KEY` (オプション) + +Baidu シークレットキー。 + +### `BAIDU_URL` (オプション) + +Baidu API の URL。 + +### `BYTEDANCE_API_KEY` (オプション) + +ByteDance API キー。 + +### `BYTEDANCE_URL` (オプション) + +ByteDance API の URL。 + +### `ALIBABA_API_KEY` (オプション) + +アリババ(千问)API キー。 + +### `ALIBABA_URL` (オプション) + +アリババ(千问)API の URL。 + +### `HIDE_USER_API_KEY` (オプション) + +ユーザーが API キーを入力できないようにしたい場合は、この環境変数を 1 に設定します。 + +### `DISABLE_GPT4` (オプション) + +ユーザーが GPT-4 を使用できないようにしたい場合は、この環境変数を 1 に設定します。 + +### `ENABLE_BALANCE_QUERY` (オプション) + +バランスクエリ機能を有効にしたい場合は、この環境変数を 1 に設定します。 + +### `DISABLE_FAST_LINK` (オプション) + +リンクからのプリセット設定解析を無効にしたい場合は、この環境変数を 1 に設定します。 + +### `WHITE_WEBDEV_ENDPOINTS` (オプション) + +アクセス許可を与える WebDAV サービスのアドレスを追加したい場合、このオプションを使用します。フォーマット要件: +- 各アドレスは完全なエンドポイントでなければなりません。 +> 
`https://xxxx/xxx` +- 複数のアドレスは `,` で接続します。 + +### `CUSTOM_MODELS` (オプション) + +> 例:`+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` は `qwen-7b-chat` と `glm-6b` をモデルリストに追加し、`gpt-3.5-turbo` を削除し、`gpt-4-1106-preview` のモデル名を `gpt-4-turbo` として表示します。 +> すべてのモデルを無効にし、特定のモデルを有効にしたい場合は、`-all,+gpt-3.5-turbo` を使用します。これは `gpt-3.5-turbo` のみを有効にすることを意味します。 + +モデルリストを管理します。`+` でモデルを追加し、`-` でモデルを非表示にし、`モデル名=表示名` でモデルの表示名をカスタマイズし、カンマで区切ります。 + +Azure モードでは、`modelName@azure=deploymentName` 形式でモデル名とデプロイ名(deploy-name)を設定できます。 +> 例:`+gpt-3.5-turbo@azure=gpt35` この設定でモデルリストに `gpt35(Azure)` のオプションが表示されます。 + +ByteDance モードでは、`modelName@bytedance=deploymentName` 形式でモデル名とデプロイ名(deploy-name)を設定できます。 +> 例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` この設定でモデルリストに `Doubao-lite-4k(ByteDance)` のオプションが表示されます。 + +### `DEFAULT_MODEL` (オプション) + +デフォルトのモデルを変更します。 + +### `DEFAULT_INPUT_TEMPLATE` (オプション) + +『設定』の『ユーザー入力前処理』の初期設定に使用するテンプレートをカスタマイズします。 + + +## 開発 + +下のボタンをクリックして二次開発を開始してください: + +[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) + +コードを書く前に、プロジェクトのルートディレクトリに `.env.local` ファイルを新規作成し、環境変数を記入します: + +``` +OPENAI_API_KEY= +``` + + +### ローカル開発 + +1. Node.js 18 と Yarn をインストールします。具体的な方法は ChatGPT にお尋ねください。 +2. `yarn install && yarn dev` を実行します。⚠️ 注意:このコマンドはローカル開発用であり、デプロイには使用しないでください。 +3. ローカルでデプロイしたい場合は、`yarn install && yarn build && yarn start` コマンドを使用してください。プロセスを守るために pm2 を使用することもできます。詳細は ChatGPT にお尋ねください。 + + +## デプロイ + +### コンテナデプロイ(推奨) + +> Docker バージョンは 20 以上が必要です。それ以下だとイメージが見つからないというエラーが出ます。 + +> ⚠️ 注意:Docker バージョンは最新バージョンより 1~2 日遅れることが多いため、デプロイ後に「更新があります」の通知が出続けることがありますが、正常です。 + +```shell +docker pull yidadaa/chatgpt-next-web + +docker run -d -p 3000:3000 \ + -e OPENAI_API_KEY=sk-xxxx \ + -e CODE=ページアクセスパスワード \ + yidadaa/chatgpt-next-web +``` + +プロキシを指定することもできます: + +```shell +docker run -d -p 3000:3000 \ + -e OPENAI_API_KEY=sk-xxxx \ + -e CODE=ページアクセスパスワード \ + --net=host \ + -e PROXY_URL=http://127.0.0.1:7890 \ + yidadaa/chatgpt-next-web +``` + +ローカルプロキシがアカウントとパスワードを必要とする場合は、以下を使用できます: + +```shell +-e PROXY_URL="http://127.0.0.1:7890 user password" +``` + +他の環境変数を指定する必要がある場合は、上記のコマンドに `-e 環境変数=環境変数値` を追加して指定してください。 + + +### ローカルデプロイ + +コンソールで以下のコマンドを実行します: + +```shell +bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/scripts/setup.sh) +``` + +⚠️ 注意:インストール中に問題が発生した場合は、Docker を使用してデプロイしてください。 + + +## 謝辞 + +### 寄付者 + +> 英語版をご覧ください。 + +### 貢献者 + +[プロジェクトの貢献者リストはこちら](https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors) + +### 関連プロジェクト + +- [one-api](https://github.com/songquanpeng/one-api): 一つのプラットフォームで大規模モデルのクォータ管理を提供し、市場に出回っているすべての主要な大規模言語モデルをサポートします。 + + +## オープンソースライセンス + +[MIT](https://opensource.org/license/mit/) diff --git a/app/api/[provider]/[...path]/route.ts b/app/api/[provider]/[...path]/route.ts new file mode 100644 index 000000000..24aa5ec04 --- /dev/null +++ b/app/api/[provider]/[...path]/route.ts @@ -0,0 +1,70 @@ +import { ApiPath } from "@/app/constant"; +import { NextRequest, NextResponse } from "next/server"; +import { handle as openaiHandler } from "../../openai"; +import { handle as azureHandler } from "../../azure"; +import { handle as googleHandler } from "../../google"; +import { handle as anthropicHandler } from "../../anthropic"; +import { handle as baiduHandler } from "../../baidu"; +import { handle as bytedanceHandler } from "../../bytedance"; +import { handle as alibabaHandler } from "../../alibaba"; +import { handle as moonshotHandler } from 
"../../moonshot"; +import { handle as stabilityHandler } from "../../stability"; +import { handle as iflytekHandler } from "../../iflytek"; +import { handle as proxyHandler } from "../../proxy"; + +async function handle( + req: NextRequest, + { params }: { params: { provider: string; path: string[] } }, +) { + const apiPath = `/api/${params.provider}`; + console.log(`[${params.provider} Route] params `, params); + switch (apiPath) { + case ApiPath.Azure: + return azureHandler(req, { params }); + case ApiPath.Google: + return googleHandler(req, { params }); + case ApiPath.Anthropic: + return anthropicHandler(req, { params }); + case ApiPath.Baidu: + return baiduHandler(req, { params }); + case ApiPath.ByteDance: + return bytedanceHandler(req, { params }); + case ApiPath.Alibaba: + return alibabaHandler(req, { params }); + // case ApiPath.Tencent: using "/api/tencent" + case ApiPath.Moonshot: + return moonshotHandler(req, { params }); + case ApiPath.Stability: + return stabilityHandler(req, { params }); + case ApiPath.Iflytek: + return iflytekHandler(req, { params }); + case ApiPath.OpenAI: + return openaiHandler(req, { params }); + default: + return proxyHandler(req, { params }); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; diff --git a/app/api/alibaba.ts b/app/api/alibaba.ts new file mode 100644 index 000000000..675d9f301 --- /dev/null +++ b/app/api/alibaba.ts @@ -0,0 +1,131 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + Alibaba, + ALIBABA_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; +import type { RequestPayload } from "@/app/client/platforms/openai"; + +const serverConfig = getServerSideConfig(); + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Alibaba Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Qwen); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[Alibaba] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +async function request(req: NextRequest) { + const controller = new AbortController(); + + // alibaba use base url or just remove the path + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Alibaba, ""); + + let baseUrl = serverConfig.alibabaUrl || ALIBABA_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? 
"", + "X-DashScope-SSE": req.headers.get("X-DashScope-SSE") ?? "disable", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Alibaba as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[Alibaba] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/anthropic/[...path]/route.ts b/app/api/anthropic.ts similarity index 81% rename from app/api/anthropic/[...path]/route.ts rename to app/api/anthropic.ts index 4264893d9..d7b070247 100644 --- a/app/api/anthropic/[...path]/route.ts +++ b/app/api/anthropic.ts @@ -4,16 +4,18 @@ import { Anthropic, ApiPath, DEFAULT_MODELS, + ServiceProvider, ModelProvider, } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; -import { auth } from "../../auth"; -import { collectModelTable } from "@/app/utils/model"; +import { auth } from "./auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; +import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]); -async function handle( +export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { @@ -54,30 +56,6 @@ async function handle( } } -export const GET = handle; -export const POST = handle; - -export const runtime = "edge"; -export const preferredRegion = [ - "arn1", - "bom1", - "cdg1", - "cle1", - "cpt1", - "dub1", - "fra1", - "gru1", - "hnd1", - "iad1", - "icn1", - "kix1", - "lhr1", - "pdx1", - "sfo1", - "sin1", - "syd1", -]; - const serverConfig = getServerSideConfig(); async function request(req: NextRequest) { @@ -113,12 +91,14 @@ async function request(req: NextRequest) { 10 * 60 * 1000, ); - const fetchUrl = `${baseUrl}${path}`; + // try rebuild url, when using cloudflare ai gateway in server + const fetchUrl = cloudflareAIGatewayUrl(`${baseUrl}${path}`); const fetchOptions: RequestInit = { headers: { "Content-Type": "application/json", "Cache-Control": "no-store", + "anthropic-dangerous-direct-browser-access": "true", [authHeaderName]: authValue, "anthropic-version": req.headers.get("anthropic-version") || @@ -136,17 +116,19 @@ async function request(req: NextRequest) { // #1815 try to refuse some request to some models if (serverConfig.customModels && req.body) { try { - const modelTable = collectModelTable( - DEFAULT_MODELS, - serverConfig.customModels, - ); const clonedBody = await req.text(); fetchOptions.body = clonedBody; const jsonBody = JSON.parse(clonedBody) as 
{ model?: string }; // not undefined and is false - if (modelTable[jsonBody?.model ?? ""].available === false) { + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Anthropic as string, + ) + ) { return NextResponse.json( { error: true, @@ -161,17 +143,17 @@ async function request(req: NextRequest) { console.error(`[Anthropic] filter`, e); } } - console.log("[Anthropic request]", fetchOptions.headers, req.method); + // console.log("[Anthropic request]", fetchOptions.headers, req.method); try { const res = await fetch(fetchUrl, fetchOptions); - console.log( - "[Anthropic response]", - res.status, - " ", - res.headers, - res.url, - ); + // console.log( + // "[Anthropic response]", + // res.status, + // " ", + // res.headers, + // res.url, + // ); // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); newHeaders.delete("www-authenticate"); diff --git a/app/api/artifacts/route.ts b/app/api/artifacts/route.ts new file mode 100644 index 000000000..4707e795f --- /dev/null +++ b/app/api/artifacts/route.ts @@ -0,0 +1,73 @@ +import md5 from "spark-md5"; +import { NextRequest, NextResponse } from "next/server"; +import { getServerSideConfig } from "@/app/config/server"; + +async function handle(req: NextRequest, res: NextResponse) { + const serverConfig = getServerSideConfig(); + const storeUrl = () => + `https://api.cloudflare.com/client/v4/accounts/${serverConfig.cloudflareAccountId}/storage/kv/namespaces/${serverConfig.cloudflareKVNamespaceId}`; + const storeHeaders = () => ({ + Authorization: `Bearer ${serverConfig.cloudflareKVApiKey}`, + }); + if (req.method === "POST") { + const clonedBody = await req.text(); + const hashedCode = md5.hash(clonedBody).trim(); + const body: { + key: string; + value: string; + expiration_ttl?: number; + } = { + key: hashedCode, + value: clonedBody, + }; + try { + const ttl = parseInt(serverConfig.cloudflareKVTTL as string); + if (ttl > 60) { + body["expiration_ttl"] = ttl; + } + } catch (e) { + console.error(e); + } + const res = await fetch(`${storeUrl()}/bulk`, { + headers: { + ...storeHeaders(), + "Content-Type": "application/json", + }, + method: "PUT", + body: JSON.stringify([body]), + }); + const result = await res.json(); + console.log("save data", result); + if (result?.success) { + return NextResponse.json( + { code: 0, id: hashedCode, result }, + { status: res.status }, + ); + } + return NextResponse.json( + { error: true, msg: "Save data error" }, + { status: 400 }, + ); + } + if (req.method === "GET") { + const id = req?.nextUrl?.searchParams?.get("id"); + const res = await fetch(`${storeUrl()}/values/${id}`, { + headers: storeHeaders(), + method: "GET", + }); + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: res.headers, + }); + } + return NextResponse.json( + { error: true, msg: "Invalid request" }, + { status: 400 }, + ); +} + +export const POST = handle; +export const GET = handle; + +export const runtime = "edge"; diff --git a/app/api/auth.ts b/app/api/auth.ts index b750f2d17..95965ceec 100644 --- a/app/api/auth.ts +++ b/app/api/auth.ts @@ -67,15 +67,34 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) { let systemApiKey: string | undefined; switch (modelProvider) { + case ModelProvider.Stability: + systemApiKey = serverConfig.stabilityApiKey; + break; case ModelProvider.GeminiPro: systemApiKey = serverConfig.googleApiKey; break; case ModelProvider.Claude: systemApiKey = 
serverConfig.anthropicApiKey; break; + case ModelProvider.Doubao: + systemApiKey = serverConfig.bytedanceApiKey; + break; + case ModelProvider.Ernie: + systemApiKey = serverConfig.baiduApiKey; + break; + case ModelProvider.Qwen: + systemApiKey = serverConfig.alibabaApiKey; + break; + case ModelProvider.Moonshot: + systemApiKey = serverConfig.moonshotApiKey; + break; + case ModelProvider.Iflytek: + systemApiKey = + serverConfig.iflytekApiKey + ":" + serverConfig.iflytekApiSecret; + break; case ModelProvider.GPT: default: - if (serverConfig.isAzure) { + if (req.nextUrl.pathname.includes("azure/deployments")) { systemApiKey = serverConfig.azureApiKey; } else { systemApiKey = serverConfig.apiKey; diff --git a/app/api/azure.ts b/app/api/azure.ts new file mode 100644 index 000000000..e2cb0c7e6 --- /dev/null +++ b/app/api/azure.ts @@ -0,0 +1,33 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { ModelProvider } from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "./auth"; +import { requestOpenai } from "./common"; + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Azure Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const subpath = params.path.join("/"); + + const authResult = auth(req, ModelProvider.GPT); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + return await requestOpenai(req); + } catch (e) { + console.error("[Azure] ", e); + return NextResponse.json(prettyObject(e)); + } +} diff --git a/app/api/baidu.ts b/app/api/baidu.ts new file mode 100644 index 000000000..f4315d186 --- /dev/null +++ b/app/api/baidu.ts @@ -0,0 +1,145 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + BAIDU_BASE_URL, + ApiPath, + ModelProvider, + BAIDU_OATUH_URL, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; +import { getAccessToken } from "@/app/utils/baidu"; + +const serverConfig = getServerSideConfig(); + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Baidu Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Ernie); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + if (!serverConfig.baiduApiKey || !serverConfig.baiduSecretKey) { + return NextResponse.json( + { + error: true, + message: `missing BAIDU_API_KEY or BAIDU_SECRET_KEY in server env vars`, + }, + { + status: 401, + }, + ); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[Baidu] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +async function request(req: NextRequest) { + const controller = new AbortController(); + + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Baidu, ""); + + let baseUrl = serverConfig.baiduUrl || BAIDU_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + 
console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const { access_token } = await getAccessToken( + serverConfig.baiduApiKey as string, + serverConfig.baiduSecretKey as string, + ); + const fetchUrl = `${baseUrl}${path}?access_token=${access_token}`; + + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Baidu as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[Baidu] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/bytedance.ts b/app/api/bytedance.ts new file mode 100644 index 000000000..cb65b1061 --- /dev/null +++ b/app/api/bytedance.ts @@ -0,0 +1,129 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + BYTEDANCE_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; + +const serverConfig = getServerSideConfig(); + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[ByteDance Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Doubao); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[ByteDance] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +async function request(req: NextRequest) { + const controller = new AbortController(); + + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.ByteDance, ""); + + let baseUrl = serverConfig.bytedanceUrl || BYTEDANCE_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: 
req.headers.get("Authorization") ?? "", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.ByteDance as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[ByteDance] filter`, e); + } + } + + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/common.ts b/app/api/common.ts index a75f2de5c..25decbf62 100644 --- a/app/api/common.ts +++ b/app/api/common.ts @@ -1,17 +1,24 @@ import { NextRequest, NextResponse } from "next/server"; import { getServerSideConfig } from "../config/server"; -import { DEFAULT_MODELS, OPENAI_BASE_URL, GEMINI_BASE_URL } from "../constant"; -import { collectModelTable } from "../utils/model"; -import { makeAzurePath } from "../azure"; +import { + DEFAULT_MODELS, + OPENAI_BASE_URL, + GEMINI_BASE_URL, + ServiceProvider, +} from "../constant"; +import { isModelAvailableInServer } from "../utils/model"; +import { cloudflareAIGatewayUrl } from "../utils/cloudflare"; const serverConfig = getServerSideConfig(); export async function requestOpenai(req: NextRequest) { const controller = new AbortController(); + const isAzure = req.nextUrl.pathname.includes("azure/deployments"); + var authValue, authHeaderName = ""; - if (serverConfig.isAzure) { + if (isAzure) { authValue = req.headers .get("Authorization") @@ -25,13 +32,10 @@ export async function requestOpenai(req: NextRequest) { authHeaderName = "Authorization"; } - let path = `${req.nextUrl.pathname}${req.nextUrl.search}`.replaceAll( - "/api/openai/", - "", - ); + let path = `${req.nextUrl.pathname}`.replaceAll("/api/openai/", ""); let baseUrl = - serverConfig.azureUrl || serverConfig.baseUrl || OPENAI_BASE_URL; + (isAzure ? 
serverConfig.azureUrl : serverConfig.baseUrl) || OPENAI_BASE_URL; if (!baseUrl.startsWith("http")) { baseUrl = `https://${baseUrl}`; @@ -51,17 +55,46 @@ export async function requestOpenai(req: NextRequest) { 10 * 60 * 1000, ); - if (serverConfig.isAzure) { - if (!serverConfig.azureApiVersion) { - return NextResponse.json({ - error: true, - message: `missing AZURE_API_VERSION in server env vars`, - }); + if (isAzure) { + const azureApiVersion = + req?.nextUrl?.searchParams?.get("api-version") || + serverConfig.azureApiVersion; + baseUrl = baseUrl.split("/deployments").shift() as string; + path = `${req.nextUrl.pathname.replaceAll( + "/api/azure/", + "", + )}?api-version=${azureApiVersion}`; + + // Forward compatibility: + // if display_name(deployment_name) not set, and '{deploy-id}' in AZURE_URL + // then using default '{deploy-id}' + if (serverConfig.customModels && serverConfig.azureUrl) { + const modelName = path.split("/")[1]; + let realDeployName = ""; + serverConfig.customModels + .split(",") + .filter((v) => !!v && !v.startsWith("-") && v.includes(modelName)) + .forEach((m) => { + const [fullName, displayName] = m.split("="); + const [_, providerName] = fullName.split("@"); + if (providerName === "azure" && !displayName) { + const [_, deployId] = (serverConfig?.azureUrl ?? "").split( + "deployments/", + ); + if (deployId) { + realDeployName = deployId; + } + } + }); + if (realDeployName) { + console.log("[Replace with DeployId", realDeployName); + path = path.replaceAll(modelName, realDeployName); + } } - path = makeAzurePath(path, serverConfig.azureApiVersion); } - const fetchUrl = `${baseUrl}/${path}`; + const fetchUrl = cloudflareAIGatewayUrl(`${baseUrl}/${path}`); + console.log("fetchUrl", fetchUrl); const fetchOptions: RequestInit = { headers: { "Content-Type": "application/json", @@ -83,17 +116,24 @@ export async function requestOpenai(req: NextRequest) { // #1815 try to refuse gpt4 request if (serverConfig.customModels && req.body) { try { - const modelTable = collectModelTable( - DEFAULT_MODELS, - serverConfig.customModels, - ); const clonedBody = await req.text(); fetchOptions.body = clonedBody; const jsonBody = JSON.parse(clonedBody) as { model?: string }; // not undefined and is false - if (modelTable[jsonBody?.model ?? 
""].available === false) { + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.OpenAI as string, + ) || + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Azure as string, + ) + ) { return NextResponse.json( { error: true, @@ -112,16 +152,16 @@ export async function requestOpenai(req: NextRequest) { try { const res = await fetch(fetchUrl, fetchOptions); - // Extract the OpenAI-Organization header from the response - const openaiOrganizationHeader = res.headers.get("OpenAI-Organization"); + // Extract the OpenAI-Organization header from the response + const openaiOrganizationHeader = res.headers.get("OpenAI-Organization"); - // Check if serverConfig.openaiOrgId is defined and not an empty string - if (serverConfig.openaiOrgId && serverConfig.openaiOrgId.trim() !== "") { - // If openaiOrganizationHeader is present, log it; otherwise, log that the header is not present - console.log("[Org ID]", openaiOrganizationHeader); - } else { - console.log("[Org ID] is not set up."); - } + // Check if serverConfig.openaiOrgId is defined and not an empty string + if (serverConfig.openaiOrgId && serverConfig.openaiOrgId.trim() !== "") { + // If openaiOrganizationHeader is present, log it; otherwise, log that the header is not present + console.log("[Org ID]", openaiOrganizationHeader); + } else { + console.log("[Org ID] is not set up."); + } // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); @@ -129,7 +169,6 @@ export async function requestOpenai(req: NextRequest) { // to disable nginx buffering newHeaders.set("X-Accel-Buffering", "no"); - // Conditionally delete the OpenAI-Organization header from the response if [Org ID] is undefined or empty (not setup in ENV) // Also, this is to prevent the header from being sent to the client if (!serverConfig.openaiOrgId || serverConfig.openaiOrgId.trim() === "") { @@ -142,7 +181,6 @@ export async function requestOpenai(req: NextRequest) { // The browser will try to decode the response with brotli and fail newHeaders.delete("content-encoding"); - return new Response(res.body, { status: res.status, statusText: res.statusText, diff --git a/app/api/google/[...path]/route.ts b/app/api/google.ts similarity index 72% rename from app/api/google/[...path]/route.ts rename to app/api/google.ts index ebd192891..98fe469bf 100644 --- a/app/api/google/[...path]/route.ts +++ b/app/api/google.ts @@ -1,11 +1,19 @@ import { NextRequest, NextResponse } from "next/server"; -import { auth } from "../../auth"; +import { auth } from "./auth"; import { getServerSideConfig } from "@/app/config/server"; -import { GEMINI_BASE_URL, Google, ModelProvider } from "@/app/constant"; +import { + ApiPath, + GEMINI_BASE_URL, + Google, + ModelProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; -async function handle( +const serverConfig = getServerSideConfig(); + +export async function handle( req: NextRequest, - { params }: { params: { path: string[] } }, + { params }: { params: { provider: string; path: string[] } }, ) { console.log("[Google Route] params ", params); @@ -13,32 +21,6 @@ async function handle( return NextResponse.json({ body: "OK" }, { status: 200 }); } - const controller = new AbortController(); - - const serverConfig = getServerSideConfig(); - - let baseUrl = serverConfig.googleUrl || GEMINI_BASE_URL; - - if (!baseUrl.startsWith("http")) { - baseUrl = `https://${baseUrl}`; - } - - if 
(baseUrl.endsWith("/")) { - baseUrl = baseUrl.slice(0, -1); - } - - let path = `${req.nextUrl.pathname}`.replaceAll("/api/google/", ""); - - console.log("[Proxy] ", path); - console.log("[Base Url]", baseUrl); - - const timeoutId = setTimeout( - () => { - controller.abort(); - }, - 10 * 60 * 1000, - ); - const authResult = auth(req, ModelProvider.GeminiPro); if (authResult.error) { return NextResponse.json(authResult, { @@ -49,9 +31,9 @@ async function handle( const bearToken = req.headers.get("Authorization") ?? ""; const token = bearToken.trim().replaceAll("Bearer ", "").trim(); - const key = token ? token : serverConfig.googleApiKey; + const apiKey = token ? token : serverConfig.googleApiKey; - if (!key) { + if (!apiKey) { return NextResponse.json( { error: true, @@ -62,8 +44,63 @@ async function handle( }, ); } + try { + const response = await request(req, apiKey); + return response; + } catch (e) { + console.error("[Google] ", e); + return NextResponse.json(prettyObject(e)); + } +} - const fetchUrl = `${baseUrl}/${path}?key=${key}`; +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "bom1", + "cle1", + "cpt1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; + +async function request(req: NextRequest, apiKey: string) { + const controller = new AbortController(); + + let baseUrl = serverConfig.googleUrl || GEMINI_BASE_URL; + + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Google, ""); + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + const fetchUrl = `${baseUrl}${path}?key=${apiKey}${ + req?.nextUrl?.searchParams?.get("alt") === "sse" ? 
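+  // Gemini only streams when the request URL carries `alt=sse`, so the proxy
+  // propagates that flag from the incoming request. Illustrative URLs this can
+  // produce (paths are examples, not taken from this diff):
+  //   https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=<key>
+  //   https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:streamGenerateContent?key=<key>&alt=sse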
"&alt=sse" : "" + }`; + + console.log("[Fetch Url] ", fetchUrl); const fetchOptions: RequestInit = { headers: { "Content-Type": "application/json", @@ -95,22 +132,3 @@ async function handle( clearTimeout(timeoutId); } } - -export const GET = handle; -export const POST = handle; - -export const runtime = "edge"; -export const preferredRegion = [ - "bom1", - "cle1", - "cpt1", - "gru1", - "hnd1", - "iad1", - "icn1", - "kix1", - "pdx1", - "sfo1", - "sin1", - "syd1", -]; diff --git a/app/api/iflytek.ts b/app/api/iflytek.ts new file mode 100644 index 000000000..eabdd9f4c --- /dev/null +++ b/app/api/iflytek.ts @@ -0,0 +1,131 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + Iflytek, + IFLYTEK_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; +import type { RequestPayload } from "@/app/client/platforms/openai"; +// iflytek + +const serverConfig = getServerSideConfig(); + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Iflytek Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Iflytek); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[Iflytek] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +async function request(req: NextRequest) { + const controller = new AbortController(); + + // iflytek use base url or just remove the path + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Iflytek, ""); + + let baseUrl = serverConfig.iflytekUrl || IFLYTEK_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? 
"", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Iflytek as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[Iflytek] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/moonshot.ts b/app/api/moonshot.ts new file mode 100644 index 000000000..247dd6183 --- /dev/null +++ b/app/api/moonshot.ts @@ -0,0 +1,130 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + Moonshot, + MOONSHOT_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; +import type { RequestPayload } from "@/app/client/platforms/openai"; + +const serverConfig = getServerSideConfig(); + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Moonshot Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Moonshot); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[Moonshot] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +async function request(req: NextRequest) { + const controller = new AbortController(); + + // alibaba use base url or just remove the path + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Moonshot, ""); + + let baseUrl = serverConfig.moonshotUrl || MOONSHOT_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? 
"", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Moonshot as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[Moonshot] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/openai/[...path]/route.ts b/app/api/openai.ts similarity index 80% rename from app/api/openai/[...path]/route.ts rename to app/api/openai.ts index 77059c151..7dfd84e17 100644 --- a/app/api/openai/[...path]/route.ts +++ b/app/api/openai.ts @@ -3,8 +3,8 @@ import { getServerSideConfig } from "@/app/config/server"; import { ModelProvider, OpenaiPath } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; -import { auth } from "../../auth"; -import { requestOpenai } from "../../common"; +import { auth } from "./auth"; +import { requestOpenai } from "./common"; const ALLOWD_PATH = new Set(Object.values(OpenaiPath)); @@ -13,14 +13,16 @@ function getModels(remoteModelRes: OpenAIListModelResponse) { if (config.disableGPT4) { remoteModelRes.data = remoteModelRes.data.filter( - (m) => !m.id.startsWith("gpt-4"), + (m) => + !(m.id.startsWith("gpt-4") || m.id.startsWith("chatgpt-4o")) || + m.id.startsWith("gpt-4o-mini"), ); } return remoteModelRes; } -async function handle( +export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { @@ -70,27 +72,3 @@ async function handle( return NextResponse.json(prettyObject(e)); } } - -export const GET = handle; -export const POST = handle; - -export const runtime = "edge"; -export const preferredRegion = [ - "arn1", - "bom1", - "cdg1", - "cle1", - "cpt1", - "dub1", - "fra1", - "gru1", - "hnd1", - "iad1", - "icn1", - "kix1", - "lhr1", - "pdx1", - "sfo1", - "sin1", - "syd1", -]; diff --git a/app/api/proxy.ts b/app/api/proxy.ts new file mode 100644 index 000000000..731003aa1 --- /dev/null +++ b/app/api/proxy.ts @@ -0,0 +1,75 @@ +import { NextRequest, NextResponse } from "next/server"; + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Proxy Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + // remove path params from searchParams + req.nextUrl.searchParams.delete("path"); + req.nextUrl.searchParams.delete("provider"); + + const subpath = params.path.join("/"); + const fetchUrl = `${req.headers.get( + "x-base-url", + )}/${subpath}?${req.nextUrl.searchParams.toString()}`; + const skipHeaders = 
["connection", "host", "origin", "referer", "cookie"]; + const headers = new Headers( + Array.from(req.headers.entries()).filter((item) => { + if ( + item[0].indexOf("x-") > -1 || + item[0].indexOf("sec-") > -1 || + skipHeaders.includes(item[0]) + ) { + return false; + } + return true; + }), + ); + const controller = new AbortController(); + const fetchOptions: RequestInit = { + headers, + method: req.method, + body: req.body, + // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + try { + const res = await fetch(fetchUrl, fetchOptions); + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + // The latest version of the OpenAI API forced the content-encoding to be "br" in json response + // So if the streaming is disabled, we need to remove the content-encoding header + // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header + // The browser will try to decode the response with brotli and fail + newHeaders.delete("content-encoding"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/stability.ts b/app/api/stability.ts new file mode 100644 index 000000000..2646ace85 --- /dev/null +++ b/app/api/stability.ts @@ -0,0 +1,99 @@ +import { NextRequest, NextResponse } from "next/server"; +import { getServerSideConfig } from "@/app/config/server"; +import { ModelProvider, STABILITY_BASE_URL } from "@/app/constant"; +import { auth } from "@/app/api/auth"; + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Stability] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const controller = new AbortController(); + + const serverConfig = getServerSideConfig(); + + let baseUrl = serverConfig.stabilityUrl || STABILITY_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + let path = `${req.nextUrl.pathname}`.replaceAll("/api/stability/", ""); + + console.log("[Stability Proxy] ", path); + console.log("[Stability Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const authResult = auth(req, ModelProvider.Stability); + + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + const bearToken = req.headers.get("Authorization") ?? ""; + const token = bearToken.trim().replaceAll("Bearer ", "").trim(); + + const key = token ? 
token : serverConfig.stabilityApiKey; + + if (!key) { + return NextResponse.json( + { + error: true, + message: `missing STABILITY_API_KEY in server env vars`, + }, + { + status: 401, + }, + ); + } + + const fetchUrl = `${baseUrl}/${path}`; + console.log("[Stability Url] ", fetchUrl); + const fetchOptions: RequestInit = { + headers: { + "Content-Type": req.headers.get("Content-Type") || "multipart/form-data", + Accept: req.headers.get("Accept") || "application/json", + Authorization: `Bearer ${key}`, + }, + method: req.method, + body: req.body, + // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + try { + const res = await fetch(fetchUrl, fetchOptions); + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/tencent/route.ts b/app/api/tencent/route.ts new file mode 100644 index 000000000..885909e7a --- /dev/null +++ b/app/api/tencent/route.ts @@ -0,0 +1,124 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + TENCENT_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, + Tencent, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; +import { getHeader } from "@/app/utils/tencent"; + +const serverConfig = getServerSideConfig(); + +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Tencent Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Hunyuan); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[Tencent] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; + +async function request(req: NextRequest) { + const controller = new AbortController(); + + let baseUrl = serverConfig.tencentUrl || TENCENT_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = baseUrl; + + const body = await req.text(); + const headers = await getHeader( + body, + serverConfig.tencentSecretId as string, + serverConfig.tencentSecretKey as string, + ); + const fetchOptions: RequestInit = { + headers, + method: req.method, + body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + try { + const res = await 
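+    // Hunyuan authenticates with a signature computed over the exact request
+    // body (getHeader is expected to produce the signed header set), which is
+    // why the body is read in full with req.text() and the very same string is
+    // both signed and sent. In outline:
+    //   const body = await req.text();
+    //   const headers = await getHeader(body, secretId, secretKey); // signed headers
+    //   await fetch(baseUrl, { method: "POST", headers, body });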
fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/webdav/[...path]/route.ts b/app/api/webdav/[...path]/route.ts index 816c2046b..9f96cbfcf 100644 --- a/app/api/webdav/[...path]/route.ts +++ b/app/api/webdav/[...path]/route.ts @@ -9,6 +9,14 @@ const mergedAllowedWebDavEndpoints = [ ...config.allowedWebDevEndpoints, ].filter((domain) => Boolean(domain.trim())); +const normalizeUrl = (url: string) => { + try { + return new URL(url); + } catch (err) { + return null; + } +}; + async function handle( req: NextRequest, { params }: { params: { path: string[] } }, @@ -21,12 +29,23 @@ async function handle( const requestUrl = new URL(req.url); let endpoint = requestUrl.searchParams.get("endpoint"); + let proxy_method = requestUrl.searchParams.get("proxy_method") || req.method; // Validate the endpoint to prevent potential SSRF attacks if ( - !mergedAllowedWebDavEndpoints.some( - (allowedEndpoint) => endpoint?.startsWith(allowedEndpoint), - ) + !endpoint || + !mergedAllowedWebDavEndpoints.some((allowedEndpoint) => { + const normalizedAllowedEndpoint = normalizeUrl(allowedEndpoint); + const normalizedEndpoint = normalizeUrl(endpoint as string); + + return ( + normalizedEndpoint && + normalizedEndpoint.hostname === normalizedAllowedEndpoint?.hostname && + normalizedEndpoint.pathname.startsWith( + normalizedAllowedEndpoint.pathname, + ) + ); + }) ) { return NextResponse.json( { @@ -47,7 +66,11 @@ async function handle( const targetPath = `${endpoint}${endpointPath}`; // only allow MKCOL, GET, PUT - if (req.method !== "MKCOL" && req.method !== "GET" && req.method !== "PUT") { + if ( + proxy_method !== "MKCOL" && + proxy_method !== "GET" && + proxy_method !== "PUT" + ) { return NextResponse.json( { error: true, @@ -60,7 +83,7 @@ async function handle( } // for MKCOL request, only allow request ${folder} - if (req.method === "MKCOL" && !targetPath.endsWith(folder)) { + if (proxy_method === "MKCOL" && !targetPath.endsWith(folder)) { return NextResponse.json( { error: true, @@ -73,7 +96,7 @@ async function handle( } // for GET request, only allow request ending with fileName - if (req.method === "GET" && !targetPath.endsWith(fileName)) { + if (proxy_method === "GET" && !targetPath.endsWith(fileName)) { return NextResponse.json( { error: true, @@ -86,7 +109,7 @@ async function handle( } // for PUT request, only allow request ending with fileName - if (req.method === "PUT" && !targetPath.endsWith(fileName)) { + if (proxy_method === "PUT" && !targetPath.endsWith(fileName)) { return NextResponse.json( { error: true, @@ -100,7 +123,7 @@ async function handle( const targetUrl = targetPath; - const method = req.method; + const method = proxy_method || req.method; const shouldNotHaveBody = ["get", "head"].includes( method?.toLowerCase() ?? 
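  // The `proxy_method` query parameter lets a browser send a plain GET/POST
  // while asking the proxy to perform MKCOL/GET/PUT upstream. Endpoint
  // validation now compares parsed URLs rather than raw string prefixes, so an
  // allowlist entry matches only its own hostname and path. With a hypothetical
  // entry "https://dav.example.com/remote.php/":
  //   accepted: https://dav.example.com/remote.php/webdav/chatgpt
  //   rejected: https://dav.example.com.evil.com/remote.php/ (hostname differs)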
"", ); @@ -125,7 +148,7 @@ async function handle( "[Any Proxy]", targetUrl, { - method: req.method, + method: method, }, { status: fetchResult?.status, diff --git a/app/azure.ts b/app/azure.ts deleted file mode 100644 index 48406c55b..000000000 --- a/app/azure.ts +++ /dev/null @@ -1,9 +0,0 @@ -export function makeAzurePath(path: string, apiVersion: string) { - // should omit /v1 prefix - path = path.replaceAll("v1/", ""); - - // should add api-key to query string - path += `${path.includes("?") ? "&" : "?"}api-version=${apiVersion}`; - - return path; -} diff --git a/app/client/api.ts b/app/client/api.ts index 7bee546b4..cecc453ba 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -5,10 +5,23 @@ import { ModelProvider, ServiceProvider, } from "../constant"; -import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store"; -import { ChatGPTApi } from "./platforms/openai"; +import { + ChatMessageTool, + ChatMessage, + ModelType, + useAccessStore, + useChatStore, +} from "../store"; +import { ChatGPTApi, DalleRequestPayload } from "./platforms/openai"; import { GeminiProApi } from "./platforms/google"; import { ClaudeApi } from "./platforms/anthropic"; +import { ErnieApi } from "./platforms/baidu"; +import { DoubaoApi } from "./platforms/bytedance"; +import { QwenApi } from "./platforms/alibaba"; +import { HunyuanApi } from "./platforms/tencent"; +import { MoonshotApi } from "./platforms/moonshot"; +import { SparkApi } from "./platforms/iflytek"; + export const ROLES = ["system", "user", "assistant"] as const; export type MessageRole = (typeof ROLES)[number]; @@ -30,11 +43,15 @@ export interface RequestMessage { export interface LLMConfig { model: string; + providerName?: string; temperature?: number; top_p?: number; stream?: boolean; presence_penalty?: number; frequency_penalty?: number; + size?: DalleRequestPayload["size"]; + quality?: DalleRequestPayload["quality"]; + style?: DalleRequestPayload["style"]; } export interface ChatOptions { @@ -45,6 +62,8 @@ export interface ChatOptions { onFinish: (message: string) => void; onError?: (err: Error) => void; onController?: (controller: AbortController) => void; + onBeforeTool?: (tool: ChatMessageTool) => void; + onAfterTool?: (tool: ChatMessageTool) => void; } export interface LLMUsage { @@ -54,14 +73,17 @@ export interface LLMUsage { export interface LLMModel { name: string; + displayName?: string; available: boolean; provider: LLMModelProvider; + sorted: number; } export interface LLMModelProvider { id: string; providerName: string; providerType: string; + sorted: number; } export abstract class LLMApi { @@ -102,6 +124,24 @@ export class ClientApi { case ModelProvider.Claude: this.llm = new ClaudeApi(); break; + case ModelProvider.Ernie: + this.llm = new ErnieApi(); + break; + case ModelProvider.Doubao: + this.llm = new DoubaoApi(); + break; + case ModelProvider.Qwen: + this.llm = new QwenApi(); + break; + case ModelProvider.Hunyuan: + this.llm = new HunyuanApi(); + break; + case ModelProvider.Moonshot: + this.llm = new MoonshotApi(); + break; + case ModelProvider.Iflytek: + this.llm = new SparkApi(); + break; default: this.llm = new ChatGPTApi(); } @@ -153,39 +193,122 @@ export class ClientApi { } } +export function getBearerToken( + apiKey: string, + noBearer: boolean = false, +): string { + return validString(apiKey) + ? `${noBearer ? 
"" : "Bearer "}${apiKey.trim()}` + : ""; +} + +export function validString(x: string): boolean { + return x?.length > 0; +} + export function getHeaders() { const accessStore = useAccessStore.getState(); + const chatStore = useChatStore.getState(); const headers: Record = { "Content-Type": "application/json", Accept: "application/json", }; - const modelConfig = useChatStore.getState().currentSession().mask.modelConfig; - const isGoogle = modelConfig.model.startsWith("gemini"); - const isAzure = accessStore.provider === ServiceProvider.Azure; - const authHeader = isAzure ? "api-key" : "Authorization"; - const apiKey = isGoogle - ? accessStore.googleApiKey - : isAzure - ? accessStore.azureApiKey - : accessStore.openaiApiKey; - const clientConfig = getClientConfig(); - const makeBearer = (s: string) => `${isAzure ? "" : "Bearer "}${s.trim()}`; - const validString = (x: string) => x && x.length > 0; + const clientConfig = getClientConfig(); + + function getConfig() { + const modelConfig = chatStore.currentSession().mask.modelConfig; + const isGoogle = modelConfig.providerName == ServiceProvider.Google; + const isAzure = modelConfig.providerName === ServiceProvider.Azure; + const isAnthropic = modelConfig.providerName === ServiceProvider.Anthropic; + const isBaidu = modelConfig.providerName == ServiceProvider.Baidu; + const isByteDance = modelConfig.providerName === ServiceProvider.ByteDance; + const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba; + const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot; + const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek; + const isEnabledAccessControl = accessStore.enabledAccessControl(); + const apiKey = isGoogle + ? accessStore.googleApiKey + : isAzure + ? accessStore.azureApiKey + : isAnthropic + ? accessStore.anthropicApiKey + : isByteDance + ? accessStore.bytedanceApiKey + : isAlibaba + ? accessStore.alibabaApiKey + : isMoonshot + ? accessStore.moonshotApiKey + : isIflytek + ? accessStore.iflytekApiKey && accessStore.iflytekApiSecret + ? accessStore.iflytekApiKey + ":" + accessStore.iflytekApiSecret + : "" + : accessStore.openaiApiKey; + return { + isGoogle, + isAzure, + isAnthropic, + isBaidu, + isByteDance, + isAlibaba, + isMoonshot, + isIflytek, + apiKey, + isEnabledAccessControl, + }; + } + + function getAuthHeader(): string { + return isAzure ? "api-key" : isAnthropic ? 
"x-api-key" : "Authorization"; + } + + const { + isGoogle, + isAzure, + isAnthropic, + isBaidu, + apiKey, + isEnabledAccessControl, + } = getConfig(); // when using google api in app, not set auth header - if (!(isGoogle && clientConfig?.isApp)) { - // use user's api key first - if (validString(apiKey)) { - headers[authHeader] = makeBearer(apiKey); - } else if ( - accessStore.enabledAccessControl() && - validString(accessStore.accessCode) - ) { - headers[authHeader] = makeBearer( - ACCESS_CODE_PREFIX + accessStore.accessCode, - ); - } + if (isGoogle && clientConfig?.isApp) return headers; + // when using baidu api in app, not set auth header + if (isBaidu && clientConfig?.isApp) return headers; + + const authHeader = getAuthHeader(); + + const bearerToken = getBearerToken(apiKey, isAzure || isAnthropic); + + if (bearerToken) { + headers[authHeader] = bearerToken; + } else if (isEnabledAccessControl && validString(accessStore.accessCode)) { + headers["Authorization"] = getBearerToken( + ACCESS_CODE_PREFIX + accessStore.accessCode, + ); } return headers; } + +export function getClientApi(provider: ServiceProvider): ClientApi { + switch (provider) { + case ServiceProvider.Google: + return new ClientApi(ModelProvider.GeminiPro); + case ServiceProvider.Anthropic: + return new ClientApi(ModelProvider.Claude); + case ServiceProvider.Baidu: + return new ClientApi(ModelProvider.Ernie); + case ServiceProvider.ByteDance: + return new ClientApi(ModelProvider.Doubao); + case ServiceProvider.Alibaba: + return new ClientApi(ModelProvider.Qwen); + case ServiceProvider.Tencent: + return new ClientApi(ModelProvider.Hunyuan); + case ServiceProvider.Moonshot: + return new ClientApi(ModelProvider.Moonshot); + case ServiceProvider.Iflytek: + return new ClientApi(ModelProvider.Iflytek); + default: + return new ClientApi(ModelProvider.GPT); + } +} diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts new file mode 100644 index 000000000..d5fa3042f --- /dev/null +++ b/app/client/platforms/alibaba.ts @@ -0,0 +1,268 @@ +"use client"; +import { + ApiPath, + Alibaba, + ALIBABA_BASE_URL, + REQUEST_TIMEOUT_MS, +} from "@/app/constant"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; + +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + MultimodalContent, +} from "../api"; +import Locale from "../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent } from "@/app/utils"; + +export interface OpenAIListModelResponse { + object: string; + data: Array<{ + id: string; + object: string; + root: string; + }>; +} + +interface RequestInput { + messages: { + role: "system" | "user" | "assistant"; + content: string | MultimodalContent[]; + }[]; +} +interface RequestParam { + result_format: string; + incremental_output?: boolean; + temperature: number; + repetition_penalty?: number; + top_p: number; + max_tokens?: number; +} +interface RequestPayload { + model: string; + input: RequestInput; + parameters: RequestParam; +} + +export class QwenApi implements LLMApi { + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.alibabaUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + baseUrl = isApp ? 
ALIBABA_BASE_URL : ApiPath.Alibaba;
+    }
+
+    if (baseUrl.endsWith("/")) {
+      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
+    }
+    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Alibaba)) {
+      baseUrl = "https://" + baseUrl;
+    }
+
+    console.log("[Proxy Endpoint] ", baseUrl, path);
+
+    return [baseUrl, path].join("/");
+  }
+
+  extractMessage(res: any) {
+    return res?.output?.choices?.at(0)?.message?.content ?? "";
+  }
+
+  async chat(options: ChatOptions) {
+    const messages = options.messages.map((v) => ({
+      role: v.role,
+      content: getMessageTextContent(v),
+    }));
+
+    const modelConfig = {
+      ...useAppConfig.getState().modelConfig,
+      ...useChatStore.getState().currentSession().mask.modelConfig,
+      ...{
+        model: options.config.model,
+      },
+    };
+
+    const shouldStream = !!options.config.stream;
+    const requestPayload: RequestPayload = {
+      model: modelConfig.model,
+      input: {
+        messages,
+      },
+      parameters: {
+        result_format: "message",
+        incremental_output: shouldStream,
+        temperature: modelConfig.temperature,
+        // max_tokens: modelConfig.max_tokens,
+        top_p: modelConfig.top_p === 1 ? 0.99 : modelConfig.top_p, // qwen top_p should be < 1
+      },
+    };
+
+    const controller = new AbortController();
+    options.onController?.(controller);
+
+    try {
+      const chatPath = this.path(Alibaba.ChatPath);
+      const chatPayload = {
+        method: "POST",
+        body: JSON.stringify(requestPayload),
+        signal: controller.signal,
+        headers: {
+          ...getHeaders(),
+          "X-DashScope-SSE": shouldStream ? "enable" : "disable",
+        },
+      };
+
+      // make a fetch request
+      const requestTimeoutId = setTimeout(
+        () => controller.abort(),
+        REQUEST_TIMEOUT_MS,
+      );
+
+      if (shouldStream) {
+        let responseText = "";
+        let remainText = "";
+        let finished = false;
+
+        // animate the response to make it look smooth
+        function animateResponseText() {
+          if (finished || controller.signal.aborted) {
+            responseText += remainText;
+            console.log("[Response Animation] finished");
+            if (responseText?.length === 0) {
+              options.onError?.(new Error("empty response from server"));
+            }
+            return;
+          }
+
+          if (remainText.length > 0) {
+            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
+            const fetchText = remainText.slice(0, fetchCount);
+            responseText += fetchText;
+            remainText = remainText.slice(fetchCount);
+            options.onUpdate?.(responseText, fetchText);
+          }
+
+          requestAnimationFrame(animateResponseText);
+        }
+
+        // start animation
+        animateResponseText();
+
+        const finish = () => {
+          if (!finished) {
+            finished = true;
+            options.onFinish(responseText + remainText);
+          }
+        };
+
+        controller.signal.onabort = finish;
+
+        fetchEventSource(chatPath, {
+          ...chatPayload,
+          async onopen(res) {
+            clearTimeout(requestTimeoutId);
+            const contentType = res.headers.get("content-type");
+            console.log(
+              "[Alibaba] request response content type: ",
+              contentType,
+            );
+
+            if (contentType?.startsWith("text/plain")) {
+              responseText = await res.clone().text();
+              return finish();
+            }
+
+            if (
+              !res.ok ||
+              !res.headers
+                .get("content-type")
+                ?.startsWith(EventStreamContentType) ||
+              res.status !== 200
+            ) {
+              const responseTexts = [responseText];
+              let extraInfo = await res.clone().text();
+              try {
+                const resJson = await res.clone().json();
+                extraInfo = prettyObject(resJson);
+              } catch {}
+
+              if (res.status === 401) {
+                responseTexts.push(Locale.Error.Unauthorized);
+              }
+
+              if (extraInfo) {
+                responseTexts.push(extraInfo);
+              }
+
+              responseText = responseTexts.join("\n\n");
+
+              return finish();
+            }
+          },
+          onmessage(msg) {
+            if (msg.data === "[DONE]" ||
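+            // The handler treats a literal "[DONE]" event as end-of-stream; every
+            // other event carries JSON whose delta sits at
+            // output.choices[0].message.content (incremental, because the request
+            // set incremental_output: true). A hypothetical event for illustration:
+            //   data: {"output":{"choices":[{"message":{"content":"Hello"}}]}}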
finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const choices = json.output.choices as Array<{ + message: { content: string }; + }>; + const delta = choices[0]?.message?.content; + if (delta) { + remainText += delta; + } + } catch (e) { + console.error("[Request] parse error", text, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} +export { Alibaba }; diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts index e90c8f057..7dd39c9cd 100644 --- a/app/client/platforms/anthropic.ts +++ b/app/client/platforms/anthropic.ts @@ -1,9 +1,14 @@ import { ACCESS_CODE_PREFIX, Anthropic, ApiPath } from "@/app/constant"; -import { ChatOptions, LLMApi, MultimodalContent } from "../api"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { ChatOptions, getHeaders, LLMApi, MultimodalContent } from "../api"; +import { + useAccessStore, + useAppConfig, + useChatStore, + usePluginStore, + ChatMessageTool, +} from "@/app/store"; import { getClientConfig } from "@/app/config/client"; import { DEFAULT_API_HOST } from "@/app/constant"; -import { RequestMessage } from "@/app/typing"; import { EventStreamContentType, fetchEventSource, @@ -12,6 +17,9 @@ import { import Locale from "../../locales"; import { prettyObject } from "@/app/utils/format"; import { getMessageTextContent, isVisionModel } from "@/app/utils"; +import { preProcessImageContent, stream } from "@/app/utils/chat"; +import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; +import { RequestPayload } from "./openai"; export type MultiBlockContent = { type: "image" | "text"; @@ -92,7 +100,12 @@ export class ClaudeApi implements LLMApi { }, }; - const messages = [...options.messages]; + // try get base64image from local cache image_url + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = await preProcessImageContent(v.content); + messages.push({ role: v.role, content }); + } const keys = ["system", "user"]; @@ -185,113 +198,126 @@ export class ClaudeApi implements LLMApi { const controller = new AbortController(); options.onController?.(controller); - const payload = { - method: "POST", - body: JSON.stringify(requestBody), - signal: controller.signal, - headers: { - "Content-Type": "application/json", - Accept: "application/json", - "x-api-key": accessStore.anthropicApiKey, - "anthropic-version": accessStore.anthropicApiVersion, - Authorization: getAuthKey(accessStore.anthropicApiKey), - }, - }; - if (shouldStream) { - try { - const context = { - text: "", - finished: false, - }; - - const finish = () => { - if (!context.finished) { - options.onFinish(context.text); - context.finished = true; - } - }; - - controller.signal.onabort = finish; - fetchEventSource(path, { - ...payload, - async onopen(res) { - const contentType = res.headers.get("content-type"); - console.log("response content type: ", contentType); - - if (contentType?.startsWith("text/plain")) { - 
context.text = await res.clone().text(); - return finish(); - } - - if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 - ) { - const responseTexts = [context.text]; - let extraInfo = await res.clone().text(); - try { - const resJson = await res.clone().json(); - extraInfo = prettyObject(resJson); - } catch {} - - if (res.status === 401) { - responseTexts.push(Locale.Error.Unauthorized); - } - - if (extraInfo) { - responseTexts.push(extraInfo); - } - - context.text = responseTexts.join("\n\n"); - - return finish(); - } - }, - onmessage(msg) { - let chunkJson: - | undefined - | { - type: "content_block_delta" | "content_block_stop"; - delta?: { - type: "text_delta"; - text: string; - }; - index: number; + let index = -1; + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return stream( + path, + requestBody, + { + ...getHeaders(), + "anthropic-version": accessStore.anthropicApiVersion, + }, + // @ts-ignore + tools.map((tool) => ({ + name: tool?.function?.name, + description: tool?.function?.description, + input_schema: tool?.function?.parameters, + })), + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + let chunkJson: + | undefined + | { + type: "content_block_delta" | "content_block_stop"; + content_block?: { + type: "tool_use"; + id: string; + name: string; }; - try { - chunkJson = JSON.parse(msg.data); - } catch (e) { - console.error("[Response] parse error", msg.data); - } + delta?: { + type: "text_delta" | "input_json_delta"; + text?: string; + partial_json?: string; + }; + index: number; + }; + chunkJson = JSON.parse(text); - if (!chunkJson || chunkJson.type === "content_block_stop") { - return finish(); - } - - const { delta } = chunkJson; - if (delta?.text) { - context.text += delta.text; - options.onUpdate?.(context.text, delta.text); - } - }, - onclose() { - finish(); - }, - onerror(e) { - options.onError?.(e); - throw e; - }, - openWhenHidden: true, - }); - } catch (e) { - console.error("failed to chat", e); - options.onError?.(e as Error); - } + if (chunkJson?.content_block?.type == "tool_use") { + index += 1; + const id = chunkJson?.content_block.id; + const name = chunkJson?.content_block.name; + runTools.push({ + id, + type: "function", + function: { + name, + arguments: "", + }, + }); + } + if ( + chunkJson?.delta?.type == "input_json_delta" && + chunkJson?.delta?.partial_json + ) { + // @ts-ignore + runTools[index]["function"]["arguments"] += + chunkJson?.delta?.partial_json; + } + return chunkJson?.delta?.text; + }, + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // reset index value + index = -1; + // @ts-ignore + requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + { + role: "assistant", + content: toolCallMessage.tool_calls.map( + (tool: ChatMessageTool) => ({ + type: "tool_use", + id: tool.id, + name: tool?.function?.name, + input: tool?.function?.arguments + ? 
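+              // The streamed input_json_delta fragments were accumulated into
+              // `arguments` as a string, so they are parsed back into an object
+              // before being echoed to the API as a tool_use block, followed by
+              // user-role tool_result blocks. The message shapes, roughly:
+              //   { role: "assistant", content: [{ type: "tool_use", id, name, input }] }
+              //   { role: "user", content: [{ type: "tool_result", tool_use_id, content }] }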
JSON.parse(tool?.function?.arguments) + : {}, + }), + ), + }, + // @ts-ignore + ...toolCallResult.map((result) => ({ + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: result.tool_call_id, + content: result.content, + }, + ], + })), + ); + }, + options, + ); } else { + const payload = { + method: "POST", + body: JSON.stringify(requestBody), + signal: controller.signal, + headers: { + ...getHeaders(), // get common headers + "anthropic-version": accessStore.anthropicApiVersion, + // do not send `anthropicApiKey` in browser!!! + // Authorization: getAuthKey(accessStore.anthropicApiKey), + }, + }; + try { controller.signal.onabort = () => options.onFinish(""); @@ -376,7 +402,8 @@ export class ClaudeApi implements LLMApi { baseUrl = trimEnd(baseUrl, "/"); - return `${baseUrl}/${path}`; + // try rebuild url, when using cloudflare ai gateway in client + return cloudflareAIGatewayUrl(`${baseUrl}/${path}`); } } @@ -389,27 +416,3 @@ function trimEnd(s: string, end = " ") { return s; } - -function bearer(value: string) { - return `Bearer ${value.trim()}`; -} - -function getAuthKey(apiKey = "") { - const accessStore = useAccessStore.getState(); - const isApp = !!getClientConfig()?.isApp; - let authKey = ""; - - if (apiKey) { - // use user's api key first - authKey = bearer(apiKey); - } else if ( - accessStore.enabledAccessControl() && - !isApp && - !!accessStore.accessCode - ) { - // or use access code - authKey = bearer(ACCESS_CODE_PREFIX + accessStore.accessCode); - } - - return authKey; -} diff --git a/app/client/platforms/baidu.ts b/app/client/platforms/baidu.ts new file mode 100644 index 000000000..3be147f49 --- /dev/null +++ b/app/client/platforms/baidu.ts @@ -0,0 +1,281 @@ +"use client"; +import { + ApiPath, + Baidu, + BAIDU_BASE_URL, + REQUEST_TIMEOUT_MS, +} from "@/app/constant"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { getAccessToken } from "@/app/utils/baidu"; + +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + MultimodalContent, +} from "../api"; +import Locale from "../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent } from "@/app/utils"; + +export interface OpenAIListModelResponse { + object: string; + data: Array<{ + id: string; + object: string; + root: string; + }>; +} + +interface RequestPayload { + messages: { + role: "system" | "user" | "assistant"; + content: string | MultimodalContent[]; + }[]; + stream?: boolean; + model: string; + temperature: number; + presence_penalty: number; + frequency_penalty: number; + top_p: number; + max_tokens?: number; +} + +export class ErnieApi implements LLMApi { + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.baiduUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + // do not use proxy for baidubce api + baseUrl = isApp ? 
BAIDU_BASE_URL : ApiPath.Baidu; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Baidu)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + async chat(options: ChatOptions) { + const messages = options.messages.map((v) => ({ + // "error_code": 336006, "error_msg": "the role of message with even index in the messages must be user or function", + role: v.role === "system" ? "user" : v.role, + content: getMessageTextContent(v), + })); + + // "error_code": 336006, "error_msg": "the length of messages must be an odd number", + if (messages.length % 2 === 0) { + if (messages.at(0)?.role === "user") { + messages.splice(1, 0, { + role: "assistant", + content: " ", + }); + } else { + messages.unshift({ + role: "user", + content: " ", + }); + } + } + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + }, + }; + + const shouldStream = !!options.config.stream; + const requestPayload: RequestPayload = { + messages, + stream: shouldStream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + }; + + console.log("[Request] Baidu payload: ", requestPayload); + + const controller = new AbortController(); + options.onController?.(controller); + + try { + let chatPath = this.path(Baidu.ChatPath(modelConfig.model)); + + // getAccessToken can not run in browser, because cors error + if (!!getClientConfig()?.isApp) { + const accessStore = useAccessStore.getState(); + if (accessStore.useCustomConfig) { + if (accessStore.isValidBaidu()) { + const { access_token } = await getAccessToken( + accessStore.baiduApiKey, + accessStore.baiduSecretKey, + ); + chatPath = `${chatPath}${ + chatPath.includes("?") ? "&" : "?" 
+            }access_token=${access_token}`;
+          }
+        }
+      }
+      const chatPayload = {
+        method: "POST",
+        body: JSON.stringify(requestPayload),
+        signal: controller.signal,
+        headers: getHeaders(),
+      };
+
+      // make a fetch request
+      const requestTimeoutId = setTimeout(
+        () => controller.abort(),
+        REQUEST_TIMEOUT_MS,
+      );
+
+      if (shouldStream) {
+        let responseText = "";
+        let remainText = "";
+        let finished = false;
+
+        // animate the response to make it look smooth
+        function animateResponseText() {
+          if (finished || controller.signal.aborted) {
+            responseText += remainText;
+            console.log("[Response Animation] finished");
+            if (responseText?.length === 0) {
+              options.onError?.(new Error("empty response from server"));
+            }
+            return;
+          }
+
+          if (remainText.length > 0) {
+            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
+            const fetchText = remainText.slice(0, fetchCount);
+            responseText += fetchText;
+            remainText = remainText.slice(fetchCount);
+            options.onUpdate?.(responseText, fetchText);
+          }
+
+          requestAnimationFrame(animateResponseText);
+        }
+
+        // start animation
+        animateResponseText();
+
+        const finish = () => {
+          if (!finished) {
+            finished = true;
+            options.onFinish(responseText + remainText);
+          }
+        };
+
+        controller.signal.onabort = finish;
+
+        fetchEventSource(chatPath, {
+          ...chatPayload,
+          async onopen(res) {
+            clearTimeout(requestTimeoutId);
+            const contentType = res.headers.get("content-type");
+            console.log("[Baidu] request response content type: ", contentType);
+
+            if (contentType?.startsWith("text/plain")) {
+              responseText = await res.clone().text();
+              return finish();
+            }
+
+            if (
+              !res.ok ||
+              !res.headers
+                .get("content-type")
+                ?.startsWith(EventStreamContentType) ||
+              res.status !== 200
+            ) {
+              const responseTexts = [responseText];
+              let extraInfo = await res.clone().text();
+              try {
+                const resJson = await res.clone().json();
+                extraInfo = prettyObject(resJson);
+              } catch {}
+
+              if (res.status === 401) {
+                responseTexts.push(Locale.Error.Unauthorized);
+              }
+
+              if (extraInfo) {
+                responseTexts.push(extraInfo);
+              }
+
+              responseText = responseTexts.join("\n\n");
+
+              return finish();
+            }
+          },
+          onmessage(msg) {
+            if (msg.data === "[DONE]" || finished) {
+              return finish();
+            }
+            const text = msg.data;
+            try {
+              const json = JSON.parse(text);
+              const delta = json?.result;
+              if (delta) {
+                remainText += delta;
+              }
+            } catch (e) {
+              console.error("[Request] parse error", text, msg);
+            }
+          },
+          onclose() {
+            finish();
+          },
+          onerror(e) {
+            options.onError?.(e);
+            throw e;
+          },
+          openWhenHidden: true,
+        });
+      } else {
+        const res = await fetch(chatPath, chatPayload);
+        clearTimeout(requestTimeoutId);
+
+        const resJson = await res.json();
+        const message = resJson?.result;
+        options.onFinish(message);
+      }
+    } catch (e) {
+      console.log("[Request] failed to make a chat request", e);
+      options.onError?.(e as Error);
+    }
+  }
+  async usage() {
+    return {
+      used: 0,
+      total: 0,
+    };
+  }
+
+  async models(): Promise<LLMModel[]> {
+    return [];
+  }
+}
+export { Baidu };
diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts
new file mode 100644
index 000000000..7677cafe1
--- /dev/null
+++ b/app/client/platforms/bytedance.ts
@@ -0,0 +1,255 @@
+"use client";
+import {
+  ApiPath,
+  ByteDance,
+  BYTEDANCE_BASE_URL,
+  REQUEST_TIMEOUT_MS,
+} from "@/app/constant";
+import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
+
+import {
+  ChatOptions,
+  getHeaders,
+  LLMApi,
+  LLMModel,
+  MultimodalContent,
+} from "../api";
+import Locale from "../../locales";
+import {
+  EventStreamContentType,
+  fetchEventSource,
+} from "@fortaine/fetch-event-source";
+import { prettyObject } from "@/app/utils/format";
+import { getClientConfig } from "@/app/config/client";
+import { getMessageTextContent } from "@/app/utils";
+
+export interface OpenAIListModelResponse {
+  object: string;
+  data: Array<{
+    id: string;
+    object: string;
+    root: string;
+  }>;
+}
+
+interface RequestPayload {
+  messages: {
+    role: "system" | "user" | "assistant";
+    content: string | MultimodalContent[];
+  }[];
+  stream?: boolean;
+  model: string;
+  temperature: number;
+  presence_penalty: number;
+  frequency_penalty: number;
+  top_p: number;
+  max_tokens?: number;
+}
+
+export class DoubaoApi implements LLMApi {
+  path(path: string): string {
+    const accessStore = useAccessStore.getState();
+
+    let baseUrl = "";
+
+    if (accessStore.useCustomConfig) {
+      baseUrl = accessStore.bytedanceUrl;
+    }
+
+    if (baseUrl.length === 0) {
+      const isApp = !!getClientConfig()?.isApp;
+      baseUrl = isApp ? BYTEDANCE_BASE_URL : ApiPath.ByteDance;
+    }
+
+    if (baseUrl.endsWith("/")) {
+      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
+    }
+    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.ByteDance)) {
+      baseUrl = "https://" + baseUrl;
+    }
+
+    console.log("[Proxy Endpoint] ", baseUrl, path);
+
+    return [baseUrl, path].join("/");
+  }
+
+  extractMessage(res: any) {
+    return res.choices?.at(0)?.message?.content ?? "";
+  }
+
+  async chat(options: ChatOptions) {
+    const messages = options.messages.map((v) => ({
+      role: v.role,
+      content: getMessageTextContent(v),
+    }));
+
+    const modelConfig = {
+      ...useAppConfig.getState().modelConfig,
+      ...useChatStore.getState().currentSession().mask.modelConfig,
+      ...{
+        model: options.config.model,
+      },
+    };
+
+    const shouldStream = !!options.config.stream;
+    const requestPayload: RequestPayload = {
+      messages,
+      stream: shouldStream,
+      model: modelConfig.model,
+      temperature: modelConfig.temperature,
+      presence_penalty: modelConfig.presence_penalty,
+      frequency_penalty: modelConfig.frequency_penalty,
+      top_p: modelConfig.top_p,
+    };
+
+    const controller = new AbortController();
+    options.onController?.(controller);
+
+    try {
+      const chatPath = this.path(ByteDance.ChatPath);
+      const chatPayload = {
+        method: "POST",
+        body: JSON.stringify(requestPayload),
+        signal: controller.signal,
+        headers: getHeaders(),
+      };
+
+      // make a fetch request
+      const requestTimeoutId = setTimeout(
+        () => controller.abort(),
+        REQUEST_TIMEOUT_MS,
+      );
+
+      if (shouldStream) {
+        let responseText = "";
+        let remainText = "";
+        let finished = false;
+
+        // animate the response to make it look smooth
+        function animateResponseText() {
+          if (finished || controller.signal.aborted) {
+            responseText += remainText;
+            console.log("[Response Animation] finished");
+            if (responseText?.length === 0) {
+              options.onError?.(new Error("empty response from server"));
+            }
+            return;
+          }
+
+          if (remainText.length > 0) {
+            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
+            const fetchText = remainText.slice(0, fetchCount);
+            responseText += fetchText;
+            remainText = remainText.slice(fetchCount);
+            options.onUpdate?.(responseText, fetchText);
+          }
+
+          requestAnimationFrame(animateResponseText);
+        }
+
+        // start animation
+        animateResponseText();
+
+        const finish = () => {
+          if (!finished) {
+            finished = true;
+            options.onFinish(responseText + remainText);
+          }
+        };
+
+        controller.signal.onabort = finish;
+
+        fetchEventSource(chatPath, {
+          ...chatPayload,
+          async onopen(res) {
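+            // onopen fires once response headers arrive: clear the pending
+            // timeout, then fold anything that is not a well-formed SSE response
+            // into the visible output. The happy path is, in effect:
+            //   res.ok && res.status === 200 &&
+            //   res.headers.get("content-type")?.startsWith("text/event-stream")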
clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log( + "[ByteDance] request response content type: ", + contentType, + ); + + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [responseText]; + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); + } + + if (extraInfo) { + responseTexts.push(extraInfo); + } + + responseText = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { content: string }; + }>; + const delta = choices[0]?.delta?.content; + if (delta) { + remainText += delta; + } + } catch (e) { + console.error("[Request] parse error", text, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} +export { ByteDance }; diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts index a786f5275..12d884635 100644 --- a/app/client/platforms/google.ts +++ b/app/client/platforms/google.ts @@ -1,15 +1,52 @@ -import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant"; +import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant"; import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; import { getClientConfig } from "@/app/config/client"; import { DEFAULT_API_HOST } from "@/app/constant"; +import Locale from "../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; import { getMessageTextContent, getMessageImages, isVisionModel, } from "@/app/utils"; +import { preProcessImageContent } from "@/app/utils/chat"; export class GeminiProApi implements LLMApi { + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + if (accessStore.useCustomConfig) { + baseUrl = accessStore.googleUrl; + } + + const isApp = !!getClientConfig()?.isApp; + if (baseUrl.length === 0) { + baseUrl = isApp ? DEFAULT_API_HOST + `/api/proxy/google` : ApiPath.Google; + } + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Google)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + let chatPath = [baseUrl, path].join("/"); + + chatPath += chatPath.includes("?") ? 
"&alt=sse" : "?alt=sse"; + // if chatPath.startsWith('http') then add key in query string + if (chatPath.startsWith("http") && accessStore.googleApiKey) { + chatPath += `&key=${accessStore.googleApiKey}`; + } + return chatPath; + } extractMessage(res: any) { console.log("[Response] gemini-pro response: ", res); @@ -20,9 +57,16 @@ export class GeminiProApi implements LLMApi { ); } async chat(options: ChatOptions): Promise { - // const apiClient = this; + const apiClient = this; let multimodal = false; - const messages = options.messages.map((v) => { + + // try get base64image from local cache image_url + const _messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = await preProcessImageContent(v.content); + _messages.push({ role: v.role, content }); + } + const messages = _messages.map((v) => { let parts: any[] = [{ text: getMessageTextContent(v) }]; if (isVisionModel(options.config.model)) { const images = getMessageImages(v); @@ -64,6 +108,9 @@ export class GeminiProApi implements LLMApi { // if (visionModel && messages.length > 1) { // options.onError?.(new Error("Multiturn chat is not enabled for models/gemini-pro-vision")); // } + + const accessStore = useAccessStore.getState(); + const modelConfig = { ...useAppConfig.getState().modelConfig, ...useChatStore.getState().currentSession().mask.modelConfig, @@ -85,48 +132,30 @@ export class GeminiProApi implements LLMApi { safetySettings: [ { category: "HARM_CATEGORY_HARASSMENT", - threshold: "BLOCK_ONLY_HIGH", + threshold: accessStore.googleSafetySettings, }, { category: "HARM_CATEGORY_HATE_SPEECH", - threshold: "BLOCK_ONLY_HIGH", + threshold: accessStore.googleSafetySettings, }, { category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", - threshold: "BLOCK_ONLY_HIGH", + threshold: accessStore.googleSafetySettings, }, { category: "HARM_CATEGORY_DANGEROUS_CONTENT", - threshold: "BLOCK_ONLY_HIGH", + threshold: accessStore.googleSafetySettings, }, ], }; - const accessStore = useAccessStore.getState(); - - let baseUrl = ""; - - if (accessStore.useCustomConfig) { - baseUrl = accessStore.googleUrl; - } - - const isApp = !!getClientConfig()?.isApp; - let shouldStream = !!options.config.stream; const controller = new AbortController(); options.onController?.(controller); try { - // let baseUrl = accessStore.googleUrl; + // https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/Streaming_REST.ipynb + const chatPath = this.path(Google.ChatPath(modelConfig.model)); - if (!baseUrl) { - baseUrl = isApp - ? 
DEFAULT_API_HOST + "/api/proxy/google/" + Google.ChatPath(modelConfig.model) - : this.path(Google.ChatPath(modelConfig.model)); - } - - if (isApp) { - baseUrl += `?key=${accessStore.googleApiKey}`; - } const chatPayload = { method: "POST", body: JSON.stringify(requestPayload), @@ -139,16 +168,17 @@ export class GeminiProApi implements LLMApi { () => controller.abort(), REQUEST_TIMEOUT_MS, ); - + if (shouldStream) { let responseText = ""; let remainText = ""; let finished = false; - let existingTexts: string[] = []; const finish = () => { - finished = true; - options.onFinish(existingTexts.join("")); + if (!finished) { + finished = true; + options.onFinish(responseText + remainText); + } }; // animate response to make it looks smooth @@ -173,74 +203,83 @@ export class GeminiProApi implements LLMApi { // start animaion animateResponseText(); - fetch( - baseUrl.replace("generateContent", "streamGenerateContent"), - chatPayload, - ) - .then((response) => { - const reader = response?.body?.getReader(); - const decoder = new TextDecoder(); - let partialData = ""; + controller.signal.onabort = finish; - return reader?.read().then(function processText({ - done, - value, - }): Promise { - if (done) { - if (response.status !== 200) { - try { - let data = JSON.parse(ensureProperEnding(partialData)); - if (data && data[0].error) { - options.onError?.(new Error(data[0].error.message)); - } else { - options.onError?.(new Error("Request failed")); - } - } catch (_) { - options.onError?.(new Error("Request failed")); - } - } + fetchEventSource(chatPath, { + ...chatPayload, + async onopen(res) { + clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log( + "[Gemini] request response content type: ", + contentType, + ); - console.log("Stream complete"); - // options.onFinish(responseText + remainText); - finished = true; - return Promise.resolve(); - } - - partialData += decoder.decode(value, { stream: true }); + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [responseText]; + let extraInfo = await res.clone().text(); try { - let data = JSON.parse(ensureProperEnding(partialData)); + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} - const textArray = data.reduce( - (acc: string[], item: { candidates: any[] }) => { - const texts = item.candidates.map((candidate) => - candidate.content.parts - .map((part: { text: any }) => part.text) - .join(""), - ); - return acc.concat(texts); - }, - [], - ); - - if (textArray.length > existingTexts.length) { - const deltaArray = textArray.slice(existingTexts.length); - existingTexts = textArray; - remainText += deltaArray.join(""); - } - } catch (error) { - // console.log("[Response Animation] error: ", error,partialData); - // skip error message when parsing json + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); } - return reader.read().then(processText); - }); - }) - .catch((error) => { - console.error("Error:", error); - }); + if (extraInfo) { + responseTexts.push(extraInfo); + } + + responseText = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const delta = 
apiClient.extractMessage(json); + + if (delta) { + remainText += delta; + } + + const blockReason = json?.promptFeedback?.blockReason; + if (blockReason) { + // being blocked + console.log(`[Google] [Safety Ratings] result:`, blockReason); + } + } catch (e) { + console.error("[Request] parse error", text, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); } else { - const res = await fetch(baseUrl, chatPayload); + const res = await fetch(chatPath, chatPayload); clearTimeout(requestTimeoutId); const resJson = await res.json(); if (resJson?.promptFeedback?.blockReason) { @@ -252,7 +291,7 @@ export class GeminiProApi implements LLMApi { ), ); } - const message = this.extractMessage(resJson); + const message = apiClient.extractMessage(resJson); options.onFinish(message); } } catch (e) { @@ -266,14 +305,4 @@ export class GeminiProApi implements LLMApi { async models(): Promise { return []; } - path(path: string): string { - return "/api/google/" + path; - } -} - -function ensureProperEnding(str: string) { - if (str.startsWith("[") && !str.endsWith("]")) { - return str + "]"; - } - return str; } diff --git a/app/client/platforms/iflytek.ts b/app/client/platforms/iflytek.ts new file mode 100644 index 000000000..73cea5ba0 --- /dev/null +++ b/app/client/platforms/iflytek.ts @@ -0,0 +1,240 @@ +"use client"; +import { + ApiPath, + DEFAULT_API_HOST, + Iflytek, + REQUEST_TIMEOUT_MS, +} from "@/app/constant"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; + +import { ChatOptions, getHeaders, LLMApi, LLMModel } from "../api"; +import Locale from "../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent } from "@/app/utils"; + +import { OpenAIListModelResponse, RequestPayload } from "./openai"; + +export class SparkApi implements LLMApi { + private disableListModels = true; + + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.iflytekUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + const apiPath = ApiPath.Iflytek; + baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Iflytek)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res.choices?.at(0)?.message?.content ?? 
""; + } + + async chat(options: ChatOptions) { + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = getMessageTextContent(v); + messages.push({ role: v.role, content }); + } + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + providerName: options.config.providerName, + }, + }; + + const requestPayload: RequestPayload = { + messages, + stream: options.config.stream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + // max_tokens: Math.max(modelConfig.max_tokens, 1024), + // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. + }; + + console.log("[Request] Spark payload: ", requestPayload); + + const shouldStream = !!options.config.stream; + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(Iflytek.ChatPath); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // Make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + let responseText = ""; + let remainText = ""; + let finished = false; + + // Animate response text to make it look smooth + function animateResponseText() { + if (finished || controller.signal.aborted) { + responseText += remainText; + console.log("[Response Animation] finished"); + return; + } + + if (remainText.length > 0) { + const fetchCount = Math.max(1, Math.round(remainText.length / 60)); + const fetchText = remainText.slice(0, fetchCount); + responseText += fetchText; + remainText = remainText.slice(fetchCount); + options.onUpdate?.(responseText, fetchText); + } + + requestAnimationFrame(animateResponseText); + } + + // Start animation + animateResponseText(); + + const finish = () => { + if (!finished) { + finished = true; + options.onFinish(responseText + remainText); + } + }; + + controller.signal.onabort = finish; + + fetchEventSource(chatPath, { + ...chatPayload, + async onopen(res) { + clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log("[Spark] request response content type: ", contentType); + + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + + // Handle different error scenarios + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + extraInfo = Locale.Error.Unauthorized; + } + + options.onError?.( + new Error( + `Request failed with status ${res.status}: ${extraInfo}`, + ), + ); + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { content: string }; + }>; + const delta = choices[0]?.delta?.content; + + if (delta) { + remainText += delta; + } + } catch (e) { + console.error("[Request] parse error", text); + 
options.onError?.(new Error(`Failed to parse response: ${text}`)); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + if (!res.ok) { + const errorText = await res.text(); + options.onError?.( + new Error(`Request failed with status ${res.status}: ${errorText}`), + ); + return; + } + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} diff --git a/app/client/platforms/moonshot.ts b/app/client/platforms/moonshot.ts new file mode 100644 index 000000000..cd10d2f6c --- /dev/null +++ b/app/client/platforms/moonshot.ts @@ -0,0 +1,208 @@ +"use client"; +// azure and openai, using same models. so using same LLMApi. +import { + ApiPath, + DEFAULT_API_HOST, + DEFAULT_MODELS, + Moonshot, + REQUEST_TIMEOUT_MS, + ServiceProvider, +} from "@/app/constant"; +import { + useAccessStore, + useAppConfig, + useChatStore, + ChatMessageTool, + usePluginStore, +} from "@/app/store"; +import { collectModelsWithDefaultModel } from "@/app/utils/model"; +import { preProcessImageContent, stream } from "@/app/utils/chat"; +import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; + +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + LLMUsage, + MultimodalContent, +} from "../api"; +import Locale from "../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent } from "@/app/utils"; + +import { OpenAIListModelResponse, RequestPayload } from "./openai"; + +export class MoonshotApi implements LLMApi { + private disableListModels = true; + + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.moonshotUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + const apiPath = ApiPath.Moonshot; + baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Moonshot)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res.choices?.at(0)?.message?.content ?? 
""; + } + + async chat(options: ChatOptions) { + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = getMessageTextContent(v); + messages.push({ role: v.role, content }); + } + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + providerName: options.config.providerName, + }, + }; + + const requestPayload: RequestPayload = { + messages, + stream: options.config.stream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + // max_tokens: Math.max(modelConfig.max_tokens, 1024), + // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. + }; + + console.log("[Request] openai payload: ", requestPayload); + + const shouldStream = !!options.config.stream; + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(Moonshot.ChatPath); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return stream( + chatPath, + requestPayload, + getHeaders(), + tools as any, + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { + content: string; + tool_calls: ChatMessageTool[]; + }; + }>; + const tool_calls = choices[0]?.delta?.tool_calls; + if (tool_calls?.length > 0) { + const index = tool_calls[0]?.index; + const id = tool_calls[0]?.id; + const args = tool_calls[0]?.function?.arguments; + if (id) { + runTools.push({ + id, + type: tool_calls[0]?.type, + function: { + name: tool_calls[0]?.function?.name as string, + arguments: args, + }, + }); + } else { + // @ts-ignore + runTools[index]["function"]["arguments"] += args; + } + } + return choices[0]?.delta?.content; + }, + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // @ts-ignore + requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + toolCallMessage, + ...toolCallResult, + ); + }, + options, + ); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index f35992630..664ff872b 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -1,13 +1,30 @@ "use client"; +// azure and openai, using same models. so using same LLMApi. 
import { ApiPath, DEFAULT_API_HOST, DEFAULT_MODELS, OpenaiPath, + Azure, REQUEST_TIMEOUT_MS, ServiceProvider, } from "@/app/constant"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { + ChatMessageTool, + useAccessStore, + useAppConfig, + useChatStore, + usePluginStore, +} from "@/app/store"; +import { collectModelsWithDefaultModel } from "@/app/utils/model"; +import { + preProcessImageContent, + uploadImage, + base64Image2Blob, + stream, +} from "@/app/utils/chat"; +import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; +import { DalleSize, DalleQuality, DalleStyle } from "@/app/typing"; import { ChatOptions, @@ -24,11 +41,11 @@ import { } from "@fortaine/fetch-event-source"; import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; -import { makeAzurePath } from "@/app/azure"; import { getMessageTextContent, getMessageImages, isVisionModel, + isDalle3 as _isDalle3, } from "@/app/utils"; export interface OpenAIListModelResponse { @@ -40,7 +57,7 @@ export interface OpenAIListModelResponse { }>; } -interface RequestPayload { +export interface RequestPayload { messages: { role: "system" | "user" | "assistant"; content: string | MultimodalContent[]; @@ -54,6 +71,16 @@ interface RequestPayload { max_tokens?: number; } +export interface DalleRequestPayload { + model: string; + prompt: string; + response_format: "url" | "b64_json"; + n: number; + size: DalleSize; + quality: DalleQuality; + style: DalleStyle; +} + export class ChatGPTApi implements LLMApi { private disableListModels = true; @@ -62,227 +89,241 @@ export class ChatGPTApi implements LLMApi { let baseUrl = ""; + const isAzure = path.includes("deployments"); if (accessStore.useCustomConfig) { - const isAzure = accessStore.provider === ServiceProvider.Azure; - if (isAzure && !accessStore.isValidAzure()) { throw Error( "incomplete azure config, please check it in your settings page", ); } - if (isAzure) { - path = makeAzurePath(path, accessStore.azureApiVersion); - } - baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl; } if (baseUrl.length === 0) { const isApp = !!getClientConfig()?.isApp; - baseUrl = isApp - ? DEFAULT_API_HOST + "/proxy" + ApiPath.OpenAI - : ApiPath.OpenAI; + const apiPath = isAzure ? ApiPath.Azure : ApiPath.OpenAI; + baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath; } if (baseUrl.endsWith("/")) { baseUrl = baseUrl.slice(0, baseUrl.length - 1); } - if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.OpenAI)) { + if ( + !baseUrl.startsWith("http") && + !isAzure && + !baseUrl.startsWith(ApiPath.OpenAI) + ) { baseUrl = "https://" + baseUrl; } console.log("[Proxy Endpoint] ", baseUrl, path); - return [baseUrl, path].join("/"); + // try rebuild url, when using cloudflare ai gateway in client + return cloudflareAIGatewayUrl([baseUrl, path].join("/")); } - extractMessage(res: any) { - return res.choices?.at(0)?.message?.content ?? ""; + async extractMessage(res: any) { + if (res.error) { + return "```\n" + JSON.stringify(res, null, 4) + "\n```"; + } + // dalle3 model return url, using url create image message + if (res.data) { + let url = res.data?.at(0)?.url ?? ""; + const b64_json = res.data?.at(0)?.b64_json ?? ""; + if (!url && b64_json) { + // uploadImage + url = await uploadImage(base64Image2Blob(b64_json, "image/png")); + } + return [ + { + type: "image_url", + image_url: { + url, + }, + }, + ]; + } + return res.choices?.at(0)?.message?.content ?? 
res; } async chat(options: ChatOptions) { - const visionModel = isVisionModel(options.config.model); - const messages = options.messages.map((v) => ({ - role: v.role, - content: visionModel ? v.content : getMessageTextContent(v), - })); - const modelConfig = { ...useAppConfig.getState().modelConfig, ...useChatStore.getState().currentSession().mask.modelConfig, ...{ model: options.config.model, + providerName: options.config.providerName, }, }; - const requestPayload: RequestPayload = { - messages, - stream: options.config.stream, - model: modelConfig.model, - temperature: modelConfig.temperature, - presence_penalty: modelConfig.presence_penalty, - frequency_penalty: modelConfig.frequency_penalty, - top_p: modelConfig.top_p, - // max_tokens: Math.max(modelConfig.max_tokens, 1024), - // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. - }; + let requestPayload: RequestPayload | DalleRequestPayload; - // add max_tokens to vision model - if (visionModel && modelConfig.model.includes("preview")) { - requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000); + const isDalle3 = _isDalle3(options.config.model); + const isO1 = options.config.model.startsWith("o1"); + if (isDalle3) { + const prompt = getMessageTextContent( + options.messages.slice(-1)?.pop() as any, + ); + requestPayload = { + model: options.config.model, + prompt, + // URLs are only valid for 60 minutes after the image has been generated. + response_format: "b64_json", // using b64_json, and save image in CacheStorage + n: 1, + size: options.config?.size ?? "1024x1024", + quality: options.config?.quality ?? "standard", + style: options.config?.style ?? "vivid", + }; + } else { + const visionModel = isVisionModel(options.config.model); + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = visionModel + ? await preProcessImageContent(v.content) + : getMessageTextContent(v); + if (!(isO1 && v.role === "system")) + messages.push({ role: v.role, content }); + } + + // O1 not support image, tools (plugin in ChatGPTNextWeb) and system, stream, logprobs, temperature, top_p, n, presence_penalty, frequency_penalty yet. + requestPayload = { + messages, + stream: !isO1 ? options.config.stream : false, + model: modelConfig.model, + temperature: !isO1 ? modelConfig.temperature : 1, + presence_penalty: !isO1 ? modelConfig.presence_penalty : 0, + frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0, + top_p: !isO1 ? modelConfig.top_p : 1, + // max_tokens: Math.max(modelConfig.max_tokens, 1024), + // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. 
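+        // (For o1 the sampling knobs above are pinned to API defaults and
+        // streaming is forced off below, since o1 currently rejects custom
+        // temperature/top_p/penalties, system messages, and streamed output.)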
+ }; + + // add max_tokens to vision model + if (visionModel) { + requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000); + } } console.log("[Request] openai payload: ", requestPayload); - const shouldStream = !!options.config.stream; + const shouldStream = !isDalle3 && !!options.config.stream && !isO1; const controller = new AbortController(); options.onController?.(controller); try { - const chatPath = this.path(OpenaiPath.ChatPath); - const chatPayload = { - method: "POST", - body: JSON.stringify(requestPayload), - signal: controller.signal, - headers: getHeaders(), - }; - - // make a fetch request - const requestTimeoutId = setTimeout( - () => controller.abort(), - REQUEST_TIMEOUT_MS, - ); - + let chatPath = ""; + if (modelConfig.providerName === ServiceProvider.Azure) { + // find model, and get displayName as deployName + const { models: configModels, customModels: configCustomModels } = + useAppConfig.getState(); + const { + defaultModel, + customModels: accessCustomModels, + useCustomConfig, + } = useAccessStore.getState(); + const models = collectModelsWithDefaultModel( + configModels, + [configCustomModels, accessCustomModels].join(","), + defaultModel, + ); + const model = models.find( + (model) => + model.name === modelConfig.model && + model?.provider?.providerName === ServiceProvider.Azure, + ); + chatPath = this.path( + (isDalle3 ? Azure.ImagePath : Azure.ChatPath)( + (model?.displayName ?? model?.name) as string, + useCustomConfig ? useAccessStore.getState().azureApiVersion : "", + ), + ); + } else { + chatPath = this.path( + isDalle3 ? OpenaiPath.ImagePath : OpenaiPath.ChatPath, + ); + } if (shouldStream) { - let responseText = ""; - let remainText = ""; - let finished = false; - - // animate response to make it looks smooth - function animateResponseText() { - if (finished || controller.signal.aborted) { - responseText += remainText; - console.log("[Response Animation] finished"); - if (responseText?.length === 0) { - options.onError?.(new Error("empty response from server")); + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + // console.log("getAsTools", tools, funcs); + stream( + chatPath, + requestPayload, + getHeaders(), + tools as any, + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { + content: string; + tool_calls: ChatMessageTool[]; + }; + }>; + const tool_calls = choices[0]?.delta?.tool_calls; + if (tool_calls?.length > 0) { + const index = tool_calls[0]?.index; + const id = tool_calls[0]?.id; + const args = tool_calls[0]?.function?.arguments; + if (id) { + runTools.push({ + id, + type: tool_calls[0]?.type, + function: { + name: tool_calls[0]?.function?.name as string, + arguments: args, + }, + }); + } else { + // @ts-ignore + runTools[index]["function"]["arguments"] += args; + } } - return; - } - - if (remainText.length > 0) { - const fetchCount = Math.max(1, Math.round(remainText.length / 60)); - const fetchText = remainText.slice(0, fetchCount); - responseText += fetchText; - remainText = remainText.slice(fetchCount); - options.onUpdate?.(responseText, fetchText); - } - - requestAnimationFrame(animateResponseText); - } - - // start animaion - animateResponseText(); - - const finish = () => { - if (!finished) { - finished = true; - options.onFinish(responseText + remainText); - } + return 
choices[0]?.delta?.content;
+          },
+          // processToolMessage, include tool_calls message and tool call results
+          (
+            requestPayload: RequestPayload,
+            toolCallMessage: any,
+            toolCallResult: any[],
+          ) => {
+            // @ts-ignore
+            requestPayload?.messages?.splice(
+              // @ts-ignore
+              requestPayload?.messages?.length,
+              0,
+              toolCallMessage,
+              ...toolCallResult,
+            );
+          },
+          options,
+        );
+      } else {
+        const chatPayload = {
+          method: "POST",
+          body: JSON.stringify(requestPayload),
+          signal: controller.signal,
+          headers: getHeaders(),
+        };

-        controller.signal.onabort = finish;
+        // make a fetch request
+        const requestTimeoutId = setTimeout(
+          () => controller.abort(),
+          isDalle3 || isO1 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+        );

-        fetchEventSource(chatPath, {
-          ...chatPayload,
-          async onopen(res) {
-            clearTimeout(requestTimeoutId);
-            const contentType = res.headers.get("content-type");
-            console.log(
-              "[OpenAI] request response content type: ",
-              contentType,
-            );
-
-            if (contentType?.startsWith("text/plain")) {
-              responseText = await res.clone().text();
-              return finish();
-            }
-
-            if (
-              !res.ok ||
-              !res.headers
-                .get("content-type")
-                ?.startsWith(EventStreamContentType) ||
-              res.status !== 200
-            ) {
-              const responseTexts = [responseText];
-              let extraInfo = await res.clone().text();
-              try {
-                const resJson = await res.clone().json();
-                extraInfo = prettyObject(resJson);
-              } catch {}
-
-              if (res.status === 401) {
-                responseTexts.push(Locale.Error.Unauthorized);
-              }
-
-              if (extraInfo) {
-                responseTexts.push(extraInfo);
-              }
-
-              responseText = responseTexts.join("\n\n");
-
-              return finish();
-            }
-          },
-          onmessage(msg) {
-            if (msg.data === "[DONE]" || finished) {
-              return finish();
-            }
-            const text = msg.data;
-            try {
-              const json = JSON.parse(text);
-              const choices = json.choices as Array<{
-                delta: { content: string };
-              }>;
-              const delta = choices[0]?.delta?.content;
-              const textmoderation = json?.prompt_filter_results;
-
-              if (delta) {
-                remainText += delta;
-              }
-
-              if (
-                textmoderation &&
-                textmoderation.length > 0 &&
-                ServiceProvider.Azure
-              ) {
-                const contentFilterResults =
-                  textmoderation[0]?.content_filter_results;
-                console.log(
-                  `[${ServiceProvider.Azure}] [Text Moderation] flagged categories result:`,
-                  contentFilterResults,
-                );
-              }
-            } catch (e) {
-              console.error("[Request] parse error", text, msg);
-            }
-          },
-          onclose() {
-            finish();
-          },
-          onerror(e) {
-            options.onError?.(e);
-            throw e;
-          },
-          openWhenHidden: true,
-        });
-      } else {
         const res = await fetch(chatPath, chatPayload);
         clearTimeout(requestTimeoutId);

         const resJson = await res.json();
-        const message = this.extractMessage(resJson);
+        const message = await this.extractMessage(resJson);
         options.onFinish(message);
       }
     } catch (e) {
@@ -369,20 +410,26 @@ export class ChatGPTApi implements LLMApi {
     });

     const resJson = (await res.json()) as OpenAIListModelResponse;
-    const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-"));
+    const chatModels = resJson.data?.filter(
+      (m) => m.id.startsWith("gpt-") || m.id.startsWith("chatgpt-"),
+    );
     console.log("[Models]", chatModels);

     if (!chatModels) {
       return [];
     }

+    // Since OpenAI's disableListModels currently defaults to true, this branch is effectively never reached.
+    let seq = 1000; // keep this ordering consistent with the one in Constant.ts
     return chatModels.map((m) => ({
       name: m.id,
       available: true,
+      sorted: seq++,
       provider: {
         id: "openai",
         providerName: "OpenAI",
         providerType: "openai",
+        sorted: 1,
       },
     }));
   }
diff --git a/app/client/platforms/tencent.ts b/app/client/platforms/tencent.ts
new file mode 100644
index 000000000..579008a9b
--- /dev/null
+++ b/app/client/platforms/tencent.ts
@@ -0,0 +1,268 @@
+"use client";
+import { ApiPath, DEFAULT_API_HOST, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
+
+import {
+  ChatOptions,
+  getHeaders,
+  LLMApi,
+  LLMModel,
+  MultimodalContent,
+} from "../api";
+import Locale from "../../locales";
+import {
+  EventStreamContentType,
+  fetchEventSource,
+} from "@fortaine/fetch-event-source";
+import { prettyObject } from "@/app/utils/format";
+import { getClientConfig } from "@/app/config/client";
+import { getMessageTextContent, isVisionModel } from "@/app/utils";
+import mapKeys from "lodash-es/mapKeys";
+import mapValues from "lodash-es/mapValues";
+import isArray from "lodash-es/isArray";
+import isObject from "lodash-es/isObject";
+
+export interface OpenAIListModelResponse {
+  object: string;
+  data: Array<{
+    id: string;
+    object: string;
+    root: string;
+  }>;
+}
+
+interface RequestPayload {
+  Messages: {
+    Role: "system" | "user" | "assistant";
+    Content: string | MultimodalContent[];
+  }[];
+  Stream?: boolean;
+  Model: string;
+  Temperature: number;
+  TopP: number;
+}
+
+function capitalizeKeys(obj: any): any {
+  if (isArray(obj)) {
+    return obj.map(capitalizeKeys);
+  } else if (isObject(obj)) {
+    return mapValues(
+      mapKeys(obj, (value: any, key: string) =>
+        key.replace(/(^|_)(\w)/g, (m, $1, $2) => $2.toUpperCase()),
+      ),
+      capitalizeKeys,
+    );
+  } else {
+    return obj;
+  }
+}
+
+export class HunyuanApi implements LLMApi {
+  path(): string {
+    const accessStore = useAccessStore.getState();
+
+    let baseUrl = "";
+
+    if (accessStore.useCustomConfig) {
+      baseUrl = accessStore.tencentUrl;
+    }
+
+    if (baseUrl.length === 0) {
+      const isApp = !!getClientConfig()?.isApp;
+      baseUrl = isApp
+        ? DEFAULT_API_HOST + "/api/proxy/tencent"
+        : ApiPath.Tencent;
+    }
+
+    if (baseUrl.endsWith("/")) {
+      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
+    }
+    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Tencent)) {
+      baseUrl = "https://" + baseUrl;
+    }
+
+    console.log("[Proxy Endpoint] ", baseUrl);
+    return baseUrl;
+  }
+
+  extractMessage(res: any) {
+    return res.Choices?.at(0)?.Message?.Content ?? "";
+  }
+
+  async chat(options: ChatOptions) {
+    const visionModel = isVisionModel(options.config.model);
+    const messages = options.messages.map((v, index) => ({
+      // "The system role in Messages must be placed at the very beginning of the list" (Tencent API requirement)
+      role: index !== 0 && v.role === "system" ? "user" : v.role,
+      content: visionModel ?
v.content : getMessageTextContent(v), + })); + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + }, + }; + + const requestPayload: RequestPayload = capitalizeKeys({ + model: modelConfig.model, + messages, + temperature: modelConfig.temperature, + top_p: modelConfig.top_p, + stream: options.config.stream, + }); + + console.log("[Request] Tencent payload: ", requestPayload); + + const shouldStream = !!options.config.stream; + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + let responseText = ""; + let remainText = ""; + let finished = false; + + // animate response to make it looks smooth + function animateResponseText() { + if (finished || controller.signal.aborted) { + responseText += remainText; + console.log("[Response Animation] finished"); + if (responseText?.length === 0) { + options.onError?.(new Error("empty response from server")); + } + return; + } + + if (remainText.length > 0) { + const fetchCount = Math.max(1, Math.round(remainText.length / 60)); + const fetchText = remainText.slice(0, fetchCount); + responseText += fetchText; + remainText = remainText.slice(fetchCount); + options.onUpdate?.(responseText, fetchText); + } + + requestAnimationFrame(animateResponseText); + } + + // start animaion + animateResponseText(); + + const finish = () => { + if (!finished) { + finished = true; + options.onFinish(responseText + remainText); + } + }; + + controller.signal.onabort = finish; + + fetchEventSource(chatPath, { + ...chatPayload, + async onopen(res) { + clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log( + "[Tencent] request response content type: ", + contentType, + ); + + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [responseText]; + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); + } + + if (extraInfo) { + responseTexts.push(extraInfo); + } + + responseText = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const choices = json.Choices as Array<{ + Delta: { Content: string }; + }>; + const delta = choices[0]?.Delta?.Content; + if (delta) { + remainText += delta; + } + } catch (e) { + console.error("[Request] parse error", text, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] 
failed to make a chat request", e);
+      options.onError?.(e as Error);
+    }
+  }
+  async usage() {
+    return {
+      used: 0,
+      total: 0,
+    };
+  }
+
+  async models(): Promise<LLMModel[]> {
+    return [];
+  }
+}
diff --git a/app/command.ts b/app/command.ts
index e515e5f0b..bea4e06f3 100644
--- a/app/command.ts
+++ b/app/command.ts
@@ -41,13 +41,16 @@ interface ChatCommands {
   del?: Command;
 }
 
-export const ChatCommandPrefix = ":";
+// Compatible with Chinese colon character ":"
+export const ChatCommandPrefix = /^[::]/;
 
 export function useChatCommand(commands: ChatCommands = {}) {
   function extract(userInput: string) {
-    return (
-      userInput.startsWith(ChatCommandPrefix) ? userInput.slice(1) : userInput
-    ) as keyof ChatCommands;
+    const match = userInput.match(ChatCommandPrefix);
+    if (match) {
+      return userInput.slice(1) as keyof ChatCommands;
+    }
+    return userInput as keyof ChatCommands;
   }
 
   function search(userInput: string) {
@@ -57,7 +60,7 @@ export function useChatCommand(commands: ChatCommands = {}) {
       .filter((c) => c.startsWith(input))
       .map((c) => ({
         title: desc[c as keyof ChatCommands],
-        content: ChatCommandPrefix + c,
+        content: ":" + c,
       }));
   }
diff --git a/app/components/artifacts.module.scss b/app/components/artifacts.module.scss
new file mode 100644
index 000000000..6bd0fd9cf
--- /dev/null
+++ b/app/components/artifacts.module.scss
@@ -0,0 +1,31 @@
+.artifacts {
+  display: flex;
+  width: 100%;
+  height: 100%;
+  flex-direction: column;
+  &-header {
+    display: flex;
+    align-items: center;
+    height: 36px;
+    padding: 20px;
+    background: var(--second);
+  }
+  &-title {
+    flex: 1;
+    text-align: center;
+    font-weight: bold;
+    font-size: 24px;
+  }
+  &-content {
+    flex-grow: 1;
+    padding: 0 20px 20px 20px;
+    background-color: var(--second);
+  }
+}
+
+.artifacts-iframe {
+  width: 100%;
+  border: var(--border-in-light);
+  border-radius: 6px;
+  background-color: var(--gray);
+}
diff --git a/app/components/artifacts.tsx b/app/components/artifacts.tsx
new file mode 100644
index 000000000..d725ee659
--- /dev/null
+++ b/app/components/artifacts.tsx
@@ -0,0 +1,267 @@
+import {
+  useEffect,
+  useState,
+  useRef,
+  useMemo,
+  forwardRef,
+  useImperativeHandle,
+} from "react";
+import { useParams } from "react-router";
+import { useWindowSize } from "@/app/utils";
+import { IconButton } from "./button";
+import { nanoid } from "nanoid";
+import ExportIcon from "../icons/share.svg";
+import CopyIcon from "../icons/copy.svg";
+import DownloadIcon from "../icons/download.svg";
+import GithubIcon from "../icons/github.svg";
+import LoadingButtonIcon from "../icons/loading.svg";
+import ReloadButtonIcon from "../icons/reload.svg";
+import Locale from "../locales";
+import { Modal, showToast } from "./ui-lib";
+import { copyToClipboard, downloadAs } from "../utils";
+import { Path, ApiPath, REPO_URL } from "@/app/constant";
+import { Loading } from "./home";
+import styles from "./artifacts.module.scss";
+
+type HTMLPreviewProps = {
+  code: string;
+  autoHeight?: boolean;
+  height?: number | string;
+  onLoad?: (title?: string) => void;
+};
+
+export type HTMLPreviewHander = {
+  reload: () => void;
+};
+
+export const HTMLPreview = forwardRef<HTMLPreviewHander, HTMLPreviewProps>(
+  function HTMLPreview(props, ref) {
+    const iframeRef = useRef<HTMLIFrameElement>(null);
+    const [frameId, setFrameId] = useState(nanoid());
+    const [iframeHeight, setIframeHeight] = useState(600);
+    const [title, setTitle] = useState("");
+    /*
+     * https://stackoverflow.com/questions/19739001/what-is-the-difference-between-srcdoc-and-src-datatext-html-in-an
+     * 1. using srcdoc
+     * 2. using src with dataurl:
+     *    easy to share
+     *    length limit (Data URIs cannot be larger than 32,768 characters.)
+     */
+
+    useEffect(() => {
+      const handleMessage = (e: any) => {
+        const { id, height, title } = e.data;
+        setTitle(title);
+        if (id == frameId) {
+          setIframeHeight(height);
+        }
+      };
+      window.addEventListener("message", handleMessage);
+      return () => {
+        window.removeEventListener("message", handleMessage);
+      };
+    }, [frameId]);
+
+    useImperativeHandle(ref, () => ({
+      reload: () => {
+        setFrameId(nanoid());
+      },
+    }));
+
+    const height = useMemo(() => {
+      if (!props.autoHeight) return props.height || 600;
+      if (typeof props.height === "string") {
+        return props.height;
+      }
+      const parentHeight = props.height || 600;
+      return iframeHeight + 40 > parentHeight
+        ? parentHeight
+        : iframeHeight + 40;
+    }, [props.autoHeight, props.height, iframeHeight]);
+
+    const srcDoc = useMemo(() => {
+      // Inline script injected into the iframe document so it can report its
+      // rendered height back to this component via postMessage.
+      const script = `<script>new ResizeObserver((entries) => parent.postMessage({ id: "${frameId}", height: entries[0].target.clientHeight }, "*")).observe(document.body)</script>`;
+      if (props.code.includes("</head>")) {
+        return props.code.replace("</head>", "</head>" + script);
+      }
+      return script + props.code;
+    }, [props.code, frameId]);
+
+    const handleOnLoad = () => {
+      if (props?.onLoad) {
+        props.onLoad(title);
+      }
+    };
+
+    return (
+