diff --git a/.env.template b/.env.template index c4ca732c9..25addf2b3 100644 --- a/.env.template +++ b/.env.template @@ -9,12 +9,12 @@ PROXY_URL=http://localhost:7890 # (optional) # Default: Empty -# Googel Gemini Pro API key, set if you want to use Google Gemini Pro API. +# Google Gemini Pro API key, set if you want to use Google Gemini Pro API. GOOGLE_API_KEY= # (optional) # Default: https://generativelanguage.googleapis.com/ -# Googel Gemini Pro API url without pathname, set if you want to customize Google Gemini Pro API url. +# Google Gemini Pro API url without pathname, set if you want to customize Google Gemini Pro API url. GOOGLE_URL= # Override openai api request base url. (optional) diff --git a/README.md b/README.md index 24967c164..472102cdc 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,8 @@
-icon + + + icon +

NextChat (ChatGPT Next Web)

@@ -14,9 +17,9 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 [![MacOS][MacOS-image]][download-url] [![Linux][Linux-image]][download-url] -[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Twitter](https://twitter.com/NextChatDev) +[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev) -[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) +[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) [web-url]: https://app.nextchat.dev/ [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases @@ -25,16 +28,38 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 [MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple [Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu -[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) - -[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/ZBUEFA) - -[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) - -![cover](./docs/images/cover.png) +[Deploy with Vercel](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) 
[Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
+## Enterprise Edition + +Meeting Your Company's Privatization and Customization Deployment Requirements: +- **Brand Customization**: Tailored VI/UI to seamlessly align with your corporate brand image. +- **Resource Integration**: Unified configuration and management of dozens of AI resources by company administrators, ready for use by team members. +- **Permission Control**: Clearly defined member permissions, resource permissions, and knowledge base permissions, all controlled via a corporate-grade Admin Panel. +- **Knowledge Integration**: Combining your internal knowledge base with AI capabilities, making it more relevant to your company's specific business needs compared to general AI. +- **Security Auditing**: Automatically intercept sensitive inquiries and trace all historical conversation records, ensuring AI adherence to corporate information security standards. +- **Private Deployment**: Enterprise-level private deployment supporting various mainstream private cloud solutions, ensuring data security and privacy protection. +- **Continuous Updates**: Ongoing updates and upgrades in cutting-edge capabilities like multimodal AI, ensuring consistent innovation and advancement. 
+ +For enterprise inquiries, please contact: **business@nextchat.dev** + +## 企业版 + +满足企业用户私有化部署和个性化定制需求: +- **品牌定制**:企业量身定制 VI/UI,与企业品牌形象无缝契合 +- **资源集成**:由企业管理人员统一配置和管理数十种 AI 资源,团队成员开箱即用 +- **权限管理**:成员权限、资源权限、知识库权限层级分明,企业级 Admin Panel 统一控制 +- **知识接入**:企业内部知识库与 AI 能力相结合,比通用 AI 更贴近企业自身业务需求 +- **安全审计**:自动拦截敏感提问,支持追溯全部历史对话记录,让 AI 也能遵循企业信息安全规范 +- **私有部署**:企业级私有部署,支持各类主流私有云部署,确保数据安全和隐私保护 +- **持续更新**:提供多模态、智能体等前沿能力持续更新升级服务,常用常新、持续先进 + +企业版咨询: **business@nextchat.dev** + + + ## Features - **Deploy for free with one-click** on Vercel in under 1 minute @@ -49,6 +74,12 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 - Automatically compresses chat history to support long conversations while also saving your tokens - I18n: English, 简体中文, 繁体中文, 日本語, Français, Español, Italiano, Türkçe, Deutsch, Tiếng Việt, Русский, Čeština, 한국어, Indonesia +
+ +![主界面](./docs/images/cover.png) + +
+ ## Roadmap - [x] System Prompt: pin a user defined prompt as system prompt [#138](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138) @@ -180,8 +211,7 @@ Specify OpenAI organization ID. ### `AZURE_URL` (optional) -> Example: https://{azure-resource-url}/openai/deployments/{deploy-name} -> if you config deployment name in `CUSTOM_MODELS`, you can remove `{deploy-name}` in `AZURE_URL` +> Example: https://{azure-resource-url}/openai Azure deploy url. @@ -276,6 +306,7 @@ User `-all` to disable all default models, `+all` to enable all default models. For Azure: use `modelName@azure=deploymentName` to customize model name and deployment name. > Example: `+gpt-3.5-turbo@azure=gpt35` will show option `gpt35(Azure)` in model list. +> If you only can use Azure model, `-all,+gpt-3.5-turbo@azure=gpt35` will `gpt35(Azure)` the only option in model list. For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name. > Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list. diff --git a/README_CN.md b/README_CN.md index 5400bb276..e42288bb5 100644 --- a/README_CN.md +++ b/README_CN.md @@ -1,22 +1,34 @@
-预览 + + + icon +

NextChat

一键免费部署你的私人 ChatGPT 网页应用,支持 GPT3, GPT4 & Gemini Pro 模型。 -[演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) +[企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) -[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web) - -[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/ZBUEFA) - -[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) - -![主界面](./docs/images/cover.png) +[Deploy with Vercel](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
+## 企业版 + +满足您公司私有化部署和定制需求 +- **品牌定制**:企业量身定制 VI/UI,与企业品牌形象无缝契合 +- **资源集成**:由企业管理人员统一配置和管理数十种 AI 资源,团队成员开箱即用 +- **权限管理**:成员权限、资源权限、知识库权限层级分明,企业级 Admin Panel 统一控制 +- **知识接入**:企业内部知识库与 AI 能力相结合,比通用 AI 更贴近企业自身业务需求 +- **安全审计**:自动拦截敏感提问,支持追溯全部历史对话记录,让 AI 也能遵循企业信息安全规范 +- **私有部署**:企业级私有部署,支持各类主流私有云部署,确保数据安全和隐私保护 +- **持续更新**:提供多模态、智能体等前沿能力持续更新升级服务,常用常新、持续先进 + +企业版咨询: **business@nextchat.dev** + + + ## 开始使用 1. 准备好你的 [OpenAI API Key](https://platform.openai.com/account/api-keys); @@ -25,6 +37,12 @@ 3. 部署完毕后,即可开始使用; 4. (可选)[绑定自定义域名](https://vercel.com/docs/concepts/projects/domains/add-a-domain):Vercel 分配的域名 DNS 在某些区域被污染了,绑定自定义域名即可直连。 +
+ +![主界面](./docs/images/cover.png) + +
+ ## 保持更新 如果你按照上述步骤一键部署了自己的项目,可能会发现总是提示“存在更新”的问题,这是由于 Vercel 会默认为你创建一个新项目而不是 fork 本项目,这会导致无法正确地检测更新。 @@ -94,8 +112,7 @@ OpenAI 接口代理 URL,如果你手动配置了 openai 接口代理,请填 ### `AZURE_URL` (可选) -> 形如:https://{azure-resource-url}/openai/deployments/{deploy-name} -> 如果你已经在`CUSTOM_MODELS`中参考`displayName`的方式配置了{deploy-name},那么可以从`AZURE_URL`中移除`{deploy-name}` +> 形如:https://{azure-resource-url}/openai Azure 部署地址。 @@ -186,7 +203,8 @@ ByteDance Api Url. 用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。 在Azure的模式下,支持使用`modelName@azure=deploymentName`的方式配置模型名称和部署名称(deploy-name) -> 示例:`+gpt-3.5-turbo@azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项 +> 示例:`+gpt-3.5-turbo@azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项。 +> 如果你只能使用Azure模式,那么设置 `-all,+gpt-3.5-turbo@azure=gpt35` 则可以让对话的默认使用 `gpt35(Azure)` 在ByteDance的模式下,支持使用`modelName@bytedance=deploymentName`的方式配置模型名称和部署名称(deploy-name) > 示例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx`这个配置会在模型列表显示一个`Doubao-lite-4k(ByteDance)`的选项 diff --git a/app/api/google/[...path]/route.ts b/app/api/google/[...path]/route.ts index dc241ba1d..83a7ce794 100644 --- a/app/api/google/[...path]/route.ts +++ b/app/api/google/[...path]/route.ts @@ -1,7 +1,15 @@ import { NextRequest, NextResponse } from "next/server"; import { auth } from "../../auth"; import { getServerSideConfig } from "@/app/config/server"; -import { GEMINI_BASE_URL, Google, ModelProvider } from "@/app/constant"; +import { + ApiPath, + GEMINI_BASE_URL, + Google, + ModelProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; + +const serverConfig = getServerSideConfig(); async function handle( req: NextRequest, @@ -13,32 +21,6 @@ async function handle( return NextResponse.json({ body: "OK" }, { status: 200 }); } - const controller = new AbortController(); - - const serverConfig = getServerSideConfig(); - - let baseUrl = serverConfig.googleUrl || GEMINI_BASE_URL; - - if (!baseUrl.startsWith("http")) { - baseUrl = `https://${baseUrl}`; - } - - if 
(baseUrl.endsWith("/")) { - baseUrl = baseUrl.slice(0, -1); - } - - let path = `${req.nextUrl.pathname}`.replaceAll("/api/google/", ""); - - console.log("[Proxy] ", path); - console.log("[Base Url]", baseUrl); - - const timeoutId = setTimeout( - () => { - controller.abort(); - }, - 10 * 60 * 1000, - ); - const authResult = auth(req, ModelProvider.GeminiPro); if (authResult.error) { return NextResponse.json(authResult, { @@ -49,9 +31,9 @@ async function handle( const bearToken = req.headers.get("Authorization") ?? ""; const token = bearToken.trim().replaceAll("Bearer ", "").trim(); - const key = token ? token : serverConfig.googleApiKey; + const apiKey = token ? token : serverConfig.googleApiKey; - if (!key) { + if (!apiKey) { return NextResponse.json( { error: true, @@ -62,10 +44,63 @@ async function handle( }, ); } + try { + const response = await request(req, apiKey); + return response; + } catch (e) { + console.error("[Google] ", e); + return NextResponse.json(prettyObject(e)); + } +} - const fetchUrl = `${baseUrl}/${path}?key=${key}${ - req?.nextUrl?.searchParams?.get("alt") == "sse" ? 
"&alt=sse" : "" +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "bom1", + "cle1", + "cpt1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; + +async function request(req: NextRequest, apiKey: string) { + const controller = new AbortController(); + + let baseUrl = serverConfig.googleUrl || GEMINI_BASE_URL; + + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Google, ""); + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + const fetchUrl = `${baseUrl}${path}?key=${apiKey}${ + req?.nextUrl?.searchParams?.get("alt") === "sse" ? "&alt=sse" : "" }`; + + console.log("[Fetch Url] ", fetchUrl); const fetchOptions: RequestInit = { headers: { "Content-Type": "application/json", @@ -97,22 +132,3 @@ async function handle( clearTimeout(timeoutId); } } - -export const GET = handle; -export const POST = handle; - -// export const runtime = "edge"; -export const preferredRegion = [ - "bom1", - "cle1", - "cpt1", - "gru1", - "hnd1", - "iad1", - "icn1", - "kix1", - "pdx1", - "sfo1", - "sin1", - "syd1", -]; diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts index b6cf6f9d5..8acde1a83 100644 --- a/app/client/platforms/google.ts +++ b/app/client/platforms/google.ts @@ -1,4 +1,4 @@ -import { Google, REQUEST_TIMEOUT_MS, ApiPath } from "@/app/constant"; +import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant"; import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; import { getClientConfig } from "@/app/config/client"; @@ -16,6 +16,34 @@ import { } from "@/app/utils"; 
export class GeminiProApi implements LLMApi { + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + if (accessStore.useCustomConfig) { + baseUrl = accessStore.googleUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + baseUrl = isApp + ? DEFAULT_API_HOST + `/api/proxy/google?key=${accessStore.googleApiKey}` + : ApiPath.Google; + } + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Google)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + let chatPath = [baseUrl, path].join("/"); + + chatPath += chatPath.includes("?") ? "&alt=sse" : "?alt=sse"; + return chatPath; + } extractMessage(res: any) { console.log("[Response] gemini-pro response: ", res); @@ -108,30 +136,13 @@ export class GeminiProApi implements LLMApi { ], }; - const accessStore = useAccessStore.getState(); - - let baseUrl: string = ApiPath.Google; - - if (accessStore.useCustomConfig) { - baseUrl = accessStore.googleUrl; - } - - const isApp = !!getClientConfig()?.isApp; - let shouldStream = !!options.config.stream; const controller = new AbortController(); options.onController?.(controller); try { - if (!baseUrl && isApp) { - baseUrl = DEFAULT_API_HOST + "/api/proxy/google/"; - } - baseUrl = `${baseUrl}/${Google.ChatPath(modelConfig.model)}`.replaceAll( - "//", - "/", - ); - if (isApp) { - baseUrl += `?key=${accessStore.googleApiKey}`; - } + // https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/Streaming_REST.ipynb + const chatPath = this.path(Google.ChatPath(modelConfig.model)); + const chatPayload = { method: "POST", body: JSON.stringify(requestPayload), @@ -181,10 +192,6 @@ export class GeminiProApi implements LLMApi { controller.signal.onabort = finish; - // https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/Streaming_REST.ipynb - const 
chatPath = - baseUrl.replace("generateContent", "streamGenerateContent") + - (baseUrl.indexOf("?") > -1 ? "&alt=sse" : "?alt=sse"); fetchEventSource(chatPath, { ...chatPayload, async onopen(res) { @@ -259,7 +266,7 @@ export class GeminiProApi implements LLMApi { openWhenHidden: true, }); } else { - const res = await fetch(baseUrl, chatPayload); + const res = await fetch(chatPath, chatPayload); clearTimeout(requestTimeoutId); const resJson = await res.json(); if (resJson?.promptFeedback?.blockReason) { @@ -285,14 +292,4 @@ export class GeminiProApi implements LLMApi { async models(): Promise { return []; } - path(path: string): string { - return "/api/google/" + path; - } -} - -function ensureProperEnding(str: string) { - if (str.startsWith("[") && !str.endsWith("]")) { - return str + "]"; - } - return str; } diff --git a/app/config/server.ts b/app/config/server.ts index 71ea0bacf..2c1e54347 100644 --- a/app/config/server.ts +++ b/app/config/server.ts @@ -22,7 +22,7 @@ declare global { ENABLE_BALANCE_QUERY?: string; // allow user to query balance or not DISABLE_FAST_LINK?: string; // disallow parse settings from url or not CUSTOM_MODELS?: string; // to control custom models - DEFAULT_MODEL?: string; // to cnntrol default model in every new chat window + DEFAULT_MODEL?: string; // to control default model in every new chat window // azure only AZURE_URL?: string; // https://{azure-url}/openai/deployments/{deploy-name} diff --git a/app/constant.ts b/app/constant.ts index 11247ccc9..ee45d0e49 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -122,9 +122,8 @@ export const Azure = { export const Google = { ExampleEndpoint: "https://generativelanguage.googleapis.com/", - ChatPath: (modelName: string) => `v1beta/models/${modelName}:generateContent`, - // VisionChatPath: (modelName: string) => - // `v1beta/models/${modelName}:generateContent`, + ChatPath: (modelName: string) => + `v1beta/models/${modelName}:streamGenerateContent`, }; export const Baidu = { @@ -188,6 
+187,8 @@ export const KnowledgeCutOffDate: Record = { "gpt-4-turbo-2024-04-09": "2023-12", "gpt-4-turbo-preview": "2023-12", "gpt-4o-2024-05-13": "2023-10", + "gpt-4o-mini": "2023-10", + "gpt-4o-mini-2024-07-18": "2023-10", "gpt-4-vision-preview": "2023-04", // After improvements, // it's now easier to add "KnowledgeCutOffDate" instead of stupid hardcoding it, as was done previously. @@ -207,6 +208,8 @@ const openaiModels = [ "gpt-4-turbo-preview", "gpt-4o", "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", "gpt-4-vision-preview", "gpt-4-turbo-2024-04-09", "gpt-4-1106-preview", diff --git a/app/store/chat.ts b/app/store/chat.ts index 243aa25ad..ffc6f7c31 100644 --- a/app/store/chat.ts +++ b/app/store/chat.ts @@ -9,8 +9,6 @@ import { DEFAULT_MODELS, DEFAULT_SYSTEM_TEMPLATE, KnowledgeCutOffDate, - ServiceProvider, - ModelProvider, StoreKey, SUMMARIZE_MODEL, GEMINI_SUMMARIZE_MODEL, diff --git a/app/store/prompt.ts b/app/store/prompt.ts index 0e16a6380..ea4a26b78 100644 --- a/app/store/prompt.ts +++ b/app/store/prompt.ts @@ -168,7 +168,7 @@ export const usePromptStore = createPersistStore( fetch(PROMPT_URL) .then((res) => res.json()) .then((res) => { - let fetchPrompts = [res.en, res.cn]; + let fetchPrompts = [res.en, res.tw, res.cn]; if (getLang() === "cn") { fetchPrompts = fetchPrompts.reverse(); } @@ -183,50 +183,59 @@ export const usePromptStore = createPersistStore( }) as Prompt, ); }); + + const userPrompts = usePromptStore.getState().getUserPrompts() ?? 
[]; + + const allPromptsForSearch = builtinPrompts + .reduce((pre, cur) => pre.concat(cur), []) + .filter((v) => !!v.title && !!v.content); + SearchService.count.builtin = + res.en.length + res.cn.length + res.tw.length; + SearchService.init(allPromptsForSearch, userPrompts); // let gptPrompts: Prompt[] = []; - try { - fetch(GPT_PROMPT_URL) - .then((res2) => res2.json()) - .then((res2) => { - const gptPrompts: Prompt[] = res2["items"].map( - (prompt: { - id: string; - title: string; - description: string; - prompt: string; - category: string; - }) => { - return { - id: prompt["id"], - title: prompt["title"], - content: prompt["prompt"], - createdAt: Date.now(), - }; - }, - ); - const userPrompts = - usePromptStore.getState().getUserPrompts() ?? []; - const allPromptsForSearch = builtinPrompts - .reduce((pre, cur) => pre.concat(cur), []) - .filter((v) => !!v.title && !!v.content); - SearchService.count.builtin = - res.en.length + res.cn.length + res["total"]; - SearchService.init( - allPromptsForSearch, - userPrompts, - gptPrompts, - ); - }); - } catch (e) { - console.log("[gpt prompt]", e); - const userPrompts = - usePromptStore.getState().getUserPrompts() ?? []; - const allPromptsForSearch = builtinPrompts - .reduce((pre, cur) => pre.concat(cur), []) - .filter((v) => !!v.title && !!v.content); - SearchService.count.builtin = res.en.length + res.cn.length; - SearchService.init(allPromptsForSearch, userPrompts); - } + // try { + // fetch(GPT_PROMPT_URL) + // .then((res2) => res2.json()) + // .then((res2) => { + // const gptPrompts: Prompt[] = res2["items"].map( + // (prompt: { + // id: string; + // title: string; + // description: string; + // prompt: string; + // category: string; + // }) => { + // return { + // id: prompt["id"], + // title: prompt["title"], + // content: prompt["prompt"], + // createdAt: Date.now(), + // }; + // }, + // ); + // const userPrompts = + // usePromptStore.getState().getUserPrompts() ?? 
[]; + // const allPromptsForSearch = builtinPrompts + // .reduce((pre, cur) => pre.concat(cur), []) + // .filter((v) => !!v.title && !!v.content); + // SearchService.count.builtin = + // res.en.length + res.cn.length + res["total"]; + // SearchService.init( + // allPromptsForSearch, + // userPrompts, + // gptPrompts, + // ); + // }); + // } catch (e) { + // console.log("[gpt prompt]", e); + // const userPrompts = + // usePromptStore.getState().getUserPrompts() ?? []; + // const allPromptsForSearch = builtinPrompts + // .reduce((pre, cur) => pre.concat(cur), []) + // .filter((v) => !!v.title && !!v.content); + // SearchService.count.builtin = res.en.length + res.cn.length; + // SearchService.init(allPromptsForSearch, userPrompts); + // } }); }, }, diff --git a/app/utils.ts b/app/utils.ts index 8f7adc7e2..2f2c8ae95 100644 --- a/app/utils.ts +++ b/app/utils.ts @@ -256,6 +256,7 @@ export function isVisionModel(model: string) { "gemini-1.5-pro", "gemini-1.5-flash", "gpt-4o", + "gpt-4o-mini", ]; const isGpt4Turbo = model.includes("gpt-4-turbo") && !model.includes("preview"); diff --git a/docs/images/ent.svg b/docs/images/ent.svg new file mode 100644 index 000000000..749d66743 --- /dev/null +++ b/docs/images/ent.svg @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/scripts/fetch-prompts.mjs b/scripts/fetch-prompts.mjs index 56c042022..4107f60a9 100644 --- a/scripts/fetch-prompts.mjs +++ b/scripts/fetch-prompts.mjs @@ -6,11 +6,13 @@ const MIRRORF_FILE_URL = "http://raw.fgit.ml/"; const RAW_CN_URL = "PlexPt/awesome-chatgpt-prompts-zh/main/prompts-zh.json"; const CN_URL = MIRRORF_FILE_URL + RAW_CN_URL; +const RAW_TW_URL = "PlexPt/awesome-chatgpt-prompts-zh/main/prompts-zh-TW.json"; +const TW_URL = MIRRORF_FILE_URL + RAW_TW_URL; const RAW_EN_URL = "f/awesome-chatgpt-prompts/main/prompts.csv"; const EN_URL = MIRRORF_FILE_URL + RAW_EN_URL; const FILE = "./public/prompts.json"; -const ignoreWords = ["涩涩", 
"魅魔"]; +const ignoreWords = ["涩涩", "魅魔", "澀澀"]; const timeoutPromise = (timeout) => { return new Promise((resolve, reject) => { @@ -39,6 +41,25 @@ async function fetchCN() { } } +async function fetchTW() { + console.log("[Fetch] fetching tw prompts..."); + try { + const response = await Promise.race([fetch(TW_URL), timeoutPromise(5000)]); + const raw = await response.json(); + return raw + .map((v) => [v.act, v.prompt]) + .filter( + (v) => + v[0] && + v[1] && + ignoreWords.every((w) => !v[0].includes(w) && !v[1].includes(w)), + ); + } catch (error) { + console.error("[Fetch] failed to fetch tw prompts", error); + return []; + } +} + async function fetchEN() { console.log("[Fetch] fetching en prompts..."); try { @@ -61,13 +82,13 @@ async function fetchEN() { } async function main() { - Promise.all([fetchCN(), fetchEN()]) - .then(([cn, en]) => { - fs.writeFile(FILE, JSON.stringify({ cn, en })); + Promise.all([fetchCN(), fetchTW(), fetchEN()]) + .then(([cn, tw, en]) => { + fs.writeFile(FILE, JSON.stringify({ cn, tw, en })); }) .catch((e) => { console.error("[Fetch] failed to fetch prompts"); - fs.writeFile(FILE, JSON.stringify({ cn: [], en: [] })); + fs.writeFile(FILE, JSON.stringify({ cn: [], tw: [], en: [] })); }) .finally(() => { console.log("[Fetch] saved to " + FILE);