Merge remote-tracking branch 'origin/upstream/main'
commit 4e3782d524
@@ -85,6 +85,7 @@ docker image: `ppcelery/one-api:latest`
 + [x] [DeepL](https://www.deepl.com/)
 + [x] [together.ai](https://www.together.ai/)
 + [x] [novita.ai](https://www.novita.ai/)
++ [x] [硅基流动 SiliconCloud](https://siliconflow.cn/siliconcloud)
 2. Supports configuring mirror sites and numerous [third-party proxy services](https://iamazing.cn/page/openai-api-third-party-services).
 3. Supports accessing multiple channels via **load balancing**.
 4. Supports **stream mode**, enabling a typewriter effect through streaming responses.
@@ -247,9 +248,9 @@ docker run --name chatgpt-web -d -p 3002:3002 -e OPENAI_API_BASE_URL=https://ope
 #### QChatGPT - QQ Bot
 Project homepage: https://github.com/RockChinQ/QChatGPT

-After completing deployment per the documentation, set the `reverse_proxy` field of the `openai_config` option in `config.py` to the One API backend address, set `api_key` to a key generated by One API, and set the `model` parameter of the `completion_api_params` option to a model name supported by One API.
+After completing deployment per the [documentation](https://qchatgpt.rockchin.top), set `requester.openai-chat-completions.base-url` in `data/provider.json` to the One API instance address, fill your API key into the `keys.openai` group, and set `model` to the model name you want to use.

-You can install the [Switcher plugin](https://github.com/RockChinQ/Switcher) to switch models at runtime.
+While running, use the `!model` command to view and switch among the available models.

 ### Deploying to Third-Party Platforms
 <details>
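For reference, a hypothetical minimal `data/provider.json` excerpt matching the updated QChatGPT instructions above. The key names (`requester.openai-chat-completions.base-url`, `keys.openai`, `model`) come from the text; the exact nesting, the example host, and the placeholder key are assumptions, so treat this as a sketch rather than the canonical schema:

```json
{
  "requester": {
    "openai-chat-completions": {
      "base-url": "https://your-one-api-host/v1"
    }
  },
  "keys": {
    "openai": ["sk-your-one-api-key"]
  },
  "model": "gpt-3.5-turbo"
}
```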
@@ -1,6 +1,7 @@ package cloudflare
 package cloudflare

 var ModelList = []string{
+	"@cf/meta/llama-3.1-8b-instruct",
 	"@cf/meta/llama-2-7b-chat-fp16",
 	"@cf/meta/llama-2-7b-chat-int8",
 	"@cf/mistral/mistral-7b-instruct-v0.1",
@@ -3,6 +3,5 @@ package gemini
 // https://ai.google.dev/models/gemini

 var ModelList = []string{
-	"gemini-pro", "gemini-1.0-pro-001", "gemini-1.5-pro",
-	"gemini-pro-vision", "gemini-1.0-pro-vision-001", "embedding-001", "text-embedding-004",
+	"gemini-pro", "gemini-1.0-pro", "gemini-1.5-flash", "gemini-1.5-pro", "text-embedding-004", "aqa",
 }
@@ -4,9 +4,14 @@ package groq

 var ModelList = []string{
 	"gemma-7b-it",
-	"llama2-7b-2048",
-	"llama2-70b-4096",
 	"mixtral-8x7b-32768",
 	"llama3-8b-8192",
 	"llama3-70b-8192",
+	"gemma2-9b-it",
+	"llama-3.1-405b-reasoning",
+	"llama-3.1-70b-versatile",
+	"llama-3.1-8b-instant",
+	"llama3-groq-70b-8192-tool-use-preview",
+	"llama3-groq-8b-8192-tool-use-preview",
+	"whisper-large-v3",
 }
@@ -24,7 +24,7 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
 	// https://github.com/ollama/ollama/blob/main/docs/api.md
 	fullRequestURL := fmt.Sprintf("%s/api/chat", meta.BaseURL)
 	if meta.Mode == relaymode.Embeddings {
-		fullRequestURL = fmt.Sprintf("%s/api/embeddings", meta.BaseURL)
+		fullRequestURL = fmt.Sprintf("%s/api/embed", meta.BaseURL)
 	}
 	return fullRequestURL, nil
 }
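The hunk above switches the embeddings route from `/api/embeddings` to `/api/embed`, Ollama's newer batch embedding endpoint. A minimal sketch of the resulting URL selection, assuming a default local Ollama server; `requestURL` is a hypothetical standalone helper, not the adaptor's method:

```go
package main

import "fmt"

// requestURL mirrors the adaptor's branch: chat requests go to /api/chat,
// embedding requests to the newer /api/embed endpoint.
func requestURL(baseURL string, embeddings bool) string {
	if embeddings {
		return fmt.Sprintf("%s/api/embed", baseURL)
	}
	return fmt.Sprintf("%s/api/chat", baseURL)
}

func main() {
	fmt.Println(requestURL("http://localhost:11434", true))  // http://localhost:11434/api/embed
	fmt.Println(requestURL("http://localhost:11434", false)) // http://localhost:11434/api/chat
}
```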
@@ -30,6 +30,8 @@ func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
 			TopP:             request.TopP,
 			FrequencyPenalty: request.FrequencyPenalty,
 			PresencePenalty:  request.PresencePenalty,
+			NumPredict:       request.MaxTokens,
+			NumCtx:           request.NumCtx,
 		},
 		Stream: request.Stream,
 	}
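The two added fields forward OpenAI-style `max_tokens` to Ollama's `num_predict` (a cap on generated tokens) and pass the non-standard `num_ctx` request field through as Ollama's context-window size. A small sketch of how the options serialize, assuming the same `omitempty` tags as the struct in this commit:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Options reproduces just the two fields added in this commit.
type Options struct {
	NumPredict int `json:"num_predict,omitempty"`
	NumCtx     int `json:"num_ctx,omitempty"`
}

func main() {
	// max_tokens=256 and num_ctx=8192 taken from an incoming OpenAI-style request.
	b, _ := json.Marshal(Options{NumPredict: 256, NumCtx: 8192})
	fmt.Println(string(b)) // {"num_predict":256,"num_ctx":8192}

	// Zero values are omitted entirely.
	b, _ = json.Marshal(Options{})
	fmt.Println(string(b)) // {}
}
```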
@@ -117,8 +119,10 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
 	common.SetEventStreamHeaders(c)

 	for scanner.Scan() {
-		data := strings.TrimPrefix(scanner.Text(), "}")
-		data = data + "}"
+		data := scanner.Text()
+		if strings.HasPrefix(data, "}") {
+			data = strings.TrimPrefix(data, "}") + "}"
+		}

 		var ollamaResponse ChatResponse
 		err := json.Unmarshal([]byte(data), &ollamaResponse)
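The rewritten loop only repairs lines that actually start with a stray `}` (presumably a splitting quirk in Ollama's streamed NDJSON), instead of unconditionally stripping and re-appending a brace, which corrupted well-formed lines. A self-contained sketch of the guard:

```go
package main

import (
	"fmt"
	"strings"
)

// repairLine moves a leading stray "}" to the end of the line, leaving
// already well-formed JSON lines untouched.
func repairLine(line string) string {
	if strings.HasPrefix(line, "}") {
		line = strings.TrimPrefix(line, "}") + "}"
	}
	return line
}

func main() {
	fmt.Println(repairLine(`}{"model":"llama3"`)) // {"model":"llama3"}
	fmt.Println(repairLine(`{"done":true}`))      // {"done":true} (unchanged)
}
```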
@@ -157,7 +161,14 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
 func ConvertEmbeddingRequest(request model.GeneralOpenAIRequest) *EmbeddingRequest {
 	return &EmbeddingRequest{
 		Model: request.Model,
-		Prompt: strings.Join(request.ParseInput(), " "),
+		Input: request.ParseInput(),
+		Options: &Options{
+			Seed:             int(request.Seed),
+			Temperature:      request.Temperature,
+			TopP:             request.TopP,
+			FrequencyPenalty: request.FrequencyPenalty,
+			PresencePenalty:  request.PresencePenalty,
+		},
 	}
 }

@@ -200,15 +211,17 @@ func embeddingResponseOllama2OpenAI(response *EmbeddingResponse) *openai.Embeddi
 	openAIEmbeddingResponse := openai.EmbeddingResponse{
 		Object: "list",
 		Data:   make([]openai.EmbeddingResponseItem, 0, 1),
-		Model:  "text-embedding-v1",
+		Model:  response.Model,
 		Usage:  model.Usage{TotalTokens: 0},
 	}

+	for i, embedding := range response.Embeddings {
 	openAIEmbeddingResponse.Data = append(openAIEmbeddingResponse.Data, openai.EmbeddingResponseItem{
 		Object:    `embedding`,
-		Index:     0,
-		Embedding: response.Embedding,
+		Index:     i,
+		Embedding: embedding,
 	})
+	}
 	return &openAIEmbeddingResponse
 }

@@ -7,6 +7,8 @@ type Options struct {
 	TopP             float64 `json:"top_p,omitempty"`
 	FrequencyPenalty float64 `json:"frequency_penalty,omitempty"`
 	PresencePenalty  float64 `json:"presence_penalty,omitempty"`
+	NumPredict       int     `json:"num_predict,omitempty"`
+	NumCtx           int     `json:"num_ctx,omitempty"`
 }

 type Message struct {
@@ -38,10 +40,14 @@ type ChatResponse struct {

 type EmbeddingRequest struct {
 	Model string `json:"model"`
-	Prompt string `json:"prompt"`
+	Input []string `json:"input"`
+	// Truncate bool `json:"truncate,omitempty"`
+	Options *Options `json:"options,omitempty"`
+	// KeepAlive string `json:"keep_alive,omitempty"`
 }

 type EmbeddingResponse struct {
 	Error string `json:"error,omitempty"`
-	Embedding []float64 `json:"embedding,omitempty"`
+	Model string `json:"model"`
+	Embeddings [][]float64 `json:"embeddings"`
 }
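Together, these hunks move the Ollama adaptor from the single-input `/api/embeddings` shape (`prompt` in, `embedding` out) to the batch `/api/embed` shape (`input` array in, `embeddings` matrix out). A hedged round-trip sketch against a local Ollama server; the model name is a placeholder and the structs are trimmed copies of the ones above:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

type EmbeddingRequest struct {
	Model string   `json:"model"`
	Input []string `json:"input"`
}

type EmbeddingResponse struct {
	Error      string      `json:"error,omitempty"`
	Model      string      `json:"model"`
	Embeddings [][]float64 `json:"embeddings"`
}

func main() {
	body, _ := json.Marshal(EmbeddingRequest{
		Model: "nomic-embed-text", // hypothetical locally pulled model
		Input: []string{"hello", "world"},
	})
	resp, err := http.Post("http://localhost:11434/api/embed", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out EmbeddingResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	// One embedding per input; Index i in the OpenAI conversion above
	// corresponds to Embeddings[i] here.
	fmt.Println(out.Model, len(out.Embeddings))
}
```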
@@ -13,6 +13,7 @@ import (
 	"github.com/songquanpeng/one-api/relay/adaptor/novita"
 	"github.com/songquanpeng/one-api/relay/adaptor/stepfun"
 	"github.com/songquanpeng/one-api/relay/adaptor/togetherai"
+	"github.com/songquanpeng/one-api/relay/adaptor/siliconflow"
 	"github.com/songquanpeng/one-api/relay/channeltype"
 )

@@ -30,6 +31,7 @@ var CompatibleChannels = []int{
 	channeltype.DeepSeek,
 	channeltype.TogetherAI,
 	channeltype.Novita,
+	channeltype.SiliconFlow,
 }

 func GetCompatibleChannelMeta(channelType int) (string, []string) {
@@ -60,6 +62,8 @@ func GetCompatibleChannelMeta(channelType int) (string, []string) {
 		return "doubao", doubao.ModelList
 	case channeltype.Novita:
 		return "novita", novita.ModelList
+	case channeltype.SiliconFlow:
+		return "siliconflow", siliconflow.ModelList
 	default:
 		return "openai", ModelList
 	}
relay/adaptor/siliconflow/constants.go (new file, 36 lines)
@@ -0,0 +1,36 @@
+package siliconflow
+
+// https://docs.siliconflow.cn/docs/getting-started
+
+var ModelList = []string{
+	"deepseek-ai/deepseek-llm-67b-chat",
+	"Qwen/Qwen1.5-14B-Chat",
+	"Qwen/Qwen1.5-7B-Chat",
+	"Qwen/Qwen1.5-110B-Chat",
+	"Qwen/Qwen1.5-32B-Chat",
+	"01-ai/Yi-1.5-6B-Chat",
+	"01-ai/Yi-1.5-9B-Chat-16K",
+	"01-ai/Yi-1.5-34B-Chat-16K",
+	"THUDM/chatglm3-6b",
+	"deepseek-ai/DeepSeek-V2-Chat",
+	"THUDM/glm-4-9b-chat",
+	"Qwen/Qwen2-72B-Instruct",
+	"Qwen/Qwen2-7B-Instruct",
+	"Qwen/Qwen2-57B-A14B-Instruct",
+	"deepseek-ai/DeepSeek-Coder-V2-Instruct",
+	"Qwen/Qwen2-1.5B-Instruct",
+	"internlm/internlm2_5-7b-chat",
+	"BAAI/bge-large-en-v1.5",
+	"BAAI/bge-large-zh-v1.5",
+	"Pro/Qwen/Qwen2-7B-Instruct",
+	"Pro/Qwen/Qwen2-1.5B-Instruct",
+	"Pro/Qwen/Qwen1.5-7B-Chat",
+	"Pro/THUDM/glm-4-9b-chat",
+	"Pro/THUDM/chatglm3-6b",
+	"Pro/01-ai/Yi-1.5-9B-Chat-16K",
+	"Pro/01-ai/Yi-1.5-6B-Chat",
+	"Pro/google/gemma-2-9b-it",
+	"Pro/internlm/internlm2_5-7b-chat",
+	"Pro/meta-llama/Meta-Llama-3-8B-Instruct",
+	"Pro/mistralai/Mistral-7B-Instruct-v0.2",
+}
@@ -98,12 +98,11 @@ var ModelRatio = map[string]float64{
 	"bge-large-en": 0.002 * RMB,
 	"tao-8k":       0.002 * RMB,
 	// https://ai.google.dev/pricing
-	"PaLM-2":                    1,
 	"gemini-pro":                1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
-	"gemini-pro-vision":         1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
-	"gemini-1.0-pro-vision-001": 1,
-	"gemini-1.0-pro-001":        1,
+	"gemini-1.0-pro":            1,
+	"gemini-1.5-flash":          1,
 	"gemini-1.5-pro":            1,
+	"aqa":                       1,
 	// https://open.bigmodel.cn/pricing
 	"glm-4":  0.1 * RMB,
 	"glm-4v": 0.1 * RMB,
@@ -158,12 +157,16 @@ var ModelRatio = map[string]float64{
 	"mistral-large-latest": 8.0 / 1000 * USD,
 	"mistral-embed":        0.1 / 1000 * USD,
 	// https://wow.groq.com/#:~:text=inquiries%C2%A0here.-,Model,-Current%20Speed
-	"llama3-70b-8192":    0.59 / 1000 * USD,
-	"mixtral-8x7b-32768": 0.27 / 1000 * USD,
-	"llama3-8b-8192":     0.05 / 1000 * USD,
-	"gemma-7b-it":        0.1 / 1000 * USD,
-	"llama2-70b-4096":    0.64 / 1000 * USD,
-	"llama2-7b-2048":     0.1 / 1000 * USD,
+	"gemma-7b-it":                           0.07 / 1000000 * USD,
+	"mixtral-8x7b-32768":                    0.24 / 1000000 * USD,
+	"llama3-8b-8192":                        0.05 / 1000000 * USD,
+	"llama3-70b-8192":                       0.59 / 1000000 * USD,
+	"gemma2-9b-it":                          0.20 / 1000000 * USD,
+	"llama-3.1-405b-reasoning":              0.89 / 1000000 * USD,
+	"llama-3.1-70b-versatile":               0.59 / 1000000 * USD,
+	"llama-3.1-8b-instant":                  0.05 / 1000000 * USD,
+	"llama3-groq-70b-8192-tool-use-preview": 0.89 / 1000000 * USD,
+	"llama3-groq-8b-8192-tool-use-preview":  0.19 / 1000000 * USD,
 	// https://platform.lingyiwanwu.com/docs#-计费单元
 	"yi-34b-chat-0205": 2.5 / 1000 * RMB,
 	"yi-34b-chat-200k": 12.0 / 1000 * RMB,
@@ -45,5 +45,6 @@ const (
 	Novita
 	VertextAI
 	Proxy
+	SiliconFlow
 	Dummy
 )
|
@ -45,6 +45,7 @@ var ChannelBaseURLs = []string{
|
|||||||
"https://api.novita.ai/v3/openai", // 41
|
"https://api.novita.ai/v3/openai", // 41
|
||||||
"", // 42
|
"", // 42
|
||||||
"", // 43
|
"", // 43
|
||||||
|
"https://api.siliconflow.cn", // 44
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
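These two hunks must stay in lockstep: channel types are iota-derived constants, and `ChannelBaseURLs` is indexed by channel type, so `SiliconFlow` is inserted before `Dummy` to take value 44 and line up with the new base-URL slot. A minimal sketch of that invariant (constant values re-declared here for illustration; `VertextAI` keeps the upstream spelling):

```go
package main

import "fmt"

const (
	_ = iota + 40 // earlier channel types elided
	Novita        // 41
	VertextAI     // 42
	Proxy         // 43
	SiliconFlow   // 44
)

// Indexed composite literal keeps each base URL at its channel-type slot.
var ChannelBaseURLs = []string{
	Novita:      "https://api.novita.ai/v3/openai",
	VertextAI:   "",
	Proxy:       "",
	SiliconFlow: "https://api.siliconflow.cn",
}

func main() {
	fmt.Println(SiliconFlow, ChannelBaseURLs[SiliconFlow]) // 44 https://api.siliconflow.cn
}
```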
@@ -29,6 +29,7 @@ type GeneralOpenAIRequest struct {
 	Dimensions  int    `json:"dimensions,omitempty"`
 	Instruction string `json:"instruction,omitempty"`
 	Size        string `json:"size,omitempty"`
+	NumCtx      int    `json:"num_ctx,omitempty"`
 }

 func (r GeneralOpenAIRequest) ParseInput() []string {
@@ -29,6 +29,7 @@ export const CHANNEL_OPTIONS = [
   { key: 39, text: 'together.ai', value: 39, color: 'blue' },
   { key: 42, text: 'VertexAI', value: 42, color: 'blue' },
   { key: 43, text: 'Proxy', value: 43, color: 'blue' },
+  { key: 44, text: 'SiliconFlow', value: 44, color: 'blue' },
   { key: 8, text: '自定义渠道', value: 8, color: 'pink' },
   { key: 22, text: '知识库:FastGPT', value: 22, color: 'blue' },
   { key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple' },
@@ -173,6 +173,12 @@ export const CHANNEL_OPTIONS = {
     value: 43,
     color: 'primary'
   },
+  44: {
+    key: 44,
+    text: 'SiliconFlow',
+    value: 44,
+    color: 'primary'
+  },
   41: {
     key: 41,
     text: 'Novita',
@@ -29,6 +29,7 @@ export const CHANNEL_OPTIONS = [
   { key: 39, text: 'together.ai', value: 39, color: 'blue' },
   { key: 42, text: 'VertexAI', value: 42, color: 'blue' },
   { key: 43, text: 'Proxy', value: 43, color: 'blue' },
+  { key: 44, text: 'SiliconFlow', value: 44, color: 'blue' },
   { key: 8, text: '自定义渠道', value: 8, color: 'pink' },
   { key: 22, text: '知识库:FastGPT', value: 22, color: 'blue' },
   { key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple' },