Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-10-25 02:43:41 +08:00)

Compare commits: 10 commits, v0.6.11-pr ... v0.6.11-pr
| Author | SHA1 | Date |
|---|---|---|
| | 3f421c4f04 | |
| | 1ce6a226f6 | |
| | cafd0a0327 | |
| | 8b8cd03e85 | |
| | 54c38de813 | |
| | d6284bf6b0 | |
| | df5d2ca93d | |
| | fef7ae048b | |
| | 6916debf66 | |
| | 53da209134 | |
@@ -115,7 +115,7 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用
19. 支持丰富的**自定义**设置,
    1. 支持自定义系统名称,logo 以及页脚。
    2. 支持自定义首页和关于页面,可以选择使用 HTML & Markdown 代码进行自定义,或者使用一个单独的网页通过 iframe 嵌入。
20. 支持通过系统访问令牌调用管理 API,进而**在无需二开的情况下扩展和自定义** One API 的功能,详情请参考此处 [API 文档](./docs/API.md)。。
20. 支持通过系统访问令牌调用管理 API,进而**在无需二开的情况下扩展和自定义** One API 的功能,详情请参考此处 [API 文档](./docs/API.md)。
21. 支持 Cloudflare Turnstile 用户校验。
22. 支持用户管理,支持**多种用户登录注册方式**:
    + 邮箱登录注册(支持注册邮箱白名单)以及通过邮箱进行密码重置。
@@ -163,4 +163,4 @@ var UserContentRequestProxy = env.String("USER_CONTENT_REQUEST_PROXY", "")
var UserContentRequestTimeout = env.Int("USER_CONTENT_REQUEST_TIMEOUT", 30)

var EnforceIncludeUsage = env.Bool("ENFORCE_INCLUDE_USAGE", false)
var TestPrompt = env.String("TEST_PROMPT", "Print your model name exactly and do not output without any other text.")
var TestPrompt = env.String("TEST_PROMPT", "Output only your specific model name with no additional text.")
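As a side note on the TEST_PROMPT change above, here is a minimal sketch of how the env helper resolves the value at runtime. It assumes the helper lives at github.com/songquanpeng/one-api/common/env (the import path is not shown in this hunk) and uses only the env.String(key, default) call already visible above.

```go
package main

import (
    "fmt"
    "os"

    "github.com/songquanpeng/one-api/common/env" // assumed import path; only env.String from the hunk is used
)

func main() {
    // With TEST_PROMPT unset, the default literal from the hunk is returned.
    fmt.Println(env.String("TEST_PROMPT", "Output only your specific model name with no additional text."))

    // With TEST_PROMPT set, the operator-supplied prompt takes precedence.
    os.Setenv("TEST_PROMPT", "Reply with your model name only.")
    fmt.Println(env.String("TEST_PROMPT", "Output only your specific model name with no additional text."))
}
```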
relay/adaptor/alibailian/constants.go (new file, 20 lines)
@@ -0,0 +1,20 @@
package alibailian

// https://help.aliyun.com/zh/model-studio/getting-started/models

var ModelList = []string{
    "qwen-turbo",
    "qwen-plus",
    "qwen-long",
    "qwen-max",
    "qwen-coder-plus",
    "qwen-coder-plus-latest",
    "qwen-coder-turbo",
    "qwen-coder-turbo-latest",
    "qwen-mt-plus",
    "qwen-mt-turbo",
    "qwq-32b-preview",

    "deepseek-r1",
    "deepseek-v3",
}
relay/adaptor/alibailian/main.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package alibailian

import (
    "fmt"

    "github.com/songquanpeng/one-api/relay/meta"
    "github.com/songquanpeng/one-api/relay/relaymode"
)

func GetRequestURL(meta *meta.Meta) (string, error) {
    switch meta.Mode {
    case relaymode.ChatCompletions:
        return fmt.Sprintf("%s/compatible-mode/v1/chat/completions", meta.BaseURL), nil
    case relaymode.Embeddings:
        return fmt.Sprintf("%s/compatible-mode/v1/embeddings", meta.BaseURL), nil
    default:
    }
    return "", fmt.Errorf("unsupported relay mode %d for ali bailian", meta.Mode)
}
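A small usage sketch (not part of the diff) of the new Bailian adaptor. It assumes meta.Meta exposes the Mode and BaseURL fields used above; the base URL is the one registered for channel 49 further down in ChannelBaseURLs.

```go
package main

import (
    "fmt"

    "github.com/songquanpeng/one-api/relay/adaptor/alibailian"
    "github.com/songquanpeng/one-api/relay/meta"
    "github.com/songquanpeng/one-api/relay/relaymode"
)

func main() {
    // Hypothetical channel meta for an Ali Bailian chat-completions request.
    m := &meta.Meta{
        Mode:    relaymode.ChatCompletions,
        BaseURL: "https://dashscope.aliyuncs.com",
    }
    u, err := alibailian.GetRequestURL(m)
    fmt.Println(u, err)
    // expected: https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions <nil>
}
```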
@@ -5,9 +5,10 @@ import (
    "fmt"
    "io"
    "net/http"
    "strings"

    "github.com/gin-gonic/gin"

    "github.com/songquanpeng/one-api/common/config"
    "github.com/songquanpeng/one-api/common/helper"
    channelhelper "github.com/songquanpeng/one-api/relay/adaptor"
    "github.com/songquanpeng/one-api/relay/adaptor/openai"
@@ -20,17 +21,12 @@ type Adaptor struct {
}

func (a *Adaptor) Init(meta *meta.Meta) {

}

func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
    var defaultVersion string
    switch meta.ActualModelName {
    case "gemini-2.0-flash-exp",
        "gemini-2.0-flash-thinking-exp",
        "gemini-2.0-flash-thinking-exp-01-21":
        defaultVersion = "v1beta"
    default:
    defaultVersion := config.GeminiVersion
    if strings.Contains(meta.ActualModelName, "gemini-2.0") ||
        strings.Contains(meta.ActualModelName, "gemini-1.5") {
        defaultVersion = "v1beta"
    }
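For readability, here is a standalone restatement of the version-selection rule in the strings.Contains variant of GetRequestURL shown above; pickVersion is our name, not the repo's, and configuredVersion stands in for config.GeminiVersion.

```go
package main

import (
    "fmt"
    "strings"
)

// pickVersion mirrors the rule in the hunk above: gemini-2.0 and gemini-1.5
// models are pinned to the v1beta API, everything else uses the configured version.
func pickVersion(model, configuredVersion string) string {
    v := configuredVersion
    if strings.Contains(model, "gemini-2.0") || strings.Contains(model, "gemini-1.5") {
        v = "v1beta"
    }
    return v
}

func main() {
    fmt.Println(pickVersion("gemini-2.0-flash", "v1"))   // v1beta
    fmt.Println(pickVersion("gemini-1.5-pro", "v1"))     // v1beta
    fmt.Println(pickVersion("text-embedding-004", "v1")) // v1
}
```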
@@ -1,11 +1,35 @@
package gemini

import (
    "github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
)

// https://ai.google.dev/models/gemini

var ModelList = []string{
    "gemini-pro", "gemini-1.0-pro",
    "gemini-1.5-flash", "gemini-1.5-pro",
    "text-embedding-004", "aqa",
    "gemini-2.0-flash-exp",
    "gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking-exp-01-21",
var ModelList = geminiv2.ModelList

// ModelsSupportSystemInstruction is the list of models that support system instruction.
//
// https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/system-instructions
var ModelsSupportSystemInstruction = []string{
    // "gemini-1.0-pro-002",
    // "gemini-1.5-flash", "gemini-1.5-flash-001", "gemini-1.5-flash-002",
    // "gemini-1.5-flash-8b",
    // "gemini-1.5-pro", "gemini-1.5-pro-001", "gemini-1.5-pro-002",
    // "gemini-1.5-pro-experimental",
    "gemini-2.0-flash", "gemini-2.0-flash-exp",
    "gemini-2.0-flash-thinking-exp-01-21",
}

// IsModelSupportSystemInstruction check if the model support system instruction.
//
// Because the main version of Go is 1.20, slice.Contains cannot be used
func IsModelSupportSystemInstruction(model string) bool {
    for _, m := range ModelsSupportSystemInstruction {
        if m == model {
            return true
        }
    }

    return false
}
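An illustrative call of the new helper (ours, not part of the diff); it relies only on the function and list defined above.

```go
package main

import (
    "fmt"

    "github.com/songquanpeng/one-api/relay/adaptor/gemini"
)

func main() {
    // Models present in ModelsSupportSystemInstruction get a native system_instruction;
    // everything else keeps the old "system prompt becomes a user message" behaviour.
    fmt.Println(gemini.IsModelSupportSystemInstruction("gemini-2.0-flash"))                    // true
    fmt.Println(gemini.IsModelSupportSystemInstruction("gemini-2.0-flash-thinking-exp-01-21")) // true
    fmt.Println(gemini.IsModelSupportSystemInstruction("gemini-1.5-pro"))                      // false, still commented out above
}
```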
@@ -132,9 +132,16 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
        }
        // Converting system prompt to prompt from user for the same reason
        if content.Role == "system" {
            content.Role = "user"
            shouldAddDummyModelMessage = true
            if IsModelSupportSystemInstruction(textRequest.Model) {
                geminiRequest.SystemInstruction = &content
                geminiRequest.SystemInstruction.Role = ""
                continue
            } else {
                content.Role = "user"
            }
        }

        geminiRequest.Contents = append(geminiRequest.Contents, content)

        // If a system message is the last message, we need to add a dummy model message to make gemini happy
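To make the branch above easier to follow, here is a self-contained sketch of the same decision with hypothetical local types (msg, request and supportsSystemInstruction are ours): a system message either becomes the request's system instruction or is downgraded to a user message.

```go
package main

import "fmt"

// Hypothetical stand-ins for the adaptor's ChatContent / ChatRequest types.
type msg struct {
    Role string
    Text string
}

type request struct {
    SystemInstruction *msg
    Contents          []msg
}

// supportsSystemInstruction plays the role of IsModelSupportSystemInstruction in this sketch.
func supportsSystemInstruction(model string) bool { return model == "gemini-2.0-flash" }

func convert(model string, msgs []msg) request {
    var req request
    for _, m := range msgs {
        if m.Role == "system" {
            if supportsSystemInstruction(model) {
                sys := m
                sys.Role = "" // the role is cleared on the system instruction, as in the hunk above
                req.SystemInstruction = &sys
                continue // the system message is not appended to Contents
            }
            m.Role = "user" // fallback: the old behaviour, system prompt relayed as a user message
        }
        req.Contents = append(req.Contents, m)
    }
    return req
}

func main() {
    out := convert("gemini-2.0-flash", []msg{{Role: "system", Text: "be terse"}, {Role: "user", Text: "hi"}})
    fmt.Println(out.SystemInstruction.Text, len(out.Contents)) // be terse 1
}
```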
@@ -1,10 +1,11 @@
package gemini

type ChatRequest struct {
    Contents []ChatContent `json:"contents"`
    SafetySettings []ChatSafetySettings `json:"safety_settings,omitempty"`
    GenerationConfig ChatGenerationConfig `json:"generation_config,omitempty"`
    Tools []ChatTools `json:"tools,omitempty"`
    Contents []ChatContent `json:"contents"`
    SafetySettings []ChatSafetySettings `json:"safety_settings,omitempty"`
    GenerationConfig ChatGenerationConfig `json:"generation_config,omitempty"`
    Tools []ChatTools `json:"tools,omitempty"`
    SystemInstruction *ChatContent `json:"system_instruction,omitempty"`
}

type EmbeddingRequest struct {
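A hedged sketch of what the added field changes on the wire. The mirror types below are ours and deliberately minimal (the real ChatContent carries more fields); the point is that system_instruction is serialized only when the pointer is set, thanks to omitempty.

```go
package main

import (
    "encoding/json"
    "fmt"
)

// Minimal local mirrors of the struct above, for illustration only.
type content struct {
    Role  string   `json:"role,omitempty"`
    Parts []string `json:"parts,omitempty"`
}

type chatRequest struct {
    Contents          []content `json:"contents"`
    SystemInstruction *content  `json:"system_instruction,omitempty"`
}

func main() {
    req := chatRequest{
        Contents:          []content{{Role: "user", Parts: []string{"hi"}}},
        SystemInstruction: &content{Parts: []string{"be terse"}},
    }
    b, _ := json.Marshal(req)
    fmt.Println(string(b))
    // {"contents":[{"role":"user","parts":["hi"]}],"system_instruction":{"parts":["be terse"]}}
}
```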
relay/adaptor/geminiv2/constants.go (new file, 15 lines)
@@ -0,0 +1,15 @@
package geminiv2

// https://ai.google.dev/models/gemini

var ModelList = []string{
    "gemini-pro", "gemini-1.0-pro",
    // "gemma-2-2b-it", "gemma-2-9b-it", "gemma-2-27b-it",
    "gemini-1.5-flash", "gemini-1.5-flash-8b",
    "gemini-1.5-pro", "gemini-1.5-pro-experimental",
    "text-embedding-004", "aqa",
    "gemini-2.0-flash", "gemini-2.0-flash-exp",
    "gemini-2.0-flash-lite-preview-02-05",
    "gemini-2.0-flash-thinking-exp-01-21",
    "gemini-2.0-pro-exp-02-05",
}
relay/adaptor/geminiv2/main.go (new file, 14 lines)
@@ -0,0 +1,14 @@
package geminiv2

import (
    "fmt"
    "strings"

    "github.com/songquanpeng/one-api/relay/meta"
)

func GetRequestURL(meta *meta.Meta) (string, error) {
    baseURL := strings.TrimSuffix(meta.BaseURL, "/")
    requestPath := strings.TrimPrefix(meta.RequestURLPath, "/v1")
    return fmt.Sprintf("%s%s", baseURL, requestPath), nil
}
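A usage sketch (ours): the helper strips a trailing slash from the base URL and the /v1 prefix from the incoming path, so an OpenAI-style request maps straight onto Gemini's OpenAI-compatible endpoint. The base URL is the one registered as entry 51 in ChannelBaseURLs further down.

```go
package main

import (
    "fmt"

    "github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
    "github.com/songquanpeng/one-api/relay/meta"
)

func main() {
    m := &meta.Meta{
        BaseURL:        "https://generativelanguage.googleapis.com/v1beta/openai/",
        RequestURLPath: "/v1/chat/completions",
    }
    u, _ := geminiv2.GetRequestURL(m)
    fmt.Println(u)
    // https://generativelanguage.googleapis.com/v1beta/openai/chat/completions
}
```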
@@ -10,8 +10,10 @@ import (
    "github.com/gin-gonic/gin"

    "github.com/songquanpeng/one-api/relay/adaptor"
    "github.com/songquanpeng/one-api/relay/adaptor/alibailian"
    "github.com/songquanpeng/one-api/relay/adaptor/baiduv2"
    "github.com/songquanpeng/one-api/relay/adaptor/doubao"
    "github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
    "github.com/songquanpeng/one-api/relay/adaptor/minimax"
    "github.com/songquanpeng/one-api/relay/adaptor/novita"
    "github.com/songquanpeng/one-api/relay/channeltype"
@@ -56,6 +58,10 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
        return novita.GetRequestURL(meta)
    case channeltype.BaiduV2:
        return baiduv2.GetRequestURL(meta)
    case channeltype.AliBailian:
        return alibailian.GetRequestURL(meta)
    case channeltype.GeminiOpenAICompatible:
        return geminiv2.GetRequestURL(meta)
    default:
        return GetFullRequestURL(meta.BaseURL, meta.RequestURLPath, meta.ChannelType), nil
    }
@@ -2,10 +2,12 @@ package openai

import (
    "github.com/songquanpeng/one-api/relay/adaptor/ai360"
    "github.com/songquanpeng/one-api/relay/adaptor/alibailian"
    "github.com/songquanpeng/one-api/relay/adaptor/baichuan"
    "github.com/songquanpeng/one-api/relay/adaptor/baiduv2"
    "github.com/songquanpeng/one-api/relay/adaptor/deepseek"
    "github.com/songquanpeng/one-api/relay/adaptor/doubao"
    "github.com/songquanpeng/one-api/relay/adaptor/geminiv2"
    "github.com/songquanpeng/one-api/relay/adaptor/groq"
    "github.com/songquanpeng/one-api/relay/adaptor/lingyiwanwu"
    "github.com/songquanpeng/one-api/relay/adaptor/minimax"
@@ -79,6 +81,10 @@ func GetCompatibleChannelMeta(channelType int) (string, []string) {
        return "xunfeiv2", xunfeiv2.ModelList
    case channeltype.OpenRouter:
        return "openrouter", openrouter.ModelList
    case channeltype.AliBailian:
        return "alibailian", alibailian.ModelList
    case channeltype.GeminiOpenAICompatible:
        return "geminiv2", geminiv2.ModelList
    default:
        return "openai", ModelList
    }
@@ -17,6 +17,9 @@ func ResponseText2Usage(responseText string, modelName string, promptTokens int)
}

func GetFullRequestURL(baseURL string, requestURL string, channelType int) string {
    if channelType == channeltype.OpenAICompatible {
        return fmt.Sprintf("%s%s", strings.TrimSuffix(baseURL, "/"), strings.TrimPrefix(requestURL, "/v1"))
    }
    fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)

    if strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") {
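An illustrative call (ours) of the new OpenAICompatible branch above: the channel's base URL is treated as an OpenAI-SDK-style Base URL, so the /v1 prefix of the relayed path is dropped before joining, which avoids the /v1/v1 duplication that plain concatenation would produce.

```go
package main

import (
    "fmt"

    "github.com/songquanpeng/one-api/relay/adaptor/openai"
    "github.com/songquanpeng/one-api/relay/channeltype"
)

func main() {
    // Hypothetical self-hosted endpoint configured on an "OpenAI 兼容" channel.
    u := openai.GetFullRequestURL("https://llm.example.com/v1", "/v1/chat/completions", channeltype.OpenAICompatible)
    fmt.Println(u)
    // https://llm.example.com/v1/chat/completions
}
```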
@@ -16,10 +16,12 @@ import (

var ModelList = []string{
    "gemini-pro", "gemini-pro-vision",
    "gemini-1.5-pro-001", "gemini-1.5-flash-001",
    "gemini-1.5-pro-002", "gemini-1.5-flash-002",
    "gemini-2.0-flash-exp",
    "gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking-exp-01-21",
    "gemini-exp-1206",
    "gemini-1.5-pro-001", "gemini-1.5-pro-002",
    "gemini-1.5-flash-001", "gemini-1.5-flash-002",
    "gemini-2.0-flash-exp", "gemini-2.0-flash-001",
    "gemini-2.0-flash-lite-preview-02-05",
    "gemini-2.0-flash-thinking-exp-01-21",
}

type Adaptor struct {
@@ -115,15 +115,24 @@ var ModelRatio = map[string]float64{
    "bge-large-en": 0.002 * RMB,
    "tao-8k": 0.002 * RMB,
    // https://ai.google.dev/pricing
    "gemini-pro": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
    "gemini-1.0-pro": 1,
    "gemini-1.5-pro": 1,
    "gemini-1.5-pro-001": 1,
    "gemini-1.5-flash": 1,
    "gemini-1.5-flash-001": 1,
    "gemini-2.0-flash-exp": 1,
    "gemini-2.0-flash-thinking-exp": 1,
    "gemini-2.0-flash-thinking-exp-01-21": 1,
    // https://cloud.google.com/vertex-ai/generative-ai/pricing
    // "gemma-2-2b-it": 0,
    // "gemma-2-9b-it": 0,
    // "gemma-2-27b-it": 0,
    "gemini-pro": 0.25 * MILLI_USD, // $0.00025 / 1k characters -> $0.001 / 1k tokens
    "gemini-1.0-pro": 0.125 * MILLI_USD,
    "gemini-1.5-pro": 1.25 * MILLI_USD,
    "gemini-1.5-pro-001": 1.25 * MILLI_USD,
    "gemini-1.5-pro-experimental": 1.25 * MILLI_USD,
    "gemini-1.5-flash": 0.075 * MILLI_USD,
    "gemini-1.5-flash-001": 0.075 * MILLI_USD,
    "gemini-1.5-flash-8b": 0.0375 * MILLI_USD,
    "gemini-2.0-flash-exp": 0.075 * MILLI_USD,
    "gemini-2.0-flash": 0.15 * MILLI_USD,
    "gemini-2.0-flash-001": 0.15 * MILLI_USD,
    "gemini-2.0-flash-lite-preview-02-05": 0.075 * MILLI_USD,
    "gemini-2.0-flash-thinking-exp-01-21": 0.075 * MILLI_USD,
    "gemini-2.0-pro-exp-02-05": 1.25 * MILLI_USD,
    "aqa": 1,
    // https://open.bigmodel.cn/pricing
    "glm-zero-preview": 0.01 * RMB,
@@ -50,5 +50,8 @@ const (
    Replicate
    BaiduV2
    XunfeiV2
    AliBailian
    OpenAICompatible
    GeminiOpenAICompatible
    Dummy
)
@@ -50,6 +50,10 @@ var ChannelBaseURLs = []string{
    "https://api.replicate.com/v1/models/", // 46
    "https://qianfan.baidubce.com", // 47
    "https://spark-api-open.xf-yun.com", // 48
    "https://dashscope.aliyuncs.com", // 49
    "", // 50
    "https://generativelanguage.googleapis.com/v1beta/openai/", // 51
}

func init() {
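A quick cross-check sketch (ours, assuming the const block above is a plain iota enumeration): the three channel types added or referenced here line up with the // 49, // 50 and // 51 entries in ChannelBaseURLs and with the value fields used by the frontend CHANNEL_OPTIONS below.

```go
package main

import (
    "fmt"

    "github.com/songquanpeng/one-api/relay/channeltype"
)

func main() {
    // Expected to print: 49 50 51
    fmt.Println(channeltype.AliBailian, channeltype.OpenAICompatible, channeltype.GeminiOpenAICompatible)
}
```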
@@ -44,6 +44,9 @@ function renderType(type, t) {
function renderBalance(type, balance, t) {
  switch (type) {
    case 1: // OpenAI
      if (balance === 0) {
        return <span>{t('channel.table.balance_not_supported')}</span>;
      }
      return <span>${balance.toFixed(2)}</span>;
    case 4: // CloseAI
      return <span>¥{balance.toFixed(2)}</span>;
@@ -108,7 +111,7 @@ const ChannelsTable = () => {
  const loadChannels = async (startIdx) => {
    const res = await API.get(`/api/channel/?p=${startIdx}`);
    const {success, message, data} = res.data;
    const { success, message, data } = res.data;
    if (success) {
      let localChannels = data.map(processChannelData);
      if (startIdx === 0) {
@@ -588,7 +591,15 @@ const ChannelsTable = () => {
            />
          </Table.Cell>
          <Table.Cell>
            <div>
            <div
              style={{
                display: 'flex',
                alignItems: 'center',
                flexWrap: 'wrap',
                gap: '2px',
                rowGap: '6px',
              }}
            >
              <Button
                size={'tiny'}
                positive
@@ -1,12 +1,26 @@
export const CHANNEL_OPTIONS = [
  {key: 1, text: 'OpenAI', value: 1, color: 'green'},
  {key: 14, text: 'Anthropic Claude', value: 14, color: 'black'},
  {key: 33, text: 'AWS', value: 33, color: 'black'},
  {key: 3, text: 'Azure OpenAI', value: 3, color: 'olive'},
  {key: 11, text: 'Google PaLM2', value: 11, color: 'orange'},
  {key: 24, text: 'Google Gemini', value: 24, color: 'orange'},
  {key: 28, text: 'Mistral AI', value: 28, color: 'orange'},
  {key: 41, text: 'Novita', value: 41, color: 'purple'},
  { key: 1, text: 'OpenAI', value: 1, color: 'green' },
  {
    key: 50,
    text: 'OpenAI 兼容',
    value: 50,
    color: 'olive',
    description: 'OpenAI 兼容渠道,支持设置 Base URL',
  },
  {key: 14, text: 'Anthropic', value: 14, color: 'black'},
  { key: 33, text: 'AWS', value: 33, color: 'black' },
  {key: 3, text: 'Azure', value: 3, color: 'olive'},
  {key: 11, text: 'PaLM2', value: 11, color: 'orange'},
  {key: 24, text: 'Gemini', value: 24, color: 'orange'},
  {
    key: 51,
    text: 'Gemini (OpenAI)',
    value: 51,
    color: 'orange',
    description: 'Gemini OpenAI 兼容格式',
  },
  { key: 28, text: 'Mistral AI', value: 28, color: 'orange' },
  { key: 41, text: 'Novita', value: 41, color: 'purple' },
  {
    key: 40,
    text: '字节火山引擎',
@@ -28,7 +42,14 @@ export const CHANNEL_OPTIONS = [
    color: 'blue',
    tip: '请前往<a href="https://console.bce.baidu.com/iam/#/iam/apikey/list" target="_blank">此处</a>获取 API Key,注意本渠道仅支持<a target="_blank" href="https://cloud.baidu.com/doc/WENXINWORKSHOP/s/em4tsqo3v">推理服务 V2</a>相关模型',
  },
  {key: 17, text: '阿里通义千问', value: 17, color: 'orange'},
  {
    key: 17,
    text: '阿里通义千问',
    value: 17,
    color: 'orange',
    tip: '如需使用阿里云百炼,请使用<strong>阿里云百炼</strong>渠道',
  },
  { key: 49, text: '阿里云百炼', value: 49, color: 'orange' },
  {
    key: 18,
    text: '讯飞星火认知',
@@ -43,38 +64,45 @@ export const CHANNEL_OPTIONS = [
    color: 'blue',
    tip: 'HTTP 版本的讯飞接口,前往<a href="https://console.xfyun.cn/services/cbm" target="_blank">此处</a>获取 HTTP 服务接口认证密钥',
  },
  {key: 16, text: '智谱 ChatGLM', value: 16, color: 'violet'},
  {key: 19, text: '360 智脑', value: 19, color: 'blue'},
  {key: 25, text: 'Moonshot AI', value: 25, color: 'black'},
  {key: 23, text: '腾讯混元', value: 23, color: 'teal'},
  {key: 26, text: '百川大模型', value: 26, color: 'orange'},
  {key: 27, text: 'MiniMax', value: 27, color: 'red'},
  {key: 29, text: 'Groq', value: 29, color: 'orange'},
  {key: 30, text: 'Ollama', value: 30, color: 'black'},
  {key: 31, text: '零一万物', value: 31, color: 'green'},
  {key: 32, text: '阶跃星辰', value: 32, color: 'blue'},
  {key: 34, text: 'Coze', value: 34, color: 'blue'},
  {key: 35, text: 'Cohere', value: 35, color: 'blue'},
  {key: 36, text: 'DeepSeek', value: 36, color: 'black'},
  {key: 37, text: 'Cloudflare', value: 37, color: 'orange'},
  {key: 38, text: 'DeepL', value: 38, color: 'black'},
  {key: 39, text: 'together.ai', value: 39, color: 'blue'},
  {key: 42, text: 'VertexAI', value: 42, color: 'blue'},
  {key: 43, text: 'Proxy', value: 43, color: 'blue'},
  {key: 44, text: 'SiliconFlow', value: 44, color: 'blue'},
  {key: 45, text: 'xAI', value: 45, color: 'blue'},
  {key: 46, text: 'Replicate', value: 46, color: 'blue'},
  {key: 8, text: '自定义渠道', value: 8, color: 'pink'},
  {key: 22, text: '知识库:FastGPT', value: 22, color: 'blue'},
  {key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple'},
  {key: 20, text: 'OpenRouter', value: 20, color: 'black'},
  {key: 2, text: '代理:API2D', value: 2, color: 'blue'},
  {key: 5, text: '代理:OpenAI-SB', value: 5, color: 'brown'},
  {key: 7, text: '代理:OhMyGPT', value: 7, color: 'purple'},
  {key: 10, text: '代理:AI Proxy', value: 10, color: 'purple'},
  {key: 4, text: '代理:CloseAI', value: 4, color: 'teal'},
  {key: 6, text: '代理:OpenAI Max', value: 6, color: 'violet'},
  {key: 9, text: '代理:AI.LS', value: 9, color: 'yellow'},
  {key: 12, text: '代理:API2GPT', value: 12, color: 'blue'},
  {key: 13, text: '代理:AIGC2D', value: 13, color: 'purple'},
  { key: 16, text: '智谱 ChatGLM', value: 16, color: 'violet' },
  { key: 19, text: '360 智脑', value: 19, color: 'blue' },
  { key: 25, text: 'Moonshot AI', value: 25, color: 'black' },
  { key: 23, text: '腾讯混元', value: 23, color: 'teal' },
  { key: 26, text: '百川大模型', value: 26, color: 'orange' },
  { key: 27, text: 'MiniMax', value: 27, color: 'red' },
  { key: 29, text: 'Groq', value: 29, color: 'orange' },
  { key: 30, text: 'Ollama', value: 30, color: 'black' },
  { key: 31, text: '零一万物', value: 31, color: 'green' },
  { key: 32, text: '阶跃星辰', value: 32, color: 'blue' },
  { key: 34, text: 'Coze', value: 34, color: 'blue' },
  { key: 35, text: 'Cohere', value: 35, color: 'blue' },
  { key: 36, text: 'DeepSeek', value: 36, color: 'black' },
  { key: 37, text: 'Cloudflare', value: 37, color: 'orange' },
  { key: 38, text: 'DeepL', value: 38, color: 'black' },
  { key: 39, text: 'together.ai', value: 39, color: 'blue' },
  { key: 42, text: 'VertexAI', value: 42, color: 'blue' },
  { key: 43, text: 'Proxy', value: 43, color: 'blue' },
  { key: 44, text: 'SiliconFlow', value: 44, color: 'blue' },
  { key: 45, text: 'xAI', value: 45, color: 'blue' },
  { key: 46, text: 'Replicate', value: 46, color: 'blue' },
  {
    key: 8,
    text: '自定义渠道',
    value: 8,
    color: 'pink',
    tip: '不推荐使用,请使用 <strong>OpenAI 兼容</strong>渠道类型。注意,这里所需要填入的代理地址仅会在实际请求时替换域名部分,如果你想填入 OpenAI SDK 中所要求的 Base URL,请使用 OpenAI 兼容渠道类型',
    description: '不推荐使用,请使用 OpenAI 兼容渠道类型',
  },
  { key: 22, text: '知识库:FastGPT', value: 22, color: 'blue' },
  { key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple' },
  { key: 20, text: 'OpenRouter', value: 20, color: 'black' },
  { key: 2, text: '代理:API2D', value: 2, color: 'blue' },
  { key: 5, text: '代理:OpenAI-SB', value: 5, color: 'brown' },
  { key: 7, text: '代理:OhMyGPT', value: 7, color: 'purple' },
  { key: 10, text: '代理:AI Proxy', value: 10, color: 'purple' },
  { key: 4, text: '代理:CloseAI', value: 4, color: 'teal' },
  { key: 6, text: '代理:OpenAI Max', value: 6, color: 'violet' },
  { key: 9, text: '代理:AI.LS', value: 9, color: 'yellow' },
  { key: 12, text: '代理:API2GPT', value: 12, color: 'blue' },
  { key: 13, text: '代理:AIGC2D', value: 13, color: 'purple' },
];
@@ -1,5 +1,5 @@
import {Label, Message} from 'semantic-ui-react';
import {getChannelOption} from './helper';
import { Label, Message } from 'semantic-ui-react';
import { getChannelOption } from './helper';
import React from 'react';

export function renderText(text, limit) {
@@ -16,7 +16,15 @@ export function renderGroup(group) {
  let groups = group.split(',');
  groups.sort();
  return (
    <>
    <div
      style={{
        display: 'flex',
        alignItems: 'center',
        flexWrap: 'wrap',
        gap: '2px',
        rowGap: '6px',
      }}
    >
      {groups.map((group) => {
        if (group === 'vip' || group === 'pro') {
          return <Label color='yellow'>{group}</Label>;
@@ -25,7 +33,7 @@ export function renderGroup(group) {
        }
        return <Label>{group}</Label>;
      })}
    </>
    </div>
  );
}
@@ -106,8 +114,8 @@ export function renderChannelTip(channelId) {
    return <></>;
  }
  return (
    <Message>
      <div dangerouslySetInnerHTML={{__html: channel.tip}}></div>
    </Message>
    <Message>
      <div dangerouslySetInnerHTML={{ __html: channel.tip }}></div>
    </Message>
  );
}
@@ -1,7 +1,7 @@
import { toast } from 'react-toastify';
import { toastConstants } from '../constants';
import {toast} from 'react-toastify';
import {toastConstants} from '../constants';
import React from 'react';
import { API } from './api';
import {API} from './api';

const HTMLToastContent = ({ htmlContent }) => {
  return <div dangerouslySetInnerHTML={{ __html: htmlContent }} />;
@@ -74,6 +74,7 @@ if (isMobile()) {
}

export function showError(error) {
  if (!error) return;
  console.error(error);
  if (error.message) {
    if (error.name === 'AxiosError') {
@@ -158,17 +159,7 @@ export function timestamp2string(timestamp) {
    second = '0' + second;
  }
  return (
    year +
    '-' +
    month +
    '-' +
    day +
    ' ' +
    hour +
    ':' +
    minute +
    ':' +
    second
    year + '-' + month + '-' + day + ' ' + hour + ':' + minute + ':' + second
  );
}
@@ -193,7 +184,6 @@ export const verifyJSON = (str) => {
export function shouldShowPrompt(id) {
  let prompt = localStorage.getItem(`prompt-${id}`);
  return !prompt;

}

export function setPromptShown(id) {
@@ -224,4 +214,4 @@ export function getChannelModels(type) {
    return channelModels[type];
  }
  return [];
}
}
@@ -104,8 +104,10 @@
    "model_mapping_placeholder": "Optional, used to modify model names in request body. A JSON string where keys are request model names and values are target model names",
    "system_prompt": "System Prompt",
    "system_prompt_placeholder": "Optional, used to force set system prompt. Use with custom model & model mapping. First create a unique custom model name above, then map it to a natively supported model",
    "base_url": "Proxy",
    "base_url_placeholder": "Optional, used for API calls through proxy. Enter proxy address in format: https://domain.com",
    "proxy_url": "Proxy",
    "proxy_url_placeholder": "This is optional and used for API calls via a proxy. Please enter the proxy URL, formatted as: https://domain.com",
    "base_url": "Base URL",
    "base_url_placeholder": "The Base URL required by the OpenAPI SDK",
    "key": "Key",
    "key_placeholder": "Please enter key",
    "batch": "Batch Create",
@@ -104,8 +104,10 @@
    "model_mapping_placeholder": "此项可选,用于修改请求体中的模型名称,为一个 JSON 字符串,键为请求中模型名称,值为要替换的模型名称",
    "system_prompt": "系统提示词",
    "system_prompt_placeholder": "此项可选,用于强制设置给定的系统提示词,请配合自定义模型 & 模型重定向使用,首先创建一个唯一的自定义模型名称并在上面填入,之后将该自定义模型重定向映射到该渠道一个原生支持的模型",
    "base_url": "代理",
    "base_url_placeholder": "此项可选,用于通过代理站来进行 API 调用,请输入代理站地址,格式为:https://domain.com",
    "proxy_url": "代理",
    "proxy_url_placeholder": "此项可选,用于通过代理站来进行 API 调用,请输入代理站地址,格式为:https://domain.com。注意,这里所需要填入的代理地址仅会在实际请求时替换域名部分,如果你想填入 OpenAI SDK 中所要求的 Base URL,请使用 OpenAI 兼容渠道类型",
    "base_url": "Base URL",
    "base_url_placeholder": "OpenAPI SDK 中所要求的 Base URL",
    "key": "密钥",
    "key_placeholder": "请输入密钥",
    "batch": "批量创建",
@@ -1,6 +1,6 @@
import React, {useEffect, useState} from 'react';
import {useTranslation} from 'react-i18next';
import {Button, Card, Form, Input, Message,} from 'semantic-ui-react';
import {Button, Card, Form, Input, Message} from 'semantic-ui-react';
import {useNavigate, useParams} from 'react-router-dom';
import {API, copy, getChannelModels, showError, showInfo, showSuccess, verifyJSON,} from '../../helpers';
import {CHANNEL_OPTIONS} from '../../constants';
@@ -339,6 +339,20 @@ const EditChannel = () => {
        {inputs.type === 8 && (
          <Form.Field>
            <Form.Input
              required
              label={t('channel.edit.proxy_url')}
              name='base_url'
              placeholder={t('channel.edit.proxy_url_placeholder')}
              onChange={handleInputChange}
              value={inputs.base_url}
              autoComplete='new-password'
            />
          </Form.Field>
        )}
        {inputs.type === 50 && (
          <Form.Field>
            <Form.Input
              required
              label={t('channel.edit.base_url')}
              name='base_url'
              placeholder={t('channel.edit.base_url_placeholder')}
@@ -637,12 +651,13 @@ const EditChannel = () => {
        {inputs.type !== 3 &&
          inputs.type !== 33 &&
          inputs.type !== 8 &&
          inputs.type !== 50 &&
          inputs.type !== 22 && (
            <Form.Field>
              <Form.Input
                label={t('channel.edit.base_url')}
                label={t('channel.edit.proxy_url')}
                name='base_url'
                placeholder={t('channel.edit.base_url_placeholder')}
                placeholder={t('channel.edit.proxy_url_placeholder')}
                onChange={handleInputChange}
                value={inputs.base_url}
                autoComplete='new-password'