Mirror of https://github.com/songquanpeng/one-api.git (synced 2026-03-03 18:24:25 +08:00)

Compare commits: v0.6.11-pr ... c71a3586bd (9 commits)

| SHA1 |
|---|
| c71a3586bd |
| 8b8cd03e85 |
| 54c38de813 |
| d6284bf6b0 |
| df5d2ca93d |
| fef7ae048b |
| dabaa795b9 |
| 3e17184c1e |
| d7e1b2a231 |

@@ -115,7 +115,7 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用
 19. 支持丰富的**自定义**设置,
     1. 支持自定义系统名称,logo 以及页脚。
     2. 支持自定义首页和关于页面,可以选择使用 HTML & Markdown 代码进行自定义,或者使用一个单独的网页通过 iframe 嵌入。
-20. 支持通过系统访问令牌调用管理 API,进而**在无需二开的情况下扩展和自定义** One API 的功能,详情请参考此处 [API 文档](./docs/API.md)。。
+20. 支持通过系统访问令牌调用管理 API,进而**在无需二开的情况下扩展和自定义** One API 的功能,详情请参考此处 [API 文档](./docs/API.md)。
 21. 支持 Cloudflare Turnstile 用户校验。
 22. 支持用户管理,支持**多种用户登录注册方式**:
     + 邮箱登录注册(支持注册邮箱白名单)以及通过邮箱进行密码重置。

@@ -5,9 +5,10 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"strings"
 
 	"github.com/gin-gonic/gin"
 
 	"github.com/songquanpeng/one-api/common/config"
 	"github.com/songquanpeng/one-api/common/helper"
 	channelhelper "github.com/songquanpeng/one-api/relay/adaptor"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
@@ -20,17 +21,12 @@ type Adaptor struct {
 }
 
 func (a *Adaptor) Init(meta *meta.Meta) {
 
 }
 
 func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
-	var defaultVersion string
-	switch meta.ActualModelName {
-	case "gemini-2.0-flash-exp",
-		"gemini-2.0-flash-thinking-exp",
-		"gemini-2.0-flash-thinking-exp-01-21":
-		defaultVersion = "v1beta"
-	default:
+	defaultVersion := config.GeminiVersion
+	if strings.Contains(meta.ActualModelName, "gemini-2.0") ||
+		strings.Contains(meta.ActualModelName, "gemini-1.5") {
+		defaultVersion = "v1beta"
+	}

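The hunk above replaces a per-model switch with a substring check: any gemini-1.5 or gemini-2.0 model is routed to the v1beta API, everything else keeps the configured `config.GeminiVersion`. A minimal sketch of that selection; the final URL below follows Google's documented Generative Language API pattern, since the adaptor's own URL assembly is truncated out of the hunk and is therefore an assumption:

```go
package main

import (
	"fmt"
	"strings"
)

// geminiVersion mirrors the new selection logic: gemini-1.5 and gemini-2.0
// models go to v1beta, everything else keeps the configured version
// (config.GeminiVersion in the adaptor).
func geminiVersion(model, configured string) string {
	if strings.Contains(model, "gemini-2.0") || strings.Contains(model, "gemini-1.5") {
		return "v1beta"
	}
	return configured
}

func main() {
	for _, model := range []string{"gemini-pro", "gemini-1.5-flash", "gemini-2.0-flash"} {
		v := geminiVersion(model, "v1") // "v1" stands in for the configured default
		// Endpoint pattern of Google's Generative Language API; the adaptor's
		// exact format string is not shown in the hunk above.
		fmt.Printf("https://generativelanguage.googleapis.com/%s/models/%s:generateContent\n", v, model)
	}
}
```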
@@ -4,8 +4,38 @@ package gemini
 
 var ModelList = []string{
 	"gemini-pro", "gemini-1.0-pro",
-	"gemini-1.5-flash", "gemini-1.5-pro",
+	// "gemma-2-2b-it", "gemma-2-9b-it", "gemma-2-27b-it",
+	"gemini-1.5-flash", "gemini-1.5-flash-8b",
+	"gemini-1.5-pro", "gemini-1.5-pro-experimental",
 	"text-embedding-004", "aqa",
-	"gemini-2.0-flash-exp",
-	"gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking-exp-01-21",
+	"gemini-2.0-flash", "gemini-2.0-flash-exp",
+	"gemini-2.0-flash-lite-preview-02-05",
+	"gemini-2.0-flash-thinking-exp-01-21",
+	"gemini-2.0-pro-exp-02-05",
 }
+
+// ModelsSupportSystemInstruction is the list of models that support system instruction.
+//
+// https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/system-instructions
+var ModelsSupportSystemInstruction = []string{
+	// "gemini-1.0-pro-002",
+	// "gemini-1.5-flash", "gemini-1.5-flash-001", "gemini-1.5-flash-002",
+	// "gemini-1.5-flash-8b",
+	// "gemini-1.5-pro", "gemini-1.5-pro-001", "gemini-1.5-pro-002",
+	// "gemini-1.5-pro-experimental",
+	"gemini-2.0-flash", "gemini-2.0-flash-exp",
+	"gemini-2.0-flash-thinking-exp-01-21",
+}
+
+// IsModelSupportSystemInstruction check if the model support system instruction.
+//
+// Because the main version of Go is 1.20, slice.Contains cannot be used
+func IsModelSupportSystemInstruction(model string) bool {
+	for _, m := range ModelsSupportSystemInstruction {
+		if m == model {
+			return true
+		}
+	}
+
+	return false
+}

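The comment in the new helper notes that the module still targets Go 1.20, so the standard library's `slices.Contains` is unavailable and a manual loop is used. For reference, a sketch of the equivalent check on a Go 1.21+ toolchain (hypothetical here, not part of the diff):

```go
package main

import (
	"fmt"
	"slices" // standard library since Go 1.21
)

// Same list as in the diff above.
var ModelsSupportSystemInstruction = []string{
	"gemini-2.0-flash", "gemini-2.0-flash-exp",
	"gemini-2.0-flash-thinking-exp-01-21",
}

// IsModelSupportSystemInstruction as it could look once the module targets
// Go 1.21 or newer; behaviourally identical to the loop in the diff.
func IsModelSupportSystemInstruction(model string) bool {
	return slices.Contains(ModelsSupportSystemInstruction, model)
}

func main() {
	fmt.Println(IsModelSupportSystemInstruction("gemini-2.0-flash")) // true
	fmt.Println(IsModelSupportSystemInstruction("gemini-1.5-pro"))   // false
}
```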
@@ -132,9 +132,16 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *ChatRequest {
 		}
 		// Converting system prompt to prompt from user for the same reason
 		if content.Role == "system" {
-			content.Role = "user"
 			shouldAddDummyModelMessage = true
+			if IsModelSupportSystemInstruction(textRequest.Model) {
+				geminiRequest.SystemInstruction = &content
+				geminiRequest.SystemInstruction.Role = ""
+				continue
+			} else {
+				content.Role = "user"
+			}
 		}
 
 		geminiRequest.Contents = append(geminiRequest.Contents, content)
 
 		// If a system message is the last message, we need to add a dummy model message to make gemini happy

@@ -1,10 +1,11 @@
 package gemini
 
 type ChatRequest struct {
-	Contents         []ChatContent        `json:"contents"`
-	SafetySettings   []ChatSafetySettings `json:"safety_settings,omitempty"`
-	GenerationConfig ChatGenerationConfig `json:"generation_config,omitempty"`
-	Tools            []ChatTools          `json:"tools,omitempty"`
+	Contents          []ChatContent        `json:"contents"`
+	SafetySettings    []ChatSafetySettings `json:"safety_settings,omitempty"`
+	GenerationConfig  ChatGenerationConfig `json:"generation_config,omitempty"`
+	Tools             []ChatTools          `json:"tools,omitempty"`
+	SystemInstruction *ChatContent         `json:"system_instruction,omitempty"`
 }
 
 type EmbeddingRequest struct {

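The new `SystemInstruction` pointer field, combined with the `ConvertRequest` change above, means a system message is either lifted into `system_instruction` (for supported models, with its role cleared) or demoted to a user message. A minimal sketch of how the field serializes, using simplified stand-in types (the adaptor's real `ChatContent` carries structured parts rather than plain strings):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// ChatContent here is a simplified stand-in for the adaptor's type.
type ChatContent struct {
	Role  string   `json:"role,omitempty"`
	Parts []string `json:"parts"`
}

type ChatRequest struct {
	Contents          []ChatContent `json:"contents"`
	SystemInstruction *ChatContent  `json:"system_instruction,omitempty"`
}

func main() {
	req := ChatRequest{
		Contents: []ChatContent{{Role: "user", Parts: []string{"Hello"}}},
		// Set only when IsModelSupportSystemInstruction returns true; the role
		// is cleared, matching the ConvertRequest change above.
		SystemInstruction: &ChatContent{Parts: []string{"You are a helpful assistant."}},
	}
	out, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(out))
	// For unsupported models the pointer stays nil and, thanks to omitempty,
	// no system_instruction key is emitted at all.
}
```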
@@ -32,16 +32,24 @@ func (a *Adaptor) Init(meta *meta.Meta) {
 func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
 	switch meta.ChannelType {
 	case channeltype.Azure:
+		defaultVersion := meta.Config.APIVersion
+
+		// https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python#api--feature-support
+		if strings.HasPrefix(meta.ActualModelName, "o1") ||
+			strings.HasPrefix(meta.ActualModelName, "o3") {
+			defaultVersion = "2024-12-01-preview"
+		}
+
 		if meta.Mode == relaymode.ImagesGenerations {
 			// https://learn.microsoft.com/en-us/azure/ai-services/openai/dall-e-quickstart?tabs=dalle3%2Ccommand-line&pivots=rest-api
 			// https://{resource_name}.openai.azure.com/openai/deployments/dall-e-3/images/generations?api-version=2024-03-01-preview
-			fullRequestURL := fmt.Sprintf("%s/openai/deployments/%s/images/generations?api-version=%s", meta.BaseURL, meta.ActualModelName, meta.Config.APIVersion)
+			fullRequestURL := fmt.Sprintf("%s/openai/deployments/%s/images/generations?api-version=%s", meta.BaseURL, meta.ActualModelName, defaultVersion)
 			return fullRequestURL, nil
 		}
 
 		// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
 		requestURL := strings.Split(meta.RequestURLPath, "?")[0]
-		requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, meta.Config.APIVersion)
+		requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, defaultVersion)
 		task := strings.TrimPrefix(requestURL, "/v1/")
 		model_ := meta.ActualModelName
 		model_ = strings.Replace(model_, ".", "", -1)

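In the Azure branch above, the channel's configured api-version is overridden for reasoning models, which need at least 2024-12-01-preview, and the chosen value is then appended to both the image-generation URL and the regular request path. A small sketch of that selection with illustrative inputs (the configured version and model names below are examples, not taken from the diff):

```go
package main

import (
	"fmt"
	"strings"
)

// azureAPIVersion mirrors the override above: o1/o3 deployments get the
// 2024-12-01-preview api-version, other models keep the channel's value.
func azureAPIVersion(configured, model string) string {
	if strings.HasPrefix(model, "o1") || strings.HasPrefix(model, "o3") {
		return "2024-12-01-preview"
	}
	return configured
}

func main() {
	configured := "2024-06-01" // illustrative channel setting
	for _, model := range []string{"gpt-4o", "o1-mini", "o3-mini"} {
		fmt.Printf("%-8s -> /v1/chat/completions?api-version=%s\n", model, azureAPIVersion(configured, model))
	}
}
```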
@@ -89,6 +97,23 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 		}
 		request.StreamOptions.IncludeUsage = true
 	}
 
+	// o1/o1-mini/o1-preview do not support system prompt/max_tokens/temperature
+	if strings.HasPrefix(request.Model, "o1") {
+		temperature := float64(1)
+		request.Temperature = &temperature // Only the default (1) value is supported
+		request.MaxTokens = 0
+		request.Messages = func(raw []model.Message) (filtered []model.Message) {
+			for i := range raw {
+				if raw[i].Role != "system" {
+					filtered = append(filtered, raw[i])
+				}
+			}
+
+			return
+		}(request.Messages)
+	}
+
 	return request, nil
 }

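The added block rewrites any o1-bound request in place: temperature is pinned to the only supported value (1), max_tokens is cleared, and system messages are filtered out before relaying. A self-contained sketch of the same transformation on pared-down request and message types (the real code operates on model.GeneralOpenAIRequest and model.Message):

```go
package main

import (
	"fmt"
	"strings"
)

// Message and Request are pared-down stand-ins for the relay's model types,
// limited to the fields needed to illustrate the o1 handling from the diff.
type Message struct {
	Role    string
	Content string
}

type Request struct {
	Model       string
	MaxTokens   int
	Temperature *float64
	Messages    []Message
}

// sanitizeForO1 mirrors the added ConvertRequest logic: pin temperature to 1,
// drop max_tokens, and strip system messages for o1-family models.
func sanitizeForO1(req *Request) {
	if !strings.HasPrefix(req.Model, "o1") {
		return
	}
	temperature := float64(1)
	req.Temperature = &temperature
	req.MaxTokens = 0
	filtered := make([]Message, 0, len(req.Messages))
	for _, m := range req.Messages {
		if m.Role != "system" {
			filtered = append(filtered, m)
		}
	}
	req.Messages = filtered
}

func main() {
	req := Request{
		Model:     "o1-mini",
		MaxTokens: 256,
		Messages: []Message{
			{Role: "system", Content: "You are terse."},
			{Role: "user", Content: "Hi"},
		},
	}
	sanitizeForO1(&req)
	fmt.Println(len(req.Messages), req.MaxTokens, *req.Temperature) // 1 0 1
}
```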
@@ -16,10 +16,12 @@ import (
 
 var ModelList = []string{
 	"gemini-pro", "gemini-pro-vision",
-	"gemini-1.5-pro-001", "gemini-1.5-flash-001",
-	"gemini-1.5-pro-002", "gemini-1.5-flash-002",
-	"gemini-2.0-flash-exp",
-	"gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking-exp-01-21",
+	"gemini-exp-1206",
+	"gemini-1.5-pro-001", "gemini-1.5-pro-002",
+	"gemini-1.5-flash-001", "gemini-1.5-flash-002",
+	"gemini-2.0-flash-exp", "gemini-2.0-flash-001",
+	"gemini-2.0-flash-lite-preview-02-05",
+	"gemini-2.0-flash-thinking-exp-01-21",
 }
 
 type Adaptor struct {

@@ -115,15 +115,24 @@ var ModelRatio = map[string]float64{
 	"bge-large-en": 0.002 * RMB,
 	"tao-8k":       0.002 * RMB,
 	// https://ai.google.dev/pricing
-	"gemini-pro":                          1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
-	"gemini-1.0-pro":                      1,
-	"gemini-1.5-pro":                      1,
-	"gemini-1.5-pro-001":                  1,
-	"gemini-1.5-flash":                    1,
-	"gemini-1.5-flash-001":                1,
-	"gemini-2.0-flash-exp":                1,
-	"gemini-2.0-flash-thinking-exp":       1,
-	"gemini-2.0-flash-thinking-exp-01-21": 1,
+	// https://cloud.google.com/vertex-ai/generative-ai/pricing
+	// "gemma-2-2b-it": 0,
+	// "gemma-2-9b-it": 0,
+	// "gemma-2-27b-it": 0,
+	"gemini-pro":                          0.25 * MILLI_USD, // $0.00025 / 1k characters -> $0.001 / 1k tokens
+	"gemini-1.0-pro":                      0.125 * MILLI_USD,
+	"gemini-1.5-pro":                      1.25 * MILLI_USD,
+	"gemini-1.5-pro-001":                  1.25 * MILLI_USD,
+	"gemini-1.5-pro-experimental":         1.25 * MILLI_USD,
+	"gemini-1.5-flash":                    0.075 * MILLI_USD,
+	"gemini-1.5-flash-001":                0.075 * MILLI_USD,
+	"gemini-1.5-flash-8b":                 0.0375 * MILLI_USD,
+	"gemini-2.0-flash-exp":                0.075 * MILLI_USD,
+	"gemini-2.0-flash":                    0.15 * MILLI_USD,
+	"gemini-2.0-flash-001":                0.15 * MILLI_USD,
+	"gemini-2.0-flash-lite-preview-02-05": 0.075 * MILLI_USD,
+	"gemini-2.0-flash-thinking-exp-01-21": 0.075 * MILLI_USD,
+	"gemini-2.0-pro-exp-02-05":            1.25 * MILLI_USD,
 	"aqa":                                 1,
 	// https://open.bigmodel.cn/pricing
 	"glm-zero-preview": 0.01 * RMB,

@@ -506,7 +515,7 @@ func GetCompletionRatio(name string, channelType int) float64 {
 		}
 		return 2
 	}
-	// including o1, o1-preview, o1-mini
+	// including o1/o1-preview/o1-mini
 	if strings.HasPrefix(name, "o1") {
 		return 4
 	}

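The Gemini entries move from a flat ratio of 1 to prices expressed in MILLI_USD, so each coefficient reads directly as dollars per million tokens (0.075 * MILLI_USD ≈ $0.075 per 1M prompt tokens, in line with Google's published Gemini pricing). The constants in the sketch below are assumptions that mirror one-api's usual convention of ratio 1 = $0.002 per 1K tokens; their exact definitions are not part of this diff:

```go
package main

import "fmt"

// Assumed constants, not shown in the diff and possibly differing from the repo.
const (
	USD       = 500.0        // ratio units per US dollar (per 1K tokens)
	MILLI_USD = USD / 1000.0 // ratio units per $0.001, i.e. $1 per 1M tokens
)

func main() {
	// "gemini-1.5-flash": 0.075 * MILLI_USD, taken from the hunk above.
	ratio := 0.075 * MILLI_USD
	perThousand := ratio * 0.002 // dollars per 1K prompt tokens
	perMillion := perThousand * 1000
	fmt.Printf("ratio=%.4f  $%.6f/1K tokens  $%.3f/1M tokens\n", ratio, perThousand, perMillion)
	// Output: ratio=0.0375  $0.000075/1K tokens  $0.075/1M tokens
}
```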
@@ -44,6 +44,9 @@ function renderType(type, t) {
 function renderBalance(type, balance, t) {
   switch (type) {
     case 1: // OpenAI
+      if (balance === 0) {
+        return <span>{t('channel.table.balance_not_supported')}</span>;
+      }
       return <span>${balance.toFixed(2)}</span>;
     case 4: // CloseAI
       return <span>¥{balance.toFixed(2)}</span>;

@@ -108,7 +111,7 @@ const ChannelsTable = () => {
 
   const loadChannels = async (startIdx) => {
     const res = await API.get(`/api/channel/?p=${startIdx}`);
-    const {success, message, data} = res.data;
+    const { success, message, data } = res.data;
     if (success) {
       let localChannels = data.map(processChannelData);
       if (startIdx === 0) {

@@ -588,7 +591,15 @@ const ChannelsTable = () => {
           />
         </Table.Cell>
         <Table.Cell>
-          <div>
+          <div
+            style={{
+              display: 'flex',
+              alignItems: 'center',
+              flexWrap: 'wrap',
+              gap: '2px',
+              rowGap: '6px',
+            }}
+          >
             <Button
               size={'tiny'}
               positive

@@ -1,5 +1,5 @@
-import {Label, Message} from 'semantic-ui-react';
-import {getChannelOption} from './helper';
+import { Label, Message } from 'semantic-ui-react';
+import { getChannelOption } from './helper';
 import React from 'react';
 
 export function renderText(text, limit) {

@@ -16,7 +16,15 @@ export function renderGroup(group) {
   let groups = group.split(',');
   groups.sort();
   return (
-    <>
+    <div
+      style={{
+        display: 'flex',
+        alignItems: 'center',
+        flexWrap: 'wrap',
+        gap: '2px',
+        rowGap: '6px',
+      }}
+    >
       {groups.map((group) => {
         if (group === 'vip' || group === 'pro') {
           return <Label color='yellow'>{group}</Label>;

@@ -25,7 +33,7 @@ export function renderGroup(group) {
       }
       return <Label>{group}</Label>;
     })}
-    </>
+    </div>
   );
 }

@@ -106,8 +114,8 @@ export function renderChannelTip(channelId) {
     return <></>;
   }
   return (
-    <Message>
-      <div dangerouslySetInnerHTML={{__html: channel.tip}}></div>
-    </Message>
+    <Message>
+      <div dangerouslySetInnerHTML={{ __html: channel.tip }}></div>
+    </Message>
   );
 }

@@ -1,7 +1,7 @@
-import { toast } from 'react-toastify';
-import { toastConstants } from '../constants';
+import {toast} from 'react-toastify';
+import {toastConstants} from '../constants';
 import React from 'react';
-import { API } from './api';
+import {API} from './api';
 
 const HTMLToastContent = ({ htmlContent }) => {
   return <div dangerouslySetInnerHTML={{ __html: htmlContent }} />;

@@ -74,6 +74,7 @@ if (isMobile()) {
 }
 
 export function showError(error) {
   if (!error) return;
+  console.error(error);
   if (error.message) {
     if (error.name === 'AxiosError') {

@@ -158,17 +159,7 @@ export function timestamp2string(timestamp) {
     second = '0' + second;
   }
   return (
-    year +
-    '-' +
-    month +
-    '-' +
-    day +
-    ' ' +
-    hour +
-    ':' +
-    minute +
-    ':' +
-    second
+    year + '-' + month + '-' + day + ' ' + hour + ':' + minute + ':' + second
   );
 }

@@ -193,7 +184,6 @@ export const verifyJSON = (str) => {
 export function shouldShowPrompt(id) {
   let prompt = localStorage.getItem(`prompt-${id}`);
   return !prompt;
-
 }
 
 export function setPromptShown(id) {

@@ -224,4 +214,4 @@ export function getChannelModels(type) {
     return channelModels[type];
   }
   return [];
-}
+}
