Compare commits

...

20 Commits

Author SHA1 Message Date
CaIon
8fe8340b6e fix: fix gemini channel test 2024-03-08 21:38:51 +08:00
CaIon
26ef906c61 fix: fix claude channel test 2024-03-08 21:38:43 +08:00
CaIon
655dfe0d09 feat: update claude default model ratio 2024-03-08 21:17:32 +08:00
CaIon
f43b268520 fix: fix claude 3 request missing the 'max_tokens' field 2024-03-08 21:16:12 +08:00
CaIon
37113c0e96 perf: improve the display of the Other Settings page 2024-03-08 20:35:15 +08:00
CaIon
3c3c53051d fix: fix gemini 2024-03-08 20:29:04 +08:00
CaIon
6a83c8ad86 fix: hide unused buttons 2024-03-08 20:14:49 +08:00
CaIon
0640dd81fd fix: fix Azure channel test (close #101) 2024-03-08 20:06:40 +08:00
CaIon
cb50fcaffe Update README.md 2024-03-08 19:51:52 +08:00
Calcium-Ion
eca48268b2 Merge pull request #103 from Calcium-Ion/dev
feat: support Claude 3
2024-03-08 19:47:24 +08:00
CaIon
4a0af1ea3c feat: support Claude 3 2024-03-08 19:43:33 +08:00
CaIon
c2965eb835 feat: support claude3 not stream 2024-03-08 18:26:18 +08:00
Calcium-Ion
4186880e4c Merge pull request #96 from QuentinHsu/refactor-other-setting
refactor(OtherSetting): change UI, improve interaction
2024-03-08 16:52:45 +08:00
1808837298@qq.com
280c63e1d4 feat: add a top-up history button 2024-03-07 22:41:04 +08:00
1808837298@qq.com
0e06be8c3e fix: fix bug 2024-03-06 19:53:31 +08:00
QuentinHsu
931d22c96f perf(OtherSetting): code logic 2024-03-06 18:26:22 +08:00
QuentinHsu
6413bf342a refactor(OtherSetting): change UI, improve interaction 2024-03-06 17:57:53 +08:00
1808837298@qq.com
626217fbd4 fix: incorrect billing in stream mode (close #95) 2024-03-06 17:41:55 +08:00
1808837298@qq.com
9de2d21e1a fix: restore WeChat official-account QR-code login (close #7) 2024-03-06 16:53:21 +08:00
1808837298@qq.com
3ab4f145db feat: support the system role on some channels (close #89) 2024-03-06 14:16:04 +08:00
36 changed files with 778 additions and 348 deletions

View File

@@ -43,6 +43,7 @@
This version additionally supports the following models:
1. Third-party model **gps** (gpt-4-gizmo-*)
2. Zhipu glm-4v (glm-4v vision)
3. Anthropic Claude 3 (claude-3-opus-20240229, claude-3-sonnet-20240229)
You can add the custom model gpt-4-gizmo-* to a channel. This is not an official OpenAI model but a third-party one, so it cannot be called with an official OpenAI key.
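
As an illustration of the newly supported Claude 3 models, here is a minimal Go sketch of a chat request sent through the relay's OpenAI-compatible endpoint; the base URL and token are placeholders, not values from this changeset:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Build an OpenAI-style chat completion request for a Claude 3 model.
	payload, _ := json.Marshal(map[string]any{
		"model": "claude-3-opus-20240229",
		"messages": []map[string]string{
			{"role": "user", "content": "Hello!"},
		},
		// Claude requires max_tokens; per this changeset the relay now
		// defaults it to 4096 when omitted (see requestOpenAI2ClaudeMessage below).
		"max_tokens": 256,
	})
	// Placeholder deployment address and token.
	req, _ := http.NewRequest("POST", "http://localhost:3000/v1/chat/completions", bytes.NewReader(payload))
	req.Header.Set("Authorization", "Bearer sk-xxx")
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}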

View File

@@ -186,7 +186,7 @@ const (
const (
ChannelTypeUnknown = 0
ChannelTypeOpenAI = 1
ChannelTypeAPI2D = 2
ChannelTypeMidjourney = 2
ChannelTypeAzure = 3
ChannelTypeCloseAI = 4
ChannelTypeOpenAISB = 5
@@ -238,7 +238,7 @@ var ChannelBaseURLs = []string{
"https://api.aiproxy.io", // 21
"https://fastgpt.run/api/openapi", // 22
"https://hunyuan.cloud.tencent.com", //23
"", //24
"https://api.moonshot.cn", //25
"https://open.bigmodel.cn", //26
"https://generativelanguage.googleapis.com", //24
"https://api.moonshot.cn", //25
"https://open.bigmodel.cn", //26
}

View File

@@ -12,7 +12,7 @@ import (
"strings"
)
func DecodeBase64ImageData(base64String string) (image.Config, string, error) {
func DecodeBase64ImageData(base64String string) (image.Config, string, string, error) {
// strip the data-URL prefix from the base64 string (if present)
if idx := strings.Index(base64String, ","); idx != -1 {
base64String = base64String[idx+1:]
@@ -22,13 +22,13 @@ func DecodeBase64ImageData(base64String string) (image.Config, string, error) {
decodedData, err := base64.StdEncoding.DecodeString(base64String)
if err != nil {
fmt.Println("Error: Failed to decode base64 string")
return image.Config{}, "", err
return image.Config{}, "", "", err
}
// create a reader over the decoded data
reader := bytes.NewReader(decodedData)
config, format, err := getImageConfig(reader)
return config, format, err
return config, format, base64String, err
}
func IsImageUrl(url string) (bool, error) {
@@ -42,6 +42,7 @@ func IsImageUrl(url string) (bool, error) {
return true, nil
}
// GetImageFromUrl fetches an image's MIME type and base64-encoded data
func GetImageFromUrl(url string) (mimeType string, data string, err error) {
isImage, err := IsImageUrl(url)
if !isImage {

View File

@@ -61,8 +61,11 @@ var ModelRatio = map[string]float64{
"text-moderation-latest": 0.1,
"dall-e-2": 8,
"dall-e-3": 16,
"claude-instant-1": 0.815, // $1.63 / 1M tokens
"claude-2": 5.51, // $11.02 / 1M tokens
"claude-instant-1": 0.4, // $0.8 / 1M tokens
"claude-2.0": 4, // $8 / 1M tokens
"claude-2.1": 4, // $8 / 1M tokens
"claude-3-sonnet-20240229": 1.5, // $3 / 1M tokens
"claude-3-opus-20240229": 7.5, // $15 / 1M tokens
"ERNIE-Bot": 0.8572, // ¥0.012 / 1k tokens
"ERNIE-Bot-turbo": 0.5715, // ¥0.008 / 1k tokens
"ERNIE-Bot-4": 8.572, // ¥0.12 / 1k tokens
@@ -179,10 +182,11 @@ func GetCompletionRatio(name string) float64 {
return 2
}
if strings.HasPrefix(name, "claude-instant-1") {
return 3.38
}
if strings.HasPrefix(name, "claude-2") {
return 2.965517
return 3
} else if strings.HasPrefix(name, "claude-2") {
return 3
} else if strings.HasPrefix(name, "claude-3") {
return 5
}
return 1
}
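
For reference, these values follow the one-api convention that a model ratio of 1.0 equals $0.002 per 1K tokens ($2 per 1M), and the completion ratio is the output-to-input price ratio. A quick sanity check of the new Claude numbers, assuming that convention:

package main

import "fmt"

func main() {
	// Assumed convention: ratio 1.0 == $2 per 1M prompt tokens.
	const dollarsPerRatioUnit = 2.0

	fmt.Println(3.0 / dollarsPerRatioUnit)  // claude-3-sonnet: $3 / 1M input  -> 1.5
	fmt.Println(15.0 / dollarsPerRatioUnit) // claude-3-opus:   $15 / 1M input -> 7.5
	fmt.Println(8.0 / dollarsPerRatioUnit)  // claude-2.x:      $8 / 1M input  -> 4

	// Completion ratio 5 for claude-3 reflects output priced at 5x input
	// (e.g. sonnet: $15 output vs $3 input per 1M tokens).
	fmt.Println(15.0 / 3.0) // 5
}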

View File

@@ -24,6 +24,9 @@ import (
)
func testChannel(channel *model.Channel, testModel string) (err error, openaiErr *dto.OpenAIError) {
if channel.Type == common.ChannelTypeMidjourney {
return errors.New("midjourney channel test is not supported"), nil
}
common.SysLog(fmt.Sprintf("testing channel %d with model %s", channel.Id, testModel))
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
@@ -37,6 +40,19 @@ func testChannel(channel *model.Channel, testModel string) (err error, openaiErr
c.Request.Header.Set("Content-Type", "application/json")
c.Set("channel", channel.Type)
c.Set("base_url", channel.GetBaseURL())
switch channel.Type {
case common.ChannelTypeAzure:
c.Set("api_version", channel.Other)
case common.ChannelTypeXunfei:
c.Set("api_version", channel.Other)
//case common.ChannelTypeAIProxyLibrary:
// c.Set("library_id", channel.Other)
case common.ChannelTypeGemini:
c.Set("api_version", channel.Other)
case common.ChannelTypeAli:
c.Set("plugin", channel.Other)
}
meta := relaycommon.GenRelayInfo(c)
apiType := constant.ChannelType2APIType(channel.Type)
adaptor := relay.GetAdaptor(apiType)
@@ -45,13 +61,14 @@ func testChannel(channel *model.Channel, testModel string) (err error, openaiErr
}
if testModel == "" {
testModel = adaptor.GetModelList()[0]
meta.UpstreamModelName = testModel
}
request := buildTestRequest()
request.Model = testModel
meta.UpstreamModelName = testModel
adaptor.Init(meta, *request)
request.Model = testModel
meta.UpstreamModelName = testModel
convertedRequest, err := adaptor.ConvertRequest(c, constant.RelayModeChatCompletions, request)
if err != nil {
return err, nil
@@ -68,11 +85,11 @@ func testChannel(channel *model.Channel, testModel string) (err error, openaiErr
}
if resp.StatusCode != http.StatusOK {
err := relaycommon.RelayErrorHandler(resp)
return fmt.Errorf("status code %d: %s", resp.StatusCode, err.OpenAIError.Message), &err.OpenAIError
return fmt.Errorf("status code %d: %s", resp.StatusCode, err.Error.Message), &err.Error
}
usage, respErr := adaptor.DoResponse(c, resp, meta)
if respErr != nil {
return fmt.Errorf("%s", respErr.OpenAIError.Message), &respErr.OpenAIError
return fmt.Errorf("%s", respErr.Error.Message), &respErr.Error
}
if usage == nil {
return errors.New("usage is nil"), nil

View File

@@ -38,24 +38,24 @@ func Relay(c *gin.Context) {
retryTimes = common.RetryTimes
}
if retryTimes > 0 {
c.Redirect(http.StatusTemporaryRedirect, fmt.Sprintf("%s?retry=%d&error=%s", c.Request.URL.Path, retryTimes-1, err.Message))
c.Redirect(http.StatusTemporaryRedirect, fmt.Sprintf("%s?retry=%d&error=%s", c.Request.URL.Path, retryTimes-1, err.Error.Message))
} else {
if err.StatusCode == http.StatusTooManyRequests {
//err.OpenAIError.Message = "当前分组上游负载已饱和,请稍后再试"
//err.Error.Message = "当前分组上游负载已饱和,请稍后再试"
}
err.OpenAIError.Message = common.MessageWithRequestId(err.OpenAIError.Message, requestId)
err.Error.Message = common.MessageWithRequestId(err.Error.Message, requestId)
c.JSON(err.StatusCode, gin.H{
"error": err.OpenAIError,
"error": err.Error,
})
}
channelId := c.GetInt("channel_id")
autoBan := c.GetBool("auto_ban")
common.LogError(c.Request.Context(), fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message))
common.LogError(c.Request.Context(), fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Error.Message))
// https://platform.openai.com/docs/guides/error-codes/api-errors
if service.ShouldDisableChannel(&err.OpenAIError, err.StatusCode) && autoBan {
if service.ShouldDisableChannel(&err.Error, err.StatusCode) && autoBan {
channelId := c.GetInt("channel_id")
channelName := c.GetString("channel_name")
service.DisableChannel(channelId, channelName, err.Message)
service.DisableChannel(channelId, channelName, err.Error.Message)
}
}
}
@@ -110,7 +110,7 @@ func RelayMidjourney(c *gin.Context) {
}
channelId := c.GetInt("channel_id")
common.SysError(fmt.Sprintf("relay error (channel #%d): %s", channelId, fmt.Sprintf("%s %s", err.Description, err.Result)))
//if shouldDisableChannel(&err.OpenAIError) {
//if shouldDisableChannel(&err.Error) {
// channelId := c.GetInt("channel_id")
// channelName := c.GetString("channel_name")
// disableChannel(channelId, channelName, err.Result)

View File

@@ -8,6 +8,47 @@ type OpenAIError struct {
}
type OpenAIErrorWithStatusCode struct {
OpenAIError
StatusCode int `json:"status_code"`
Error OpenAIError `json:"error"`
StatusCode int `json:"status_code"`
}
type GeneralErrorResponse struct {
Error OpenAIError `json:"error"`
Message string `json:"message"`
Msg string `json:"msg"`
Err string `json:"err"`
ErrorMsg string `json:"error_msg"`
Header struct {
Message string `json:"message"`
} `json:"header"`
Response struct {
Error struct {
Message string `json:"message"`
} `json:"error"`
} `json:"response"`
}
func (e GeneralErrorResponse) ToMessage() string {
if e.Error.Message != "" {
return e.Error.Message
}
if e.Message != "" {
return e.Message
}
if e.Msg != "" {
return e.Msg
}
if e.Err != "" {
return e.Err
}
if e.ErrorMsg != "" {
return e.ErrorMsg
}
if e.Header.Message != "" {
return e.Header.Message
}
if e.Response.Error.Message != "" {
return e.Response.Error.Message
}
return ""
}
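
The new GeneralErrorResponse lets RelayErrorHandler surface a human-readable message no matter which vendor-specific field carries it. A sketch of the fallback behavior with illustrative, made-up upstream error bodies:

package main

import (
	"encoding/json"
	"fmt"

	"one-api/dto"
)

func main() {
	// Made-up upstream error bodies in a few vendor styles.
	samples := []string{
		`{"error":{"message":"quota exceeded"}}`, // OpenAI-style, used as-is
		`{"error_msg":"rate limit reached"}`,     // Baidu-style
		`{"header":{"message":"auth failed"}}`,   // Xunfei-style
		`{"message":"invalid request"}`,          // bare message
	}
	for _, s := range samples {
		var e dto.GeneralErrorResponse
		_ = json.Unmarshal([]byte(s), &e)
		fmt.Println(e.ToMessage())
	}
}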

View File

@@ -82,6 +82,14 @@ func (m Message) StringContent() string {
return string(m.Content)
}
func (m Message) IsStringContent() bool {
var stringContent string
if err := json.Unmarshal(m.Content, &stringContent); err == nil {
return true
}
return false
}
func (m Message) ParseContent() []MediaMessage {
var contentList []MediaMessage
var stringContent string
@@ -130,9 +138,3 @@ func (m Message) ParseContent() []MediaMessage {
return nil
}
type Usage struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
TotalTokens int `json:"total_tokens"`
}
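
The new IsStringContent helper distinguishes the two shapes OpenAI allows for message content: a plain JSON string, or an array of typed parts used by vision requests. A standalone sketch of the same check (payloads illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

func isStringContent(content json.RawMessage) bool {
	var s string
	return json.Unmarshal(content, &s) == nil
}

func main() {
	// Plain-text message content...
	fmt.Println(isStringContent(json.RawMessage(`"Hello"`))) // true
	// ...versus multimodal content parts, as used for vision requests.
	fmt.Println(isStringContent(json.RawMessage(
		`[{"type":"text","text":"What is in this image?"},` +
			`{"type":"image_url","image_url":{"url":"https://example.com/cat.png"}}]`))) // false
}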

View File

@@ -61,3 +61,9 @@ type CompletionsStreamResponse struct {
FinishReason string `json:"finish_reason"`
} `json:"choices"`
}
type Usage struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
TotalTokens int `json:"total_tokens"`
}

View File

@@ -1,8 +1,8 @@
package ali
type AliMessage struct {
User string `json:"user"`
Bot string `json:"bot"`
Content string `json:"content"`
Role string `json:"role"`
}
type AliInput struct {
@@ -11,10 +11,11 @@ type AliInput struct {
}
type AliParameters struct {
TopP float64 `json:"top_p,omitempty"`
TopK int `json:"top_k,omitempty"`
Seed uint64 `json:"seed,omitempty"`
EnableSearch bool `json:"enable_search,omitempty"`
TopP float64 `json:"top_p,omitempty"`
TopK int `json:"top_k,omitempty"`
Seed uint64 `json:"seed,omitempty"`
EnableSearch bool `json:"enable_search,omitempty"`
IncrementalOutput bool `json:"incremental_output,omitempty"`
}
type AliChatRequest struct {

View File

@@ -14,28 +14,23 @@ import (
// https://help.aliyun.com/document_detail/613695.html?spm=a2c4g.2399480.0.0.1adb778fAdzP9w#341800c0f8w0r
const EnableSearchModelSuffix = "-internet"
func requestOpenAI2Ali(request dto.GeneralOpenAIRequest) *AliChatRequest {
messages := make([]AliMessage, 0, len(request.Messages))
prompt := ""
for i := 0; i < len(request.Messages); i++ {
message := request.Messages[i]
if message.Role == "system" {
messages = append(messages, AliMessage{
User: message.StringContent(),
Bot: "Okay",
})
continue
} else {
if i == len(request.Messages)-1 {
prompt = message.StringContent()
break
}
messages = append(messages, AliMessage{
User: message.StringContent(),
Bot: string(request.Messages[i+1].Content),
})
i++
}
messages = append(messages, AliMessage{
Content: message.StringContent(),
Role: strings.ToLower(message.Role),
})
}
enableSearch := false
aliModel := request.Model
if strings.HasSuffix(aliModel, EnableSearchModelSuffix) {
enableSearch = true
aliModel = strings.TrimSuffix(aliModel, EnableSearchModelSuffix)
}
return &AliChatRequest{
Model: request.Model,
@@ -43,12 +38,11 @@ func requestOpenAI2Ali(request dto.GeneralOpenAIRequest) *AliChatRequest {
Prompt: prompt,
History: messages,
},
//Parameters: AliParameters{ // ChatGPT's parameters are not compatible with Ali's
// TopP: request.TopP,
// TopK: 50,
// //Seed: 0,
// //EnableSearch: false,
//},
Parameters: AliParameters{
IncrementalOutput: request.Stream,
Seed: uint64(request.Seed),
EnableSearch: enableSearch,
},
}
}
@@ -77,7 +71,7 @@ func aliEmbeddingHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorW
if aliResponse.Code != "" {
return &dto.OpenAIErrorWithStatusCode{
OpenAIError: dto.OpenAIError{
Error: dto.OpenAIError{
Message: aliResponse.Message,
Type: aliResponse.Code,
Param: aliResponse.RequestId,
@@ -242,7 +236,7 @@ func aliHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStatus
}
if aliResponse.Code != "" {
return &dto.OpenAIErrorWithStatusCode{
OpenAIError: dto.OpenAIError{
Error: dto.OpenAIError{
Message: aliResponse.Message,
Type: aliResponse.Code,
Param: aliResponse.RequestId,

View File

@@ -24,21 +24,10 @@ var baiduTokenStore sync.Map
func requestOpenAI2Baidu(request dto.GeneralOpenAIRequest) *BaiduChatRequest {
messages := make([]BaiduMessage, 0, len(request.Messages))
for _, message := range request.Messages {
if message.Role == "system" {
messages = append(messages, BaiduMessage{
Role: "user",
Content: message.StringContent(),
})
messages = append(messages, BaiduMessage{
Role: "assistant",
Content: "Okay",
})
} else {
messages = append(messages, BaiduMessage{
Role: message.Role,
Content: message.StringContent(),
})
}
messages = append(messages, BaiduMessage{
Role: message.Role,
Content: message.StringContent(),
})
}
return &BaiduChatRequest{
Messages: messages,
@@ -184,7 +173,7 @@ func baiduHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStat
}
if baiduResponse.ErrorMsg != "" {
return &dto.OpenAIErrorWithStatusCode{
OpenAIError: dto.OpenAIError{
Error: dto.OpenAIError{
Message: baiduResponse.ErrorMsg,
Type: "baidu_error",
Param: "",
@@ -220,7 +209,7 @@ func baiduEmbeddingHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErro
}
if baiduResponse.ErrorMsg != "" {
return &dto.OpenAIErrorWithStatusCode{
OpenAIError: dto.OpenAIError{
Error: dto.OpenAIError{
Message: baiduResponse.ErrorMsg,
Type: "baidu_error",
Param: "",

View File

@@ -9,18 +9,32 @@ import (
"one-api/dto"
"one-api/relay/channel"
relaycommon "one-api/relay/common"
"one-api/service"
"strings"
)
const (
RequestModeCompletion = 1
RequestModeMessage = 2
)
type Adaptor struct {
RequestMode int
}
func (a *Adaptor) Init(info *relaycommon.RelayInfo, request dto.GeneralOpenAIRequest) {
if strings.HasPrefix(info.UpstreamModelName, "claude-3") {
a.RequestMode = RequestModeMessage
} else {
a.RequestMode = RequestModeCompletion
}
}
func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
return fmt.Sprintf("%s/v1/complete", info.BaseUrl), nil
if a.RequestMode == RequestModeMessage {
return fmt.Sprintf("%s/v1/messages", info.BaseUrl), nil
} else {
return fmt.Sprintf("%s/v1/complete", info.BaseUrl), nil
}
}
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, info *relaycommon.RelayInfo) error {
@@ -38,7 +52,11 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *dto.Gen
if request == nil {
return nil, errors.New("request is nil")
}
return request, nil
if a.RequestMode == RequestModeCompletion {
return requestOpenAI2ClaudeComplete(*request), nil
} else {
return requestOpenAI2ClaudeMessage(*request)
}
}
func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (*http.Response, error) {
@@ -47,11 +65,9 @@ func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, request
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage *dto.Usage, err *dto.OpenAIErrorWithStatusCode) {
if info.IsStream {
var responseText string
err, responseText = claudeStreamHandler(c, resp)
usage = service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
err, usage = claudeStreamHandler(a.RequestMode, info.UpstreamModelName, info.PromptTokens, c, resp)
} else {
err, usage = claudeHandler(c, resp, info.PromptTokens, info.UpstreamModelName)
err, usage = claudeHandler(a.RequestMode, c, resp, info.PromptTokens, info.UpstreamModelName)
}
return
}

View File

@@ -1,7 +1,7 @@
package claude
var ModelList = []string{
"claude-instant-1", "claude-2", "claude-2.0", "claude-2.1",
"claude-instant-1.2", "claude-2", "claude-2.0", "claude-2.1", "claude-3-sonnet-20240229", "claude-3-opus-20240229",
}
var ChannelName = "claude"

View File

@@ -4,14 +4,36 @@ type ClaudeMetadata struct {
UserId string `json:"user_id"`
}
type ClaudeMediaMessage struct {
Type string `json:"type"`
Text string `json:"text,omitempty"`
Source *ClaudeMessageSource `json:"source,omitempty"`
Usage *ClaudeUsage `json:"usage,omitempty"`
StopReason *string `json:"stop_reason,omitempty"`
}
type ClaudeMessageSource struct {
Type string `json:"type"`
MediaType string `json:"media_type"`
Data string `json:"data"`
}
type ClaudeMessage struct {
Role string `json:"role"`
Content any `json:"content"`
}
type ClaudeRequest struct {
Model string `json:"model"`
Prompt string `json:"prompt"`
MaxTokensToSample uint `json:"max_tokens_to_sample"`
StopSequences []string `json:"stop_sequences,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
TopK int `json:"top_k,omitempty"`
Model string `json:"model"`
Prompt string `json:"prompt,omitempty"`
System string `json:"system,omitempty"`
Messages []ClaudeMessage `json:"messages,omitempty"`
MaxTokensToSample uint `json:"max_tokens_to_sample,omitempty"`
MaxTokens uint `json:"max_tokens,omitempty"`
StopSequences []string `json:"stop_sequences,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
TopK int `json:"top_k,omitempty"`
//ClaudeMetadata `json:"metadata,omitempty"`
Stream bool `json:"stream,omitempty"`
}
@@ -22,8 +44,25 @@ type ClaudeError struct {
}
type ClaudeResponse struct {
Completion string `json:"completion"`
StopReason string `json:"stop_reason"`
Model string `json:"model"`
Error ClaudeError `json:"error"`
Id string `json:"id"`
Type string `json:"type"`
Content []ClaudeMediaMessage `json:"content"`
Completion string `json:"completion"`
StopReason string `json:"stop_reason"`
Model string `json:"model"`
Error ClaudeError `json:"error"`
Usage ClaudeUsage `json:"usage"`
Index int `json:"index"` // stream only
Delta *ClaudeMediaMessage `json:"delta"` // stream only
Message *ClaudeResponse `json:"message"` // stream only: message_start
}
//type ClaudeResponseChoice struct {
// Index int `json:"index"`
// Type string `json:"type"`
//}
type ClaudeUsage struct {
InputTokens int `json:"input_tokens"`
OutputTokens int `json:"output_tokens"`
}

View File

@@ -17,6 +17,8 @@ func stopReasonClaude2OpenAI(reason string) string {
switch reason {
case "stop_sequence":
return "stop"
case "end_turn":
return "stop"
case "max_tokens":
return "length"
default:
@@ -24,7 +26,7 @@ func stopReasonClaude2OpenAI(reason string) string {
}
}
func requestOpenAI2Claude(textRequest dto.GeneralOpenAIRequest) *ClaudeRequest {
func requestOpenAI2ClaudeComplete(textRequest dto.GeneralOpenAIRequest) *ClaudeRequest {
claudeRequest := ClaudeRequest{
Model: textRequest.Model,
Prompt: "",
@@ -44,7 +46,9 @@ func requestOpenAI2Claude(textRequest dto.GeneralOpenAIRequest) *ClaudeRequest {
} else if message.Role == "assistant" {
prompt += fmt.Sprintf("\n\nAssistant: %s", message.Content)
} else if message.Role == "system" {
prompt += fmt.Sprintf("\n\nSystem: %s", message.Content)
if prompt == "" {
prompt = message.StringContent()
}
}
}
prompt += "\n\nAssistant:"
@@ -52,51 +56,154 @@ func requestOpenAI2Claude(textRequest dto.GeneralOpenAIRequest) *ClaudeRequest {
return &claudeRequest
}
func streamResponseClaude2OpenAI(claudeResponse *ClaudeResponse) *dto.ChatCompletionsStreamResponse {
var choice dto.ChatCompletionsStreamResponseChoice
choice.Delta.Content = claudeResponse.Completion
finishReason := stopReasonClaude2OpenAI(claudeResponse.StopReason)
if finishReason != "null" {
choice.FinishReason = &finishReason
func requestOpenAI2ClaudeMessage(textRequest dto.GeneralOpenAIRequest) (*ClaudeRequest, error) {
claudeRequest := ClaudeRequest{
Model: textRequest.Model,
MaxTokens: textRequest.MaxTokens,
StopSequences: nil,
Temperature: textRequest.Temperature,
TopP: textRequest.TopP,
Stream: textRequest.Stream,
}
var response dto.ChatCompletionsStreamResponse
response.Object = "chat.completion.chunk"
response.Model = claudeResponse.Model
response.Choices = []dto.ChatCompletionsStreamResponseChoice{choice}
return &response
if claudeRequest.MaxTokens == 0 {
claudeRequest.MaxTokens = 4096
}
claudeMessages := make([]ClaudeMessage, 0)
for _, message := range textRequest.Messages {
if message.Role == "system" {
claudeRequest.System = message.StringContent()
} else {
claudeMessage := ClaudeMessage{
Role: message.Role,
}
if message.IsStringContent() {
claudeMessage.Content = message.StringContent()
} else {
claudeMediaMessages := make([]ClaudeMediaMessage, 0)
for _, mediaMessage := range message.ParseContent() {
claudeMediaMessage := ClaudeMediaMessage{
Type: mediaMessage.Type,
}
if mediaMessage.Type == "text" {
claudeMediaMessage.Text = mediaMessage.Text
} else {
imageUrl := mediaMessage.ImageUrl.(dto.MessageImageUrl)
claudeMediaMessage.Type = "image"
claudeMediaMessage.Source = &ClaudeMessageSource{
Type: "base64",
}
// check whether the image is supplied as a URL
if strings.HasPrefix(imageUrl.Url, "http") {
// it is a URL: fetch the image's MIME type and base64-encoded data
mimeType, data, _ := common.GetImageFromUrl(imageUrl.Url)
claudeMediaMessage.Source.MediaType = mimeType
claudeMediaMessage.Source.Data = data
} else {
_, format, base64String, err := common.DecodeBase64ImageData(imageUrl.Url)
if err != nil {
return nil, err
}
claudeMediaMessage.Source.MediaType = "image/" + format
claudeMediaMessage.Source.Data = base64String
}
}
claudeMediaMessages = append(claudeMediaMessages, claudeMediaMessage)
}
claudeMessage.Content = claudeMediaMessages
}
claudeMessages = append(claudeMessages, claudeMessage)
}
}
claudeRequest.Prompt = ""
claudeRequest.Messages = claudeMessages
return &claudeRequest, nil
}
func responseClaude2OpenAI(claudeResponse *ClaudeResponse) *dto.OpenAITextResponse {
content, _ := json.Marshal(strings.TrimPrefix(claudeResponse.Completion, " "))
choice := dto.OpenAITextResponseChoice{
Index: 0,
Message: dto.Message{
Role: "assistant",
Content: content,
Name: nil,
},
FinishReason: stopReasonClaude2OpenAI(claudeResponse.StopReason),
func streamResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) (*dto.ChatCompletionsStreamResponse, *ClaudeUsage) {
var response dto.ChatCompletionsStreamResponse
var claudeUsage *ClaudeUsage
response.Object = "chat.completion.chunk"
response.Model = claudeResponse.Model
response.Choices = make([]dto.ChatCompletionsStreamResponseChoice, 0)
var choice dto.ChatCompletionsStreamResponseChoice
if reqMode == RequestModeCompletion {
choice.Delta.Content = claudeResponse.Completion
finishReason := stopReasonClaude2OpenAI(claudeResponse.StopReason)
if finishReason != "null" {
choice.FinishReason = &finishReason
}
} else {
if claudeResponse.Type == "message_start" {
response.Id = claudeResponse.Message.Id
response.Model = claudeResponse.Message.Model
claudeUsage = &claudeResponse.Message.Usage
} else if claudeResponse.Type == "content_block_delta" {
choice.Index = claudeResponse.Index
choice.Delta.Content = claudeResponse.Delta.Text
} else if claudeResponse.Type == "message_delta" {
finishReason := stopReasonClaude2OpenAI(*claudeResponse.Delta.StopReason)
if finishReason != "null" {
choice.FinishReason = &finishReason
}
claudeUsage = &claudeResponse.Usage
}
}
response.Choices = append(response.Choices, choice)
return &response, claudeUsage
}
func responseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) *dto.OpenAITextResponse {
choices := make([]dto.OpenAITextResponseChoice, 0)
fullTextResponse := dto.OpenAITextResponse{
Id: fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
Object: "chat.completion",
Created: common.GetTimestamp(),
Choices: []dto.OpenAITextResponseChoice{choice},
}
if reqMode == RequestModeCompletion {
content, _ := json.Marshal(strings.TrimPrefix(claudeResponse.Completion, " "))
choice := dto.OpenAITextResponseChoice{
Index: 0,
Message: dto.Message{
Role: "assistant",
Content: content,
Name: nil,
},
FinishReason: stopReasonClaude2OpenAI(claudeResponse.StopReason),
}
choices = append(choices, choice)
} else {
fullTextResponse.Id = claudeResponse.Id
for i, message := range claudeResponse.Content {
content, _ := json.Marshal(message.Text)
choice := dto.OpenAITextResponseChoice{
Index: i,
Message: dto.Message{
Role: "assistant",
Content: content,
},
FinishReason: stopReasonClaude2OpenAI(claudeResponse.StopReason),
}
choices = append(choices, choice)
}
}
fullTextResponse.Choices = choices
return &fullTextResponse
}
func claudeStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStatusCode, string) {
responseText := ""
func claudeStreamHandler(requestMode int, modelName string, promptTokens int, c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
responseId := fmt.Sprintf("chatcmpl-%s", common.GetUUID())
var usage dto.Usage
responseText := ""
createdTime := common.GetTimestamp()
scanner := bufio.NewScanner(resp.Body)
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := strings.Index(string(data), "\r\n\r\n"); i >= 0 {
return i + 4, data[0:i], nil
if i := strings.Index(string(data), "\n"); i >= 0 {
return i + 1, data[0:i], nil
}
if atEOF {
return len(data), data, nil
@@ -108,10 +215,10 @@ func claudeStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorW
go func() {
for scanner.Scan() {
data := scanner.Text()
if !strings.HasPrefix(data, "event: completion") {
if !strings.HasPrefix(data, "data: ") {
continue
}
data = strings.TrimPrefix(data, "event: completion\r\ndata: ")
data = strings.TrimPrefix(data, "data: ")
dataChan <- data
}
stopChan <- true
@@ -128,10 +235,31 @@ func claudeStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorW
common.SysError("error unmarshalling stream response: " + err.Error())
return true
}
responseText += claudeResponse.Completion
response := streamResponseClaude2OpenAI(&claudeResponse)
response, claudeUsage := streamResponseClaude2OpenAI(requestMode, &claudeResponse)
if requestMode == RequestModeCompletion {
responseText += claudeResponse.Completion
responseId = response.Id
} else {
if claudeResponse.Type == "message_start" {
// message_start: capture prompt usage
responseId = claudeResponse.Message.Id
modelName = claudeResponse.Message.Model
usage.PromptTokens = claudeUsage.InputTokens
} else if claudeResponse.Type == "content_block_delta" {
responseText += claudeResponse.Delta.Text
} else if claudeResponse.Type == "message_delta" {
usage.CompletionTokens = claudeUsage.OutputTokens
usage.TotalTokens = claudeUsage.InputTokens + claudeUsage.OutputTokens
} else {
return true
}
}
//response.Id = responseId
response.Id = responseId
response.Created = createdTime
response.Model = modelName
jsonStr, err := json.Marshal(response)
if err != nil {
common.SysError("error marshalling stream response: " + err.Error())
@@ -146,12 +274,15 @@ func claudeStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorW
})
err := resp.Body.Close()
if err != nil {
return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
return nil, responseText
if requestMode == RequestModeCompletion {
usage = *service.ResponseText2Usage(responseText, modelName, promptTokens)
}
return nil, &usage
}
func claudeHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
func claudeHandler(requestMode int, c *gin.Context, resp *http.Response, promptTokens int, model string) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
@@ -167,7 +298,7 @@ func claudeHandler(c *gin.Context, resp *http.Response, promptTokens int, model
}
if claudeResponse.Error.Type != "" {
return &dto.OpenAIErrorWithStatusCode{
OpenAIError: dto.OpenAIError{
Error: dto.OpenAIError{
Message: claudeResponse.Error.Message,
Type: claudeResponse.Error.Type,
Param: "",
@@ -176,12 +307,17 @@ func claudeHandler(c *gin.Context, resp *http.Response, promptTokens int, model
StatusCode: resp.StatusCode,
}, nil
}
fullTextResponse := responseClaude2OpenAI(&claudeResponse)
fullTextResponse := responseClaude2OpenAI(requestMode, &claudeResponse)
completionTokens := service.CountTokenText(claudeResponse.Completion, model)
usage := dto.Usage{
PromptTokens: promptTokens,
CompletionTokens: completionTokens,
TotalTokens: promptTokens + completionTokens,
usage := dto.Usage{}
if requestMode == RequestModeCompletion {
usage.PromptTokens = promptTokens
usage.CompletionTokens = completionTokens
usage.TotalTokens = promptTokens + completionTokens
} else {
usage.PromptTokens = claudeResponse.Usage.InputTokens
usage.CompletionTokens = claudeResponse.Usage.OutputTokens
usage.TotalTokens = claudeResponse.Usage.InputTokens + claudeResponse.Usage.OutputTokens
}
fullTextResponse.Usage = usage
jsonResponse, err := json.Marshal(fullTextResponse)
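
For context on why the scanner now splits on single newlines and filters on the "data: " prefix: in Messages mode the upstream emits alternating event:/data: lines rather than the old completion events. A small sketch of that filtering against an abridged stream (payloads trimmed and illustrative, with shapes following the event types handled above):

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// Abridged Claude Messages SSE stream; JSON payloads are illustrative.
	stream := "event: message_start\n" +
		"data: {\"type\":\"message_start\",\"message\":{\"id\":\"msg_01\",\"model\":\"claude-3-opus-20240229\",\"usage\":{\"input_tokens\":10,\"output_tokens\":1}}}\n" +
		"event: content_block_delta\n" +
		"data: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\"Hi\"}}\n" +
		"event: message_delta\n" +
		"data: {\"type\":\"message_delta\",\"delta\":{\"stop_reason\":\"end_turn\"},\"usage\":{\"output_tokens\":2}}\n"

	scanner := bufio.NewScanner(strings.NewReader(stream))
	for scanner.Scan() {
		line := scanner.Text()
		// The handler now skips "event:" lines instead of matching
		// the old "event: completion" marker.
		if !strings.HasPrefix(line, "data: ") {
			continue
		}
		fmt.Println(strings.TrimPrefix(line, "data: "))
	}
}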

View File

@@ -20,6 +20,9 @@ func (a *Adaptor) Init(info *relaycommon.RelayInfo, request dto.GeneralOpenAIReq
func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
version := "v1"
if info.ApiVersion != "" {
version = info.ApiVersion
}
action := "generateContent"
if info.IsStream {
action = "streamGenerateContent"

View File

@@ -140,8 +140,8 @@ func responseGeminiChat2OpenAI(response *GeminiChatResponse) *dto.OpenAITextResp
},
FinishReason: relaycommon.StopFinishReason,
}
content, _ = json.Marshal(candidate.Content.Parts[0].Text)
if len(candidate.Content.Parts) > 0 {
content, _ = json.Marshal(candidate.Content.Parts[0].Text)
choice.Message.Content = content
}
fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
@@ -246,7 +246,7 @@ func geminiChatHandler(c *gin.Context, resp *http.Response, promptTokens int, mo
}
if len(geminiResponse.Candidates) == 0 {
return &dto.OpenAIErrorWithStatusCode{
OpenAIError: dto.OpenAIError{
Error: dto.OpenAIError{
Message: "No candidates returned",
Type: "server_error",
Param: "",

View File

@@ -50,10 +50,10 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, info *re
return nil
}
req.Header.Set("Authorization", "Bearer "+info.ApiKey)
if info.ChannelType == common.ChannelTypeOpenRouter {
req.Header.Set("HTTP-Referer", "https://github.com/songquanpeng/one-api")
req.Header.Set("X-Title", "One API")
}
//if info.ChannelType == common.ChannelTypeOpenRouter {
// req.Header.Set("HTTP-Referer", "https://github.com/songquanpeng/one-api")
// req.Header.Set("X-Title", "One API")
//}
return nil
}

View File

@@ -127,8 +127,8 @@ func OpenaiHandler(c *gin.Context, resp *http.Response, promptTokens int, model
}
if textResponse.Error.Type != "" {
return &dto.OpenAIErrorWithStatusCode{
OpenAIError: textResponse.Error,
StatusCode: resp.StatusCode,
Error: textResponse.Error,
StatusCode: resp.StatusCode,
}, nil
}
// Reset response body

View File

@@ -146,7 +146,7 @@ func palmHandler(c *gin.Context, resp *http.Response, promptTokens int, model st
}
if palmResponse.Error.Code != 0 || len(palmResponse.Candidates) == 0 {
return &dto.OpenAIErrorWithStatusCode{
OpenAIError: dto.OpenAIError{
Error: dto.OpenAIError{
Message: palmResponse.Error.Message,
Type: palmResponse.Error.Status,
Param: "",

View File

@@ -175,7 +175,7 @@ func tencentHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithSt
}
if TencentResponse.Error.Code != 0 {
return &dto.OpenAIErrorWithStatusCode{
OpenAIError: dto.OpenAIError{
Error: dto.OpenAIError{
Message: TencentResponse.Error.Message,
Code: TencentResponse.Error.Code,
},

View File

@@ -24,8 +24,9 @@ import (
func requestOpenAI2Xunfei(request dto.GeneralOpenAIRequest, xunfeiAppId string, domain string) *XunfeiChatRequest {
messages := make([]XunfeiMessage, 0, len(request.Messages))
shouldConvertSystemMessage := !strings.HasSuffix(request.Model, "3.5")
for _, message := range request.Messages {
if message.Role == "system" {
if message.Role == "system" && shouldConvertSystemMessage {
messages = append(messages, XunfeiMessage{
Role: "user",
Content: message.StringContent(),
@@ -126,7 +127,7 @@ func buildXunfeiAuthUrl(hostUrl string, apiKey, apiSecret string) string {
}
func xunfeiStreamHandler(c *gin.Context, textRequest dto.GeneralOpenAIRequest, appId string, apiSecret string, apiKey string) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
domain, authUrl := getXunfeiAuthUrl(c, apiKey, apiSecret)
domain, authUrl := getXunfeiAuthUrl(c, apiKey, apiSecret, textRequest.Model)
dataChan, stopChan, err := xunfeiMakeRequest(textRequest, domain, authUrl, appId)
if err != nil {
return service.OpenAIErrorWrapper(err, "make xunfei request err", http.StatusInternalServerError), nil
@@ -156,7 +157,7 @@ func xunfeiStreamHandler(c *gin.Context, textRequest dto.GeneralOpenAIRequest, a
}
func xunfeiHandler(c *gin.Context, textRequest dto.GeneralOpenAIRequest, appId string, apiSecret string, apiKey string) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
domain, authUrl := getXunfeiAuthUrl(c, apiKey, apiSecret)
domain, authUrl := getXunfeiAuthUrl(c, apiKey, apiSecret, textRequest.Model)
dataChan, stopChan, err := xunfeiMakeRequest(textRequest, domain, authUrl, appId)
if err != nil {
return service.OpenAIErrorWrapper(err, "make xunfei request err", http.StatusInternalServerError), nil
@@ -235,20 +236,44 @@ func xunfeiMakeRequest(textRequest dto.GeneralOpenAIRequest, domain, authUrl, ap
return dataChan, stopChan, nil
}
func getXunfeiAuthUrl(c *gin.Context, apiKey string, apiSecret string) (string, string) {
query := c.Request.URL.Query()
apiVersion := query.Get("api-version")
if apiVersion == "" {
apiVersion = c.GetString("api_version")
}
if apiVersion == "" {
apiVersion = "v1.1"
common.SysLog("api_version not found, use default: " + apiVersion)
}
domain := "general"
if apiVersion != "v1.1" {
domain += strings.Split(apiVersion, ".")[0]
func apiVersion2domain(apiVersion string) string {
switch apiVersion {
case "v1.1":
return "general"
case "v2.1":
return "generalv2"
case "v3.1":
return "generalv3"
case "v3.5":
return "generalv3.5"
}
return "general" + apiVersion
}
func getXunfeiAuthUrl(c *gin.Context, apiKey string, apiSecret string, modelName string) (string, string) {
apiVersion := getAPIVersion(c, modelName)
domain := apiVersion2domain(apiVersion)
authUrl := buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/%s/chat", apiVersion), apiKey, apiSecret)
return domain, authUrl
}
func getAPIVersion(c *gin.Context, modelName string) string {
query := c.Request.URL.Query()
apiVersion := query.Get("api-version")
if apiVersion != "" {
return apiVersion
}
parts := strings.Split(modelName, "-")
if len(parts) == 2 {
apiVersion = parts[1]
return apiVersion
}
apiVersion = c.GetString("api_version")
if apiVersion != "" {
return apiVersion
}
apiVersion = "v1.1"
common.SysLog("api_version not found, using default: " + apiVersion)
return apiVersion
}
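
A worked example of the new resolution chain (explicit api-version query parameter, then the model-name suffix, then the channel's api_version, then the v1.1 default): for the newly listed SparkDesk-v3.5, the suffix alone determines the Spark domain and websocket URL. A standalone sketch reusing the mapping above:

package main

import (
	"fmt"
	"strings"
)

// Same mapping as apiVersion2domain in this changeset.
func apiVersion2domain(apiVersion string) string {
	switch apiVersion {
	case "v1.1":
		return "general"
	case "v2.1":
		return "generalv2"
	case "v3.1":
		return "generalv3"
	case "v3.5":
		return "generalv3.5"
	}
	return "general" + apiVersion
}

func main() {
	model := "SparkDesk-v3.5"
	parts := strings.Split(model, "-")
	apiVersion := parts[1]                  // "v3.5"
	domain := apiVersion2domain(apiVersion) // "generalv3.5"
	fmt.Println(domain)
	fmt.Printf("wss://spark-api.xf-yun.com/%s/chat\n", apiVersion)
}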

View File

@@ -244,7 +244,7 @@ func zhipuHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStat
}
if !zhipuResponse.Success {
return &dto.OpenAIErrorWithStatusCode{
OpenAIError: dto.OpenAIError{
Error: dto.OpenAIError{
Message: zhipuResponse.Msg,
Type: "zhipu_error",
Param: "",

View File

@@ -234,8 +234,8 @@ func zhipuHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStat
}
if textResponse.Error.Type != "" {
return &dto.OpenAIErrorWithStatusCode{
OpenAIError: textResponse.Error,
StatusCode: resp.StatusCode,
Error: textResponse.Error,
StatusCode: resp.StatusCode,
}, nil
}
// Reset response body

View File

@@ -57,7 +57,7 @@ func GenRelayInfo(c *gin.Context) *RelayInfo {
info.BaseUrl = common.ChannelBaseURLs[channelType]
}
if info.ChannelType == common.ChannelTypeAzure {
info.ApiVersion = GetAzureAPIVersion(c)
info.ApiVersion = GetAPIVersion(c)
}
return info
}

View File

@@ -17,10 +17,10 @@ import (
var StopFinishReason = "stop"
func RelayErrorHandler(resp *http.Response) (openAIErrorWithStatusCode *dto.OpenAIErrorWithStatusCode) {
openAIErrorWithStatusCode = &dto.OpenAIErrorWithStatusCode{
func RelayErrorHandler(resp *http.Response) (OpenAIErrorWithStatusCode *dto.OpenAIErrorWithStatusCode) {
OpenAIErrorWithStatusCode = &dto.OpenAIErrorWithStatusCode{
StatusCode: resp.StatusCode,
OpenAIError: dto.OpenAIError{
Error: dto.OpenAIError{
Message: fmt.Sprintf("bad response status code %d", resp.StatusCode),
Type: "upstream_error",
Code: "bad_response_status_code",
@@ -40,7 +40,7 @@ func RelayErrorHandler(resp *http.Response) (openAIErrorWithStatusCode *dto.Open
if err != nil {
return
}
openAIErrorWithStatusCode.OpenAIError = textResponse.Error
OpenAIErrorWithStatusCode.Error = textResponse.Error
return
}
@@ -66,12 +66,3 @@ func GetAPIVersion(c *gin.Context) string {
}
return apiVersion
}
func GetAzureAPIVersion(c *gin.Context) string {
query := c.Request.URL.Query()
apiVersion := query.Get("api-version")
if apiVersion == "" {
apiVersion = c.GetString("api_version")
}
return apiVersion
}

View File

@@ -113,7 +113,7 @@ func AudioHelper(c *gin.Context, relayMode int) *dto.OpenAIErrorWithStatusCode {
fullRequestURL := relaycommon.GetFullRequestURL(baseURL, requestURL, channelType)
if relayMode == relayconstant.RelayModeAudioTranscription && channelType == common.ChannelTypeAzure {
// https://learn.microsoft.com/en-us/azure/ai-services/openai/whisper-quickstart?tabs=command-line#rest-api
apiVersion := relaycommon.GetAzureAPIVersion(c)
apiVersion := relaycommon.GetAPIVersion(c)
fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/audio/transcriptions?api-version=%s", baseURL, audioRequest.Model, apiVersion)
}

View File

@@ -2,6 +2,7 @@ package relay
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
@@ -148,10 +149,19 @@ func TextHelper(c *gin.Context) *dto.OpenAIErrorWithStatusCode {
}
resp, err := adaptor.DoRequest(c, relayInfo, requestBody)
if err != nil {
return service.OpenAIErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
}
relayInfo.IsStream = relayInfo.IsStream || strings.HasPrefix(resp.Header.Get("Content-Type"), "text/event-stream")
if resp.StatusCode != http.StatusOK {
returnPreConsumedQuota(c, relayInfo.TokenId, userQuota, preConsumedQuota)
return service.RelayErrorHandler(resp)
}
usage, openaiErr := adaptor.DoResponse(c, resp, relayInfo)
if openaiErr != nil {
returnPreConsumedQuota(c, relayInfo.TokenId, userQuota, preConsumedQuota)
return openaiErr
}
postConsumeQuota(c, relayInfo, *textRequest, usage, ratio, preConsumedQuota, userQuota, modelRatio, groupRatio, modelPrice)
@@ -218,6 +228,18 @@ func preConsumeQuota(c *gin.Context, preConsumedQuota int, relayInfo *relaycommo
return preConsumedQuota, userQuota, nil
}
func returnPreConsumedQuota(c *gin.Context, tokenId int, userQuota int, preConsumedQuota int) {
if preConsumedQuota != 0 {
go func(ctx context.Context) {
// return pre-consumed quota
err := model.PostConsumeTokenQuota(tokenId, userQuota, -preConsumedQuota, 0, false)
if err != nil {
common.SysError("error return pre-consumed quota: " + err.Error())
}
}(c)
}
}
func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, textRequest dto.GeneralOpenAIRequest, usage *dto.Usage, ratio float64, preConsumedQuota int, userQuota int, modelRatio float64, groupRatio float64, modelPrice float64) {
useTimeSeconds := time.Now().Unix() - relayInfo.StartTime.Unix()
promptTokens := usage.PromptTokens

View File

@@ -1,9 +1,13 @@
package service
import (
"encoding/json"
"fmt"
"io"
"net/http"
"one-api/common"
"one-api/dto"
"strconv"
"strings"
)
@@ -23,7 +27,42 @@ func OpenAIErrorWrapper(err error, code string, statusCode int) *dto.OpenAIError
Code: code,
}
return &dto.OpenAIErrorWithStatusCode{
OpenAIError: openAIError,
StatusCode: statusCode,
Error: openAIError,
StatusCode: statusCode,
}
}
func RelayErrorHandler(resp *http.Response) (errWithStatusCode *dto.OpenAIErrorWithStatusCode) {
errWithStatusCode = &dto.OpenAIErrorWithStatusCode{
StatusCode: resp.StatusCode,
Error: dto.OpenAIError{
Message: "",
Type: "upstream_error",
Code: "bad_response_status_code",
Param: strconv.Itoa(resp.StatusCode),
},
}
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return
}
err = resp.Body.Close()
if err != nil {
return
}
var errResponse dto.GeneralErrorResponse
err = json.Unmarshal(responseBody, &errResponse)
if err != nil {
return
}
if errResponse.Error.Message != "" {
// OpenAI format error, so we override the default one
errWithStatusCode.Error = errResponse.Error
} else {
errWithStatusCode.Error.Message = errResponse.ToMessage()
}
if errWithStatusCode.Error.Message == "" {
errWithStatusCode.Error.Message = fmt.Sprintf("bad response status code %d", resp.StatusCode)
}
return
}

View File

@@ -74,7 +74,7 @@ func getImageToken(imageUrl *dto.MessageImageUrl) (int, error) {
config, format, err = common.DecodeUrlImageData(imageUrl.Url)
} else {
common.SysLog(fmt.Sprintf("decoding image"))
config, format, err = common.DecodeBase64ImageData(imageUrl.Url)
config, format, _, err = common.DecodeBase64ImageData(imageUrl.Url)
}
if err != nil {
return 0, err

View File

@@ -4,12 +4,13 @@ import { UserContext } from '../context/User';
import { API, getLogo, isMobile, showError, showInfo, showSuccess, showWarning } from '../helpers';
import { onGitHubOAuthClicked } from './utils';
import Turnstile from "react-turnstile";
import { Layout, Card, Image, Form, Button, Divider, Modal } from "@douyinfe/semi-ui";
import { Layout, Card, Image, Form, Button, Divider, Modal, Icon } from '@douyinfe/semi-ui';
import Title from "@douyinfe/semi-ui/lib/es/typography/title";
import Text from "@douyinfe/semi-ui/lib/es/typography/text";
import TelegramLoginButton from 'react-telegram-login';
import { IconGithubLogo } from '@douyinfe/semi-icons';
import WeChatIcon from './WeChatIcon';
const LoginForm = () => {
const [inputs, setInputs] = useState({
@@ -179,16 +180,16 @@ const LoginForm = () => {
) : (
<></>
)}
{/*{status.wechat_login ? (*/}
{/* <Button*/}
{/* circular*/}
{/* color='green'*/}
{/* icon='wechat'*/}
{/* onClick={onWeChatLoginClicked}*/}
{/* />*/}
{/*) : (*/}
{/* <></>*/}
{/*)}*/}
{status.wechat_login ? (
<Button
type='primary'
style={{color: 'rgba(var(--semi-green-5), 1)'}}
icon={<Icon svg={<WeChatIcon />} />}
onClick={onWeChatLoginClicked}
/>
) : (
<></>
)}
{status.telegram_oauth ? (
<TelegramLoginButton dataOnauth={onTelegramLoginClicked} botName={status.telegram_bot_name} />
@@ -200,40 +201,34 @@ const LoginForm = () => {
) : (
<></>
)}
{/*<Modal*/}
{/* onClose={() => setShowWeChatLoginModal(false)}*/}
{/* onOpen={() => setShowWeChatLoginModal(true)}*/}
{/* open={showWeChatLoginModal}*/}
{/* size={'mini'}*/}
{/*>*/}
{/* <Modal.Content>*/}
{/* <Modal.Description>*/}
{/* <Image src={status.wechat_qrcode} fluid/>*/}
{/* <div style={{textAlign: 'center'}}>*/}
{/* <p>*/}
{/* 微信扫码关注公众号,输入「验证码」获取验证码(三分钟内有效)*/}
{/* </p>*/}
{/* </div>*/}
{/* <Form size='large'>*/}
{/* <Form.Input*/}
{/* field={'wechat_verification_code'}*/}
{/* placeholder='验证码'*/}
{/* name='wechat_verification_code'*/}
{/* value={inputs.wechat_verification_code}*/}
{/* onChange={handleChange}*/}
{/* />*/}
{/* <Button*/}
{/* color=''*/}
{/* fluid*/}
{/* size='large'*/}
{/* onClick={onSubmitWeChatVerificationCode}*/}
{/* >*/}
{/* 登录*/}
{/* </Button>*/}
{/* </Form>*/}
{/* </Modal.Description>*/}
{/* </Modal.Content>*/}
{/*</Modal>*/}
<Modal
title="微信扫码登录"
visible={showWeChatLoginModal}
maskClosable={true}
onOk={onSubmitWeChatVerificationCode}
onCancel={() => setShowWeChatLoginModal(false)}
okText={'登录'}
size={'small'}
centered={true}
>
<div style={{ display: 'flex', alignItems: 'center', flexDirection: 'column' }}>
<img src={status.wechat_qrcode}/>
</div>
<div style={{textAlign: 'center'}}>
<p>
微信扫码关注公众号,输入「验证码」获取验证码(三分钟内有效)
</p>
</div>
<Form size='large'>
<Form.Input
field={'wechat_verification_code'}
placeholder='验证码'
label={'验证码'}
value={inputs.wechat_verification_code}
onChange={(value) => handleChange('wechat_verification_code', value)}
/>
</Form>
</Modal>
</Card>
{turnstileEnabled ? (
<div style={{ display: 'flex', justifyContent: 'center', marginTop: 20 }}>

View File

@@ -1,15 +1,15 @@
import React, { useEffect, useState } from 'react';
import { Button, Divider, Form, Grid, Header, Message, Modal } from 'semantic-ui-react';
import React, { useEffect, useRef, useState } from 'react';
import { Col, Row , Form, Button, Banner } from '@douyinfe/semi-ui';
import { API, showError, showSuccess } from '../helpers';
import { marked } from 'marked';
const OtherSetting = () => {
let [inputs, setInputs] = useState({
Footer: '',
Notice: '',
About: '',
SystemName: '',
Logo: '',
Footer: '',
About: '',
HomePageContent: ''
});
let [loading, setLoading] = useState(false);
@@ -19,25 +19,6 @@ const OtherSetting = () => {
content: ''
});
const getOptions = async () => {
const res = await API.get('/api/option/');
const { success, message, data } = res.data;
if (success) {
let newInputs = {};
data.forEach((item) => {
if (item.key in inputs) {
newInputs[item.key] = item.value;
}
});
setInputs(newInputs);
} else {
showError(message);
}
};
useEffect(() => {
getOptions().then();
}, []);
const updateOption = async (key, value) => {
setLoading(true);
@@ -54,33 +35,105 @@ const OtherSetting = () => {
setLoading(false);
};
const handleInputChange = async (e, { name, value }) => {
const [loadingInput, setLoadingInput] = useState({
Notice: false,
SystemName: false,
Logo: false,
HomePageContent: false,
About: false,
Footer: false
});
const handleInputChange = async (value, e) => {
const name = e.target.id;
setInputs((inputs) => ({ ...inputs, [name]: value }));
};
// General settings
const formAPISettingGeneral = useRef();
// General settings - Notice
const submitNotice = async () => {
await updateOption('Notice', inputs.Notice);
try {
setLoadingInput((loadingInput) => ({ ...loadingInput, Notice: true }));
await updateOption('Notice', inputs.Notice);
showSuccess('公告已更新');
} catch (error) {
console.error("公告更新失败", error);
showError("公告更新失败")
} finally {
setLoadingInput((loadingInput) => ({ ...loadingInput, Notice: false }));
}
};
const submitFooter = async () => {
await updateOption('Footer', inputs.Footer);
};
// Personalization settings
const formAPIPersonalization = useRef();
// Personalization settings - SystemName
const submitSystemName = async () => {
await updateOption('SystemName', inputs.SystemName);
try {
setLoadingInput((loadingInput) => ({ ...loadingInput, SystemName: true }));
await updateOption('SystemName', inputs.SystemName);
showSuccess('系统名称已更新');
} catch (error) {
console.error("系统名称更新失败", error);
showError("系统名称更新失败")
} finally {
setLoadingInput((loadingInput) => ({ ...loadingInput, SystemName: false }));
}
};
// Personalization settings - Logo
const submitLogo = async () => {
await updateOption('Logo', inputs.Logo);
try {
setLoadingInput((loadingInput) => ({ ...loadingInput, Logo: true }));
await updateOption('Logo', inputs.Logo);
showSuccess('Logo 已更新');
} catch (error) {
console.error("Logo 更新失败", error);
showError("Logo 更新失败")
} finally {
setLoadingInput((loadingInput) => ({ ...loadingInput, Logo: false }));
}
};
const submitAbout = async () => {
await updateOption('About', inputs.About);
};
// Personalization settings - home page content
const submitOption = async (key) => {
await updateOption(key, inputs[key]);
try {
setLoadingInput((loadingInput) => ({ ...loadingInput, HomePageContent: true }));
await updateOption(key, inputs[key]);
showSuccess('首页内容已更新');
} catch (error) {
console.error("首页内容更新失败", error);
showError("首页内容更新失败")
} finally {
setLoadingInput((loadingInput) => ({ ...loadingInput, HomePageContent: false }));
}
};
// Personalization settings - About
const submitAbout = async () => {
try {
setLoadingInput((loadingInput) => ({ ...loadingInput, About: true }));
await updateOption('About', inputs.About);
showSuccess('关于内容已更新');
} catch (error) {
console.error("关于内容更新失败", error);
showError("关于内容更新失败");
} finally {
setLoadingInput((loadingInput) => ({ ...loadingInput, About: false }));
}
};
// Personalization settings - Footer
const submitFooter = async () => {
try {
setLoadingInput((loadingInput) => ({ ...loadingInput, Footer: true }));
await updateOption('Footer', inputs.Footer);
showSuccess('页脚内容已更新');
} catch (error) {
console.error("页脚内容更新失败", error);
showError("页脚内容更新失败");
} finally {
setLoadingInput((loadingInput) => ({ ...loadingInput, Footer: false }));
}
};
const openGitHubRelease = () => {
window.location =
@@ -102,82 +155,99 @@ const OtherSetting = () => {
setShowUpdateModal(true);
}
};
const getOptions = async () => {
const res = await API.get('/api/option/');
const { success, message, data } = res.data;
if (success) {
let newInputs = {};
data.forEach((item) => {
if (item.key in inputs) {
newInputs[item.key] = item.value;
}
});
setInputs(newInputs);
formAPISettingGeneral.current.setValues(newInputs);
formAPIPersonalization.current.setValues(newInputs);
} else {
showError(message);
}
};
useEffect( () => {
getOptions();
}, []);
return (
<Grid columns={1}>
<Grid.Column>
<Form loading={loading}>
<Header as='h3'>通用设置</Header>
{/*<Form.Button onClick={checkUpdate}>检查更新</Form.Button>*/}
<Form.Group widths='equal'>
<Row >
<Col span={24}>
{/* General settings */}
<Form values={inputs} getFormApi={formAPI => formAPISettingGeneral.current = formAPI} style={{marginBottom: 15}}>
<Form.Section text={'通用设置'}>
<Form.TextArea
label='公告'
placeholder='在此输入新的公告内容,支持 Markdown & HTML 代码'
value={inputs.Notice}
name='Notice'
label={'公告'}
placeholder={'在此输入新的公告内容,支持 Markdown & HTML 代码'}
field={'Notice'}
onChange={handleInputChange}
style={{ minHeight: 150, fontFamily: 'JetBrains Mono, Consolas' }}
/>
</Form.Group>
<Form.Button onClick={submitNotice}>保存公告</Form.Button>
<Divider />
<Header as='h3'>个性化设置</Header>
<Form.Group widths='equal'>
<Form.Input
label='系统名称'
placeholder='在此输入系统名称'
value={inputs.SystemName}
name='SystemName'
onChange={handleInputChange}
/>
</Form.Group>
<Form.Button onClick={submitSystemName}>设置系统名称</Form.Button>
<Form.Group widths='equal'>
<Form.Input
label='Logo 图片地址'
placeholder='在此输入 Logo 图片地址'
value={inputs.Logo}
name='Logo'
type='url'
onChange={handleInputChange}
/>
</Form.Group>
<Form.Button onClick={submitLogo}>设置 Logo</Form.Button>
<Form.Group widths='equal'>
<Form.TextArea
label='首页内容'
placeholder='在此输入首页内容,支持 Markdown & HTML 代码,设置后首页的状态信息将不再显示。如果输入的是一个链接,则会使用该链接作为 iframe 的 src 属性,这允许你设置任意网页作为首页。'
value={inputs.HomePageContent}
name='HomePageContent'
onChange={handleInputChange}
style={{ minHeight: 150, fontFamily: 'JetBrains Mono, Consolas' }}
/>
</Form.Group>
<Form.Button onClick={() => submitOption('HomePageContent')}>保存首页内容</Form.Button>
<Form.Group widths='equal'>
<Form.TextArea
label='关于'
placeholder='在此输入新的关于内容,支持 Markdown & HTML 代码。如果输入的是一个链接,则会使用该链接作为 iframe 的 src 属性,这允许你设置任意网页作为关于页面。'
value={inputs.About}
name='About'
onChange={handleInputChange}
style={{ minHeight: 150, fontFamily: 'JetBrains Mono, Consolas' }}
/>
</Form.Group>
<Form.Button onClick={submitAbout}>保存关于</Form.Button>
<Message>移除 One API 的版权标识必须首先获得授权，项目维护需要花费大量精力，如果本项目对你有意义，请主动支持本项目。</Message>
<Form.Group widths='equal'>
<Form.Input
label='页脚'
placeholder='在此输入新的页脚,留空则使用默认页脚,支持 HTML 代码'
value={inputs.Footer}
name='Footer'
onChange={handleInputChange}
/>
</Form.Group>
<Form.Button onClick={submitFooter}>设置页脚</Form.Button>
style={{ fontFamily: 'JetBrains Mono, Consolas' }}
autosize={{ minRows: 6, maxRows: 12 }}
/>
<Button onClick={submitNotice} loading={loadingInput['Notice']}>设置公告</Button>
</Form.Section>
</Form>
</Grid.Column>
{/* Personalization settings */}
<Form values={inputs} getFormApi={formAPI => formAPIPersonalization.current = formAPI} style={{marginBottom: 15}}>
<Form.Section text={'个性化设置'}>
<Form.Input
label={'系统名称'}
placeholder={'在此输入系统名称'}
field={'SystemName'}
onChange={handleInputChange}
/>
<Button onClick={submitSystemName} loading={loadingInput['SystemName']}>设置系统名称</Button>
<Form.Input
label={'Logo 图片地址'}
placeholder={'在此输入 Logo 图片地址'}
field={'Logo'}
onChange={handleInputChange}
/>
<Button onClick={submitLogo} loading={loadingInput['Logo']}>设置 Logo</Button>
<Form.TextArea
label={'首页内容'}
placeholder={'在此输入首页内容,支持 Markdown & HTML 代码,设置后首页的状态信息将不再显示。如果输入的是一个链接,则会使用该链接作为 iframe 的 src 属性,这允许你设置任意网页作为首页。'}
field={'HomePageContent'}
onChange={handleInputChange}
style={{ fontFamily: 'JetBrains Mono, Consolas' }}
autosize={{ minRows: 6, maxRows: 12 }}
/>
<Button onClick={() => submitOption('HomePageContent')} loading={loadingInput['HomePageContent']}>设置首页内容</Button>
<Form.TextArea
label={'关于'}
placeholder={'在此输入新的关于内容,支持 Markdown & HTML 代码。如果输入的是一个链接,则会使用该链接作为 iframe 的 src 属性,这允许你设置任意网页作为关于页面。'}
field={'About'}
onChange={handleInputChange}
style={{ fontFamily: 'JetBrains Mono, Consolas' }}
autosize={{ minRows: 6, maxRows: 12 }}
/>
<Button onClick={submitAbout} loading={loadingInput['About']}>设置关于</Button>
{/* */}
<Banner
fullMode={false}
type="info"
description="移除 One API 的版权标识必须首先获得授权,项目维护需要花费大量精力,如果本项目对你有意义,请主动支持本项目。"
closeIcon={null}
style={{ marginTop: 15 }}
/>
<Form.Input
label={'页脚'}
placeholder={'在此输入新的页脚,留空则使用默认页脚,支持 HTML 代码'}
field={'Footer'}
onChange={handleInputChange}
/>
<Button onClick={submitFooter} loading={loadingInput['Footer']}>设置页脚</Button>
</Form.Section>
</Form>
</Col>
{/*<Modal*/}
{/* onClose={() => setShowUpdateModal(false)}*/}
{/* onOpen={() => setShowUpdateModal(true)}*/}
@@ -200,7 +270,7 @@ const OtherSetting = () => {
{/* />*/}
{/* </Modal.Actions>*/}
{/*</Modal>*/}
</Grid>
</Row>
);
};

View File

@@ -0,0 +1,24 @@
import React from 'react';
import { Icon } from '@douyinfe/semi-ui';
const WeChatIcon = () => {
function CustomIcon() {
return <svg t='1709714447384' className='icon' viewBox='0 0 1024 1024' version='1.1'
xmlns='http://www.w3.org/2000/svg' p-id='5091' width='16' height='16'>
<path
d='M690.1 377.4c5.9 0 11.8 0.2 17.6 0.5-24.4-128.7-158.3-227.1-319.9-227.1C209 150.8 64 271.4 64 420.2c0 81.1 43.6 154.2 111.9 203.6 5.5 3.9 9.1 10.3 9.1 17.6 0 2.4-0.5 4.6-1.1 6.9-5.5 20.3-14.2 52.8-14.6 54.3-0.7 2.6-1.7 5.2-1.7 7.9 0 5.9 4.8 10.8 10.8 10.8 2.3 0 4.2-0.9 6.2-2l70.9-40.9c5.3-3.1 11-5 17.2-5 3.2 0 6.4 0.5 9.5 1.4 33.1 9.5 68.8 14.8 105.7 14.8 6 0 11.9-0.1 17.8-0.4-7.1-21-10.9-43.1-10.9-66 0-135.8 132.2-245.8 295.3-245.8z m-194.3-86.5c23.8 0 43.2 19.3 43.2 43.1s-19.3 43.1-43.2 43.1c-23.8 0-43.2-19.3-43.2-43.1s19.4-43.1 43.2-43.1z m-215.9 86.2c-23.8 0-43.2-19.3-43.2-43.1s19.3-43.1 43.2-43.1 43.2 19.3 43.2 43.1-19.4 43.1-43.2 43.1z'
p-id='5092'></path>
<path
d='M866.7 792.7c56.9-41.2 93.2-102 93.2-169.7 0-124-120.8-224.5-269.9-224.5-149 0-269.9 100.5-269.9 224.5S540.9 847.5 690 847.5c30.8 0 60.6-4.4 88.1-12.3 2.6-0.8 5.2-1.2 7.9-1.2 5.2 0 9.9 1.6 14.3 4.1l59.1 34c1.7 1 3.3 1.7 5.2 1.7 2.4 0 4.7-0.9 6.4-2.6 1.7-1.7 2.6-4 2.6-6.4 0-2.2-0.9-4.4-1.4-6.6-0.3-1.2-7.6-28.3-12.2-45.3-0.5-1.9-0.9-3.8-0.9-5.7 0.1-5.9 3.1-11.2 7.6-14.5zM600.2 587.2c-19.9 0-36-16.1-36-35.9 0-19.8 16.1-35.9 36-35.9s36 16.1 36 35.9c0 19.8-16.2 35.9-36 35.9z m179.9 0c-19.9 0-36-16.1-36-35.9 0-19.8 16.1-35.9 36-35.9s36 16.1 36 35.9c-0.1 19.8-16.2 35.9-36 35.9z'
p-id='5093'></path>
</svg>;
}
return (
<div>
<Icon svg={<CustomIcon />} />
</div>
);
};
export default WeChatIcon;

View File

@@ -63,7 +63,7 @@ const EditChannel = (props) => {
let localModels = [];
switch (value) {
case 14:
localModels = ['claude-instant-1', 'claude-2'];
localModels = ["claude-instant-1.2", "claude-2", "claude-2.0", "claude-2.1", "claude-3-sonnet-20240229", "claude-3-opus-20240229"];
break;
case 11:
localModels = ['PaLM-2'];
@@ -72,13 +72,13 @@ const EditChannel = (props) => {
localModels = ['ERNIE-Bot', 'ERNIE-Bot-turbo', 'ERNIE-Bot-4', 'Embedding-V1'];
break;
case 17:
localModels = ['qwen-turbo', 'qwen-plus', 'text-embedding-v1'];
localModels = ["qwen-turbo", "qwen-plus", "qwen-max", "qwen-max-longcontext", 'text-embedding-v1'];
break;
case 16:
localModels = ['chatglm_pro', 'chatglm_std', 'chatglm_lite'];
break;
case 18:
localModels = ['SparkDesk'];
localModels = ['SparkDesk', 'SparkDesk-v1.1', 'SparkDesk-v2.1', 'SparkDesk-v3.1', 'SparkDesk-v3.5'];
break;
case 19:
localModels = ['360GPT_S2_V9', 'embedding-bert-512-v1', 'embedding_s1_v1', 'semantic_similarity_s1_v1'];
@@ -87,7 +87,10 @@ const EditChannel = (props) => {
localModels = ['hunyuan'];
break;
case 24:
localModels = ['gemini-pro'];
localModels = ['gemini-pro', 'gemini-pro-vision'];
break;
case 25:
localModels = ['moonshot-v1-8k', 'moonshot-v1-32k', 'moonshot-v1-128k'];
break;
case 26:
localModels = ['glm-4', 'glm-4v', 'glm-3-turbo'];

View File

@@ -3,6 +3,8 @@ import {API, isMobile, showError, showInfo, showSuccess} from '../../helpers';
import {renderNumber, renderQuota} from '../../helpers/render';
import {Col, Layout, Row, Typography, Card, Button, Form, Divider, Space, Modal} from "@douyinfe/semi-ui";
import Title from "@douyinfe/semi-ui/lib/es/typography/title";
import Text from '@douyinfe/semi-ui/lib/es/typography/text';
import { Link } from 'react-router-dom';
const TopUp = () => {
const [redemptionCode, setRedemptionCode] = useState('');
@@ -290,6 +292,15 @@ const TopUp = () => {
</Space>
</Form>
</div>
{/*<div style={{ display: 'flex', justifyContent: 'right' }}>*/}
{/* <Text>*/}
{/* <Link onClick={*/}
{/* async () => {*/}
{/* window.location.href = '/topup/history'*/}
{/* }*/}
{/* }>充值记录</Link>*/}
{/* </Text>*/}
{/*</div>*/}
</Card>
</div>