Compare commits

..

7 Commits

Author SHA1 Message Date
JustSong 1c4409ae80 Merge branch 'main' of https://github.com/songquanpeng/one-api 2023-08-26 13:36:58 +08:00
JustSong 5ee24e8acf feat: support 360's models (close #331, close #461) 2023-08-26 13:36:20 +08:00
shao0222 4f2f911e4d fix: fix the issue of function_call not working when using model mapping (#462) 2023-08-26 13:10:18 +08:00
JustSong fdb2cccf65 perf: initialize all token encoder when starting (close #459, close #460) 2023-08-26 13:02:02 +08:00
JustSong a3e267df7e fix: fix error response (close #468) 2023-08-26 12:37:45 +08:00
JustSong ac7c0f3a76 fix: disable channel when 401 received (close #467) 2023-08-26 12:05:18 +08:00
glzjin efeb9a16ce fix: fix xunfei crash (#451) 2023-08-20 22:07:50 +08:00
11 changed files with 161 additions and 47 deletions

View File

@@ -68,6 +68,7 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用
 + [x] [阿里通义千问系列模型](https://help.aliyun.com/document_detail/2400395.html)
 + [x] [讯飞星火认知大模型](https://www.xfyun.cn/doc/spark/Web.html)
 + [x] [智谱 ChatGLM 系列模型](https://bigmodel.cn)
++ [x] [360 智脑](https://ai.360.cn)
 2. 支持配置镜像以及众多第三方代理服务:
 + [x] [OpenAI-SB](https://openai-sb.com)
 + [x] [API2D](https://api2d.com/r/197971)

View File

@@ -173,6 +173,7 @@ const (
 	ChannelTypeZhipu  = 16
 	ChannelTypeAli    = 17
 	ChannelTypeXunfei = 18
+	ChannelType360    = 19
 )
 
 var ChannelBaseURLs = []string{
@@ -195,4 +196,5 @@ var ChannelBaseURLs = []string{
 	"https://open.bigmodel.cn",       // 16
 	"https://dashscope.aliyuncs.com", // 17
 	"",                               // 18
+	"https://ai.360.cn",              // 19
 }

View File

@@ -13,46 +13,51 @@ import (
 // 1 === $0.002 / 1K tokens
 // 1 === ¥0.014 / 1k tokens
 var ModelRatio = map[string]float64{
 	"gpt-4":                     15,
 	"gpt-4-0314":                15,
 	"gpt-4-0613":                15,
 	"gpt-4-32k":                 30,
 	"gpt-4-32k-0314":            30,
 	"gpt-4-32k-0613":            30,
 	"gpt-3.5-turbo":             0.75, // $0.0015 / 1K tokens
 	"gpt-3.5-turbo-0301":        0.75,
 	"gpt-3.5-turbo-0613":        0.75,
 	"gpt-3.5-turbo-16k":         1.5, // $0.003 / 1K tokens
 	"gpt-3.5-turbo-16k-0613":    1.5,
 	"text-ada-001":              0.2,
 	"text-babbage-001":          0.25,
 	"text-curie-001":            1,
 	"text-davinci-002":          10,
 	"text-davinci-003":          10,
 	"text-davinci-edit-001":     10,
 	"code-davinci-edit-001":     10,
 	"whisper-1":                 10,
 	"davinci":                   10,
 	"curie":                     10,
 	"babbage":                   10,
 	"ada":                       10,
 	"text-embedding-ada-002":    0.05,
 	"text-search-ada-doc-001":   10,
 	"text-moderation-stable":    0.1,
 	"text-moderation-latest":    0.1,
 	"dall-e":                    8,
 	"claude-instant-1":          0.815,  // $1.63 / 1M tokens
 	"claude-2":                  5.51,   // $11.02 / 1M tokens
 	"ERNIE-Bot":                 0.8572, // ¥0.012 / 1k tokens
 	"ERNIE-Bot-turbo":           0.5715, // ¥0.008 / 1k tokens
 	"Embedding-V1":              0.1429, // ¥0.002 / 1k tokens
 	"PaLM-2":                    1,
 	"chatglm_pro":               0.7143, // ¥0.01 / 1k tokens
 	"chatglm_std":               0.3572, // ¥0.005 / 1k tokens
 	"chatglm_lite":              0.1429, // ¥0.002 / 1k tokens
 	"qwen-v1":                   0.8572, // TBD: https://help.aliyun.com/document_detail/2399482.html?spm=a2c4g.2399482.0.0.1ad347feilAgag
 	"qwen-plus-v1":              0.5715, // Same as above
 	"SparkDesk":                 0.8572, // TBD
+	"360GPT_S2_V9":              0.8572, // ¥0.012 / 1k tokens
+	"embedding-bert-512-v1":     0.0715, // ¥0.001 / 1k tokens
+	"embedding_s1_v1":           0.0715, // ¥0.001 / 1k tokens
+	"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
+	"360GPT_S2_V9.4":            0.8572, // ¥0.012 / 1k tokens
 }
 
 func ModelRatio2JSONString() string {
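
Note: the comments at the top of ModelRatio fix the unit, i.e. a ratio of 1 means $0.002 or ¥0.014 per 1k tokens, and the prices quoted next to the new 360 entries follow from that. A minimal sketch of the arithmetic, assuming a hypothetical helper (pricePerKTokensCNY is not part of one-api):

package main

import "fmt"

// A ratio of 1 corresponds to ¥0.014 per 1k tokens, per the comment in ModelRatio.
const cnyPerRatioUnit = 0.014

// pricePerKTokensCNY is a hypothetical helper used only for this illustration.
func pricePerKTokensCNY(ratio float64) float64 {
	return ratio * cnyPerRatioUnit
}

func main() {
	fmt.Printf("360GPT_S2_V9: ¥%.4f / 1k tokens\n", pricePerKTokensCNY(0.8572))    // ≈ ¥0.012, matching the comment
	fmt.Printf("embedding_s1_v1: ¥%.4f / 1k tokens\n", pricePerKTokensCNY(0.0715)) // ≈ ¥0.001, matching the comment
}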

View File

@@ -24,6 +24,10 @@ func testChannel(channel *model.Channel, request ChatRequest) (error, *OpenAIErr
 		fallthrough
 	case common.ChannelTypeZhipu:
 		fallthrough
+	case common.ChannelTypeAli:
+		fallthrough
+	case common.ChannelType360:
+		fallthrough
 	case common.ChannelTypeXunfei:
 		return errors.New("该渠道类型当前版本不支持测试,请手动测试"), nil
 	case common.ChannelTypeAzure:
@@ -174,7 +178,7 @@ func testAllChannels(notify bool) error {
 				err = errors.New(fmt.Sprintf("响应时间 %.2fs 超过阈值 %.2fs", float64(milliseconds)/1000.0, float64(disableThreshold)/1000.0))
 				disableChannel(channel.Id, channel.Name, err.Error())
 			}
-			if shouldDisableChannel(openaiErr) {
+			if shouldDisableChannel(openaiErr, -1) {
 				disableChannel(channel.Id, channel.Name, err.Error())
 			}
 			channel.UpdateResponseTime(milliseconds)

View File

@@ -360,6 +360,51 @@ func init() {
Root: "SparkDesk", Root: "SparkDesk",
Parent: nil, Parent: nil,
}, },
{
Id: "360GPT_S2_V9",
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Permission: permission,
Root: "360GPT_S2_V9",
Parent: nil,
},
{
Id: "embedding-bert-512-v1",
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Permission: permission,
Root: "embedding-bert-512-v1",
Parent: nil,
},
{
Id: "embedding_s1_v1",
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Permission: permission,
Root: "embedding_s1_v1",
Parent: nil,
},
{
Id: "semantic_similarity_s1_v1",
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Permission: permission,
Root: "semantic_similarity_s1_v1",
Parent: nil,
},
{
Id: "360GPT_S2_V9.4",
Object: "model",
Created: 1677649963,
OwnedBy: "360",
Permission: permission,
Root: "360GPT_S2_V9.4",
Parent: nil,
},
} }
openAIModelsMap = make(map[string]OpenAIModels) openAIModelsMap = make(map[string]OpenAIModels)
for _, model := range openAIModels { for _, model := range openAIModels {

View File

@@ -315,11 +315,10 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError) return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
} }
isStream = isStream || strings.HasPrefix(resp.Header.Get("Content-Type"), "text/event-stream") isStream = isStream || strings.HasPrefix(resp.Header.Get("Content-Type"), "text/event-stream")
}
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return errorWrapper( return relayErrorHandler(resp)
fmt.Errorf("bad status code: %d", resp.StatusCode), "bad_status_code", resp.StatusCode) }
} }
var textResponse TextResponse var textResponse TextResponse

View File

@@ -1,16 +1,38 @@
 package controller
 
 import (
+	"encoding/json"
 	"fmt"
 	"github.com/gin-gonic/gin"
 	"github.com/pkoukk/tiktoken-go"
+	"io"
+	"net/http"
 	"one-api/common"
+	"strconv"
 )
 
 var stopFinishReason = "stop"
 
 var tokenEncoderMap = map[string]*tiktoken.Tiktoken{}
 
+func InitTokenEncoders() {
+	common.SysLog("initializing token encoders")
+	fallbackTokenEncoder, err := tiktoken.EncodingForModel("gpt-3.5-turbo")
+	if err != nil {
+		common.FatalLog(fmt.Sprintf("failed to get fallback token encoder: %s", err.Error()))
+	}
+	for model, _ := range common.ModelRatio {
+		tokenEncoder, err := tiktoken.EncodingForModel(model)
+		if err != nil {
+			common.SysError(fmt.Sprintf("using fallback encoder for model %s", model))
+			tokenEncoderMap[model] = fallbackTokenEncoder
+			continue
+		}
+		tokenEncoderMap[model] = tokenEncoder
+	}
+	common.SysLog("token encoders initialized")
+}
+
 func getTokenEncoder(model string) *tiktoken.Tiktoken {
 	if tokenEncoder, ok := tokenEncoderMap[model]; ok {
 		return tokenEncoder
@@ -95,13 +117,16 @@ func errorWrapper(err error, code string, statusCode int) *OpenAIErrorWithStatus
 	}
 }
 
-func shouldDisableChannel(err *OpenAIError) bool {
+func shouldDisableChannel(err *OpenAIError, statusCode int) bool {
 	if !common.AutomaticDisableChannelEnabled {
 		return false
 	}
 	if err == nil {
 		return false
 	}
+	if statusCode == http.StatusUnauthorized {
+		return true
+	}
 	if err.Type == "insufficient_quota" || err.Code == "invalid_api_key" || err.Code == "account_deactivated" {
 		return true
 	}
@@ -115,3 +140,30 @@ func setEventStreamHeaders(c *gin.Context) {
 	c.Writer.Header().Set("Transfer-Encoding", "chunked")
 	c.Writer.Header().Set("X-Accel-Buffering", "no")
 }
+
+func relayErrorHandler(resp *http.Response) (openAIErrorWithStatusCode *OpenAIErrorWithStatusCode) {
+	openAIErrorWithStatusCode = &OpenAIErrorWithStatusCode{
+		StatusCode: resp.StatusCode,
+		OpenAIError: OpenAIError{
+			Message: fmt.Sprintf("bad response status code %d", resp.StatusCode),
+			Type:    "one_api_error",
+			Code:    "bad_response_status_code",
+			Param:   strconv.Itoa(resp.StatusCode),
+		},
+	}
+	responseBody, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return
+	}
+	err = resp.Body.Close()
+	if err != nil {
+		return
+	}
+	var textResponse TextResponse
+	err = json.Unmarshal(responseBody, &textResponse)
+	if err != nil {
+		return
+	}
+	openAIErrorWithStatusCode.OpenAIError = textResponse.Error
+	return
+}
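
Note: taken together with the relay.go and channel-test.go hunks, the new statusCode parameter lets a 401 from the upstream disable a channel even when the error body carries no recognizable error code. A minimal standalone sketch of how the two call sites in this compare pass it (the types and the disable switch are simplified stand-ins, not the real one-api definitions):

package main

import (
	"fmt"
	"net/http"
)

// Simplified stand-in for the OpenAIError type used in one-api.
type OpenAIError struct {
	Type string
	Code string
}

// Same decision order as the diff above, with AutomaticDisableChannelEnabled assumed true.
func shouldDisableChannel(err *OpenAIError, statusCode int) bool {
	if err == nil {
		return false
	}
	if statusCode == http.StatusUnauthorized {
		return true // a 401 always disables the channel, regardless of the error body
	}
	return err.Type == "insufficient_quota" || err.Code == "invalid_api_key" || err.Code == "account_deactivated"
}

func main() {
	// Relay path: the upstream HTTP status is known and passed through.
	fmt.Println(shouldDisableChannel(&OpenAIError{Type: "one_api_error"}, http.StatusUnauthorized)) // true
	// Channel-test path: no HTTP status is available, so -1 is used as a sentinel.
	fmt.Println(shouldDisableChannel(&OpenAIError{Type: "server_error"}, -1)) // false
}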

View File

@@ -40,6 +40,7 @@ type GeneralOpenAIRequest struct {
Input any `json:"input,omitempty"` Input any `json:"input,omitempty"`
Instruction string `json:"instruction,omitempty"` Instruction string `json:"instruction,omitempty"`
Size string `json:"size,omitempty"` Size string `json:"size,omitempty"`
Functions any `json:"functions,omitempty"`
} }
type ChatRequest struct { type ChatRequest struct {
@@ -185,7 +186,7 @@ func Relay(c *gin.Context) {
channelId := c.GetInt("channel_id") channelId := c.GetInt("channel_id")
common.SysError(fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message)) common.SysError(fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message))
// https://platform.openai.com/docs/guides/error-codes/api-errors // https://platform.openai.com/docs/guides/error-codes/api-errors
if shouldDisableChannel(&err.OpenAIError) { if shouldDisableChannel(&err.OpenAIError, err.StatusCode) {
channelId := c.GetInt("channel_id") channelId := c.GetInt("channel_id")
channelName := c.GetString("channel_name") channelName := c.GetString("channel_name")
disableChannel(channelId, channelName, err.Message) disableChannel(channelId, channelName, err.Message)

View File

@@ -77,6 +77,7 @@ func main() {
 		}
 		go controller.AutomaticallyTestChannels(frequency)
 	}
+	controller.InitTokenEncoders()
 
 	// Initialize HTTP server
 	server := gin.Default()

View File

@@ -7,6 +7,7 @@ export const CHANNEL_OPTIONS = [
   { key: 17, text: '阿里通义千问', value: 17, color: 'orange' },
   { key: 18, text: '讯飞星火认知', value: 18, color: 'blue' },
   { key: 16, text: '智谱 ChatGLM', value: 16, color: 'violet' },
+  { key: 19, text: '360 智脑', value: 19, color: 'blue' },
   { key: 8, text: '自定义渠道', value: 8, color: 'pink' },
   { key: 2, text: '代理API2D', value: 2, color: 'blue' },
   { key: 5, text: '代理OpenAI-SB', value: 5, color: 'brown' },

View File

@@ -61,6 +61,9 @@ const EditChannel = () => {
       case 18:
         localModels = ['SparkDesk'];
         break;
+      case 19:
+        localModels = ['360GPT_S2_V9', 'embedding-bert-512-v1', 'embedding_s1_v1', 'semantic_similarity_s1_v1', '360GPT_S2_V9.4']
+        break;
     }
     setInputs((inputs) => ({ ...inputs, models: localModels }));
   }
} }