Mirror of https://github.com/linux-do/new-api.git (synced 2025-11-27 09:49:25 +08:00)

Compare commits: 1c371300ab...v0.2.7.6-a (13 commits)
Commits in this range:

- 4b5303a77b
- 6eab0cc370
- 9e45dbe964
- e495354823
- 9452be51b9
- 43076c2f33
- 04f0084d97
- 2e3c266bd6
- 4490258104
- 93c6d765c7
- e614ca370a
- c152b4de08
- 190316f66e
```diff
@@ -32,13 +32,14 @@ var defaultModelRatio = map[string]float64{
 	"gpt-4-32k":                 30,
 	//"gpt-4-32k-0314":           30, //deprecated
 	"gpt-4-32k-0613":            30,
 	"gpt-4-1106-preview":        5,    // $0.01 / 1K tokens
 	"gpt-4-0125-preview":        5,    // $0.01 / 1K tokens
 	"gpt-4-turbo-preview":       5,    // $0.01 / 1K tokens
 	"gpt-4-vision-preview":      5,    // $0.01 / 1K tokens
 	"gpt-4-1106-vision-preview": 5,    // $0.01 / 1K tokens
 	"gpt-4o":                    2.5,  // $0.01 / 1K tokens
 	"gpt-4o-2024-05-13":         2.5,  // $0.01 / 1K tokens
+	"gpt-4o-2024-08-06":         1.25, // $0.01 / 1K tokens
 	"gpt-4o-mini":               0.075,
 	"gpt-4o-mini-2024-07-18":    0.075,
 	"gpt-4-turbo":               5, // $0.01 / 1K tokens
```
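Under the one-api pricing convention this project inherits, a ratio of 1 corresponds to $0.002 per 1K prompt tokens (completion pricing is handled separately by GetCompletionRatio below), so the new entry prices gpt-4o-2024-08-06 input at 1.25 × $0.002 = $0.0025 per 1K tokens, that is $2.50 per 1M, half of gpt-4o's 2.5 (= $0.005 per 1K). The `// $0.01 / 1K tokens` comments on the gpt-4o lines look like stale copy-paste from the gpt-4-turbo entries. A minimal sketch of that conversion, assuming the convention above holds; the helper is illustrative, not code from the repo:

```go
package main

import "fmt"

// usdPer1K converts a model ratio into USD per 1K prompt tokens,
// assuming the one-api convention that ratio 1.0 == $0.002 / 1K tokens.
func usdPer1K(ratio float64) float64 {
	return ratio * 0.002
}

func main() {
	fmt.Printf("gpt-4o:            $%.4f / 1K\n", usdPer1K(2.5))   // $0.0050
	fmt.Printf("gpt-4o-2024-08-06: $%.4f / 1K\n", usdPer1K(1.25))  // $0.0025
	fmt.Printf("gpt-4o-mini:       $%.5f / 1K\n", usdPer1K(0.075)) // $0.00015
}
```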
```diff
@@ -326,7 +327,7 @@ func GetCompletionRatio(name string) float64 {
 		return 3
 	}
 	if strings.HasPrefix(name, "gpt-4o") {
-		if strings.Contains(name, "mini") {
+		if strings.HasPrefix(name, "gpt-4o-mini") || name == "gpt-4o-2024-08-06" {
 			return 4
 		}
 		return 3
```
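GetCompletionRatio returns the output-to-input price multiplier, and the new condition matches OpenAI's published pricing at the time: gpt-4o-mini ($0.15 in / $0.60 out) and gpt-4o-2024-08-06 ($2.50 in / $10 out) bill completions at 4x the prompt rate, while the earlier gpt-4o snapshots ($5 in / $15 out) bill at 3x. Replacing strings.Contains(name, "mini") with a gpt-4o-mini prefix check also keeps hypothetical future names that merely contain "mini" elsewhere out of the 4x branch.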
```diff
@@ -7,31 +7,31 @@ type ResponseFormat struct {
 }
 
 type GeneralOpenAIRequest struct {
 	Model            string          `json:"model,omitempty"`
 	Messages         []Message       `json:"messages,omitempty"`
 	Prompt           any             `json:"prompt,omitempty"`
 	Stream           bool            `json:"stream,omitempty"`
 	StreamOptions    *StreamOptions  `json:"stream_options,omitempty"`
 	MaxTokens        uint            `json:"max_tokens,omitempty"`
 	Temperature      float64         `json:"temperature,omitempty"`
 	TopP             float64         `json:"top_p,omitempty"`
 	TopK             int             `json:"top_k,omitempty"`
 	Stop             any             `json:"stop,omitempty"`
 	N                int             `json:"n,omitempty"`
 	Input            any             `json:"input,omitempty"`
 	Instruction      string          `json:"instruction,omitempty"`
 	Size             string          `json:"size,omitempty"`
 	Functions        any             `json:"functions,omitempty"`
 	FrequencyPenalty float64         `json:"frequency_penalty,omitempty"`
 	PresencePenalty  float64         `json:"presence_penalty,omitempty"`
-	ResponseFormat   *ResponseFormat `json:"response_format,omitempty"`
+	ResponseFormat   any             `json:"response_format,omitempty"`
 	Seed             float64         `json:"seed,omitempty"`
 	Tools            []ToolCall      `json:"tools,omitempty"`
 	ToolChoice       any             `json:"tool_choice,omitempty"`
 	User             string          `json:"user,omitempty"`
 	LogProbs         bool            `json:"logprobs,omitempty"`
 	TopLogProbs      int             `json:"top_logprobs,omitempty"`
 	Dimensions       int             `json:"dimensions,omitempty"`
 }
 
 type OpenAITools struct {
```
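Widening ResponseFormat from *ResponseFormat to any means the relay no longer forces response_format into one fixed struct shape, so payloads such as OpenAI's newer json_schema structured-output object can pass through with all their nested fields intact. A minimal illustration of that pass-through, using only encoding/json; the Req type below is a stand-in for the relevant slice of GeneralOpenAIRequest, not the repo's code:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Req mimics the changed field: an `any` field round-trips arbitrary
// JSON instead of dropping fields a fixed struct does not declare.
type Req struct {
	Model          string `json:"model,omitempty"`
	ResponseFormat any    `json:"response_format,omitempty"`
}

func main() {
	in := []byte(`{"model":"gpt-4o-2024-08-06","response_format":{"type":"json_schema","json_schema":{"name":"x","schema":{"type":"object"}}}}`)
	var r Req
	_ = json.Unmarshal(in, &r)
	out, _ := json.Marshal(r) // all nested json_schema fields survive
	fmt.Println(string(out))
}
```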
```diff
@@ -146,7 +146,7 @@ func TokenAuth() func(c *gin.Context) {
 		if token != nil {
 			id := c.GetInt("id")
 			if id == 0 {
-				c.Set("id", token.Id)
+				c.Set("id", token.UserId)
 			}
 		}
 		if err != nil {
```
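A one-word but consequential fix: judging by the field names, token.Id is the primary key of the token record itself, while token.UserId is the account that owns it. The old code set the request's user id to the token's row id whenever the id was not already populated, which would misattribute the request to whichever user happened to share that numeric id.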
```diff
@@ -106,16 +106,23 @@ func SearchChannels(keyword string, group string, model string) ([]*Channel, err
 	// Build the WHERE clause
 	var whereClause string
 	var args []interface{}
-	if group != "" {
-		whereClause = "(id = ? OR name LIKE ? OR " + keyCol + " = ?) AND " + groupCol + " = ? AND " + modelsCol + " LIKE ?"
-		args = append(args, common.String2Int(keyword), "%"+keyword+"%", keyword, group, "%"+model+"%")
+	if group != "" && group != "null" {
+		var groupCondition string
+		if common.UsingMySQL {
+			groupCondition = `CONCAT(',', ` + groupCol + `, ',') LIKE ?`
+		} else {
+			// sqlite, PostgreSQL
+			groupCondition = `(',' || ` + groupCol + ` || ',') LIKE ?`
+		}
+		whereClause = "(id = ? OR name LIKE ? OR " + keyCol + " = ?) AND " + modelsCol + ` LIKE ? AND ` + groupCondition
+		args = append(args, common.String2Int(keyword), "%"+keyword+"%", keyword, "%"+model+"%", "%,"+group+",%")
 	} else {
 		whereClause = "(id = ? OR name LIKE ? OR " + keyCol + " = ?) AND " + modelsCol + " LIKE ?"
 		args = append(args, common.String2Int(keyword), "%"+keyword+"%", keyword, "%"+model+"%")
 	}
 
 	// Execute the query
-	err := baseQuery.Where(whereClause, args...).Find(&channels).Error
+	err := baseQuery.Where(whereClause, args...).Order("priority desc").Find(&channels).Error
 	if err != nil {
 		return nil, err
 	}
```
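The rewritten group filter does exact element matching against a comma-separated column: both the stored value and the search term are wrapped in commas, so "vip" matches ",vip,default," but not ",svip,default,". The dialect split exists because || is logical OR in MySQL's default mode, so string concatenation needs CONCAT there, while SQLite and PostgreSQL concatenate with ||. The added Order("priority desc") presumably aligns search results with the priority ordering used for channel listings elsewhere. The string trick itself, sketched in plain Go (illustrative only):

```go
package main

import (
	"fmt"
	"strings"
)

// groupMatches reports whether group appears as a whole element of the
// comma-separated list stored in col, the same effect the SQL condition
// `(',' || col || ',') LIKE '%,group,%'` achieves.
func groupMatches(col, group string) bool {
	return strings.Contains(","+col+",", ","+group+",")
}

func main() {
	fmt.Println(groupMatches("vip,default", "vip"))  // true
	fmt.Println(groupMatches("svip,default", "vip")) // false: no substring false positive
}
```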
```diff
@@ -193,8 +193,8 @@ func SumUsedQuota(logType int, startTimestamp int64, endTimestamp int64, modelNa
 		tx = tx.Where("created_at <= ?", endTimestamp)
 	}
 	if modelName != "" {
-		tx = tx.Where("model_name = ?", modelName)
-		rpmTpmQuery = rpmTpmQuery.Where("model_name = ?", modelName)
+		tx = tx.Where("model_name like ?", modelName)
+		rpmTpmQuery = rpmTpmQuery.Where("model_name like ?", modelName)
 	}
 	if channel != 0 {
 		tx = tx.Where("channel_id = ?", channel)
```
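Switching = to like keeps exact lookups working when modelName contains no wildcard characters (a LIKE pattern without % or _ behaves as equality in standard SQL, modulo collation and trailing-space quirks), while letting callers pass a pattern such as gpt-4% to aggregate a whole model family in one query; the caller is not shown in this diff, so the intended pattern source is an assumption.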
```diff
@@ -3,18 +3,18 @@ package ollama
 import "one-api/dto"
 
 type OllamaRequest struct {
 	Model            string              `json:"model,omitempty"`
 	Messages         []dto.Message       `json:"messages,omitempty"`
 	Stream           bool                `json:"stream,omitempty"`
 	Temperature      float64             `json:"temperature,omitempty"`
 	Seed             float64             `json:"seed,omitempty"`
 	Topp             float64             `json:"top_p,omitempty"`
 	TopK             int                 `json:"top_k,omitempty"`
 	Stop             any                 `json:"stop,omitempty"`
 	Tools            []dto.ToolCall      `json:"tools,omitempty"`
-	ResponseFormat   *dto.ResponseFormat `json:"response_format,omitempty"`
+	ResponseFormat   any                 `json:"response_format,omitempty"`
 	FrequencyPenalty float64             `json:"frequency_penalty,omitempty"`
 	PresencePenalty  float64             `json:"presence_penalty,omitempty"`
 }
 
 type OllamaEmbeddingRequest struct {
```
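The Ollama DTO mirrors the GeneralOpenAIRequest change above: ResponseFormat widens from *dto.ResponseFormat to any so the adaptor can forward whatever response_format shape the incoming relayed request carried.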
```diff
@@ -8,7 +8,7 @@ var ModelList = []string{
 	"gpt-4-32k", "gpt-4-32k-0613",
 	"gpt-4-turbo-preview", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
 	"gpt-4-vision-preview",
-	"gpt-4o", "gpt-4o-2024-05-13",
+	"gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06",
 	"gpt-4o-mini", "gpt-4o-mini-2024-07-18",
 	"text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large",
 	"text-curie-001", "text-babbage-001", "text-ada-001",
```
```diff
@@ -75,7 +75,7 @@ func AudioHelper(c *gin.Context) *dto.OpenAIErrorWithStatusCode {
 		return service.OpenAIErrorWrapperLocal(err, "get_user_quota_failed", http.StatusInternalServerError)
 	}
 	if userQuota-preConsumedQuota < 0 {
-		return service.OpenAIErrorWrapperLocal(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
+		return service.OpenAIErrorWrapperLocal(errors.New(fmt.Sprintf("audio pre-consumed quota failed, user quota: %d, need quota: %d", userQuota, preConsumedQuota)), "insufficient_user_quota", http.StatusBadRequest)
 	}
 	err = model.CacheDecreaseUserQuota(relayInfo.UserId, preConsumedQuota)
 	if err != nil {
```
```diff
@@ -125,7 +125,7 @@ func ImageHelper(c *gin.Context, relayMode int) *dto.OpenAIErrorWithStatusCode {
 	quota := int(imageRatio * groupRatio * common.QuotaPerUnit)
 
 	if userQuota-quota < 0 {
-		return service.OpenAIErrorWrapperLocal(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
+		return service.OpenAIErrorWrapperLocal(errors.New(fmt.Sprintf("image pre-consumed quota failed, user quota: %d, need quota: %d", userQuota, quota)), "insufficient_user_quota", http.StatusBadRequest)
 	}
 
 	adaptor := GetAdaptor(relayInfo.ApiType)
```
```diff
@@ -238,9 +238,12 @@ func preConsumeQuota(c *gin.Context, preConsumedQuota int, relayInfo *relaycommo
 	if err != nil {
 		return 0, 0, service.OpenAIErrorWrapperLocal(err, "get_user_quota_failed", http.StatusInternalServerError)
 	}
-	if userQuota <= 0 || userQuota-preConsumedQuota < 0 {
+	if userQuota <= 0 {
 		return 0, 0, service.OpenAIErrorWrapperLocal(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
 	}
+	if userQuota-preConsumedQuota < 0 {
+		return 0, 0, service.OpenAIErrorWrapperLocal(errors.New(fmt.Sprintf("chat pre-consumed quota failed, user quota: %d, need quota: %d", userQuota, preConsumedQuota)), "insufficient_user_quota", http.StatusBadRequest)
+	}
 	err = model.CacheDecreaseUserQuota(relayInfo.UserId, preConsumedQuota)
 	if err != nil {
 		return 0, 0, service.OpenAIErrorWrapperLocal(err, "decrease_user_quota_failed", http.StatusInternalServerError)
```
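The old combined condition returned one generic 403 for two different situations. The new code separates a depleted account (still 403 with the generic message) from a balance that is positive but smaller than the pre-consumed amount (400, with both figures in the message), matching the audio and image changes above. A condensed sketch of the resulting control flow, with the repo's error wrappers replaced by plain Go errors (illustrative; fmt.Errorf would also be the more idiomatic spelling of errors.New(fmt.Sprintf(...))):

```go
package main

import (
	"errors"
	"fmt"
)

// checkQuota reproduces the two-branch check from preConsumeQuota:
// a depleted account and an insufficient balance now fail differently.
func checkQuota(userQuota, preConsumedQuota int) error {
	if userQuota <= 0 {
		return errors.New("user quota is not enough") // maps to HTTP 403
	}
	if userQuota-preConsumedQuota < 0 {
		// maps to HTTP 400, telling the caller exactly how short they are
		return fmt.Errorf("chat pre-consumed quota failed, user quota: %d, need quota: %d",
			userQuota, preConsumedQuota)
	}
	return nil
}

func main() {
	fmt.Println(checkQuota(0, 100))   // depleted account
	fmt.Println(checkQuota(50, 100))  // positive but insufficient
	fmt.Println(checkQuota(500, 100)) // <nil>: enough quota
}
```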
```diff
@@ -745,7 +745,8 @@ const ChannelsTable = () => {
           <Form.Select
             field='group'
             label='分组'
-            optionList={groupOptions}
+            optionList={[{ label: '选择分组', value: null }, ...groupOptions]}
+            initValue={null}
             onChange={(v) => {
               setSearchGroup(v);
               searchChannels(searchKeyword, v, searchModel);
```
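On the frontend, the channel table's group filter (label '分组', "Group") gains a '选择分组' ("Select group") placeholder option whose value is null, and initValue={null} makes it the default selection. This pairs with the new group != "" && group != "null" guard in SearchChannels above: clearing the filter presumably reaches the backend as the literal string "null" in the query parameter, and both spellings are now treated as "no group filter".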