mirror of https://github.com/songquanpeng/one-api.git (synced 2025-11-14 20:23:46 +08:00)
🐛 fix usage error & web channel edit label error
@@ -100,8 +100,10 @@ func (p *ClaudeProvider) ChatAction(request *types.ChatCompletionRequest, isMode
 			return
 		}
 
-		usage.PromptTokens = promptTokens
-		usage.CompletionTokens = common.CountTokenText(responseText, request.Model)
+		usage = &types.Usage{
+			PromptTokens:     promptTokens,
+			CompletionTokens: common.CountTokenText(responseText, request.Model),
+		}
 		usage.TotalTokens = promptTokens + usage.CompletionTokens
 
 	} else {
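In the old streaming branch, the named return value usage was written to only through field assignments; if it had not been allocated beforehand, those writes dereference a nil pointer. Constructing a fresh types.Usage first, as the new lines do, avoids that. This is presumably the "usage error" in the commit title, though the excerpt does not show where usage is declared, and the same change is applied to the Gemini, PaLM, and Tencent providers below. A minimal, self-contained sketch of that assumed failure mode follows; the Usage type only mimics the shape of types.Usage, and everything else is illustrative rather than taken from the one-api codebase.

package main

import "fmt"

// Usage mimics the shape of types.Usage from the diff above.
type Usage struct {
	PromptTokens     int
	CompletionTokens int
	TotalTokens      int
}

// before: writing through a nil named return value panics at runtime.
func before() (usage *Usage) {
	usage.PromptTokens = 12 // panic: nil pointer dereference
	return
}

// after: allocate the struct first, then derive the total, as the new hunk does.
func after(promptTokens, completionTokens int) (usage *Usage) {
	usage = &Usage{
		PromptTokens:     promptTokens,
		CompletionTokens: completionTokens,
	}
	usage.TotalTokens = promptTokens + usage.CompletionTokens
	return
}

func main() {
	fmt.Println(*after(12, 34)) // {12 34 46}
}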
@@ -147,8 +147,10 @@ func (p *GeminiProvider) ChatAction(request *types.ChatCompletionRequest, isMode
 			return
 		}
 
-		usage.PromptTokens = promptTokens
-		usage.CompletionTokens = common.CountTokenText(responseText, request.Model)
+		usage = &types.Usage{
+			PromptTokens:     promptTokens,
+			CompletionTokens: common.CountTokenText(responseText, request.Model),
+		}
 		usage.TotalTokens = promptTokens + usage.CompletionTokens
 
 	} else {
@@ -19,7 +19,7 @@ func (c *OpenAIProviderImageResponseResponse) ResponseHandler(resp *http.Respons
 
 func (p *OpenAIProvider) ImageGenerationsAction(request *types.ImageRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {
 
-	if isWithinRange(request.Model, request.N) == false {
+	if !isWithinRange(request.Model, request.N) {
 		return nil, common.StringErrorWrapper("n_not_within_range", "n_not_within_range", http.StatusBadRequest)
 	}
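The image-generation hunk is purely stylistic: Go code negates a boolean directly rather than comparing it to false. A tiny sketch of the same pattern; the withinRange helper and its bounds are hypothetical stand-ins for the provider's isWithinRange, which the diff does not show.

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// withinRange is a hypothetical stand-in for isWithinRange; the real
// per-model bounds are not part of this excerpt.
func withinRange(model string, n int) bool {
	return n >= 1 && n <= 10
}

func validateN(model string, n int) (int, error) {
	if !withinRange(model, n) { // idiomatic form; equivalent to "== false"
		return http.StatusBadRequest, errors.New("n_not_within_range")
	}
	return http.StatusOK, nil
}

func main() {
	status, err := validateN("dall-e-3", 0)
	fmt.Println(status, err) // 400 n_not_within_range
}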
@@ -92,8 +92,10 @@ func (p *PalmProvider) ChatAction(request *types.ChatCompletionRequest, isModelM
 			return
 		}
 
-		usage.PromptTokens = promptTokens
-		usage.CompletionTokens = common.CountTokenText(responseText, request.Model)
+		usage = &types.Usage{
+			PromptTokens:     promptTokens,
+			CompletionTokens: common.CountTokenText(responseText, request.Model),
+		}
 		usage.TotalTokens = promptTokens + usage.CompletionTokens
 
 	} else {
@@ -13,6 +13,7 @@ import (
 	"one-api/providers/base"
 	"one-api/providers/claude"
 	"one-api/providers/closeai"
+	"one-api/providers/gemini"
 	"one-api/providers/openai"
 	"one-api/providers/openaisb"
 	"one-api/providers/palm"
@@ -49,6 +50,7 @@ func init() {
 	providerFactories[common.ChannelTypeAIGC2D] = aigc2d.Aigc2dProviderFactory{}
 	providerFactories[common.ChannelTypeAPI2GPT] = api2gpt.Api2gptProviderFactory{}
 	providerFactories[common.ChannelTypeAzureSpeech] = azurespeech.AzureSpeechProviderFactory{}
+	providerFactories[common.ChannelTypeGemini] = gemini.GeminiProviderFactory{}
 
 }
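The two hunks above wire the Gemini provider into the registry: the import brings the package in, and the init() line maps common.ChannelTypeGemini to its factory. Below is a minimal sketch of that registration pattern; the Provider and ProviderFactory interfaces are simplified stand-ins for the ones in one-api/providers/base, and the channel constant is a placeholder value, not the project's real one.

package main

import "fmt"

// Simplified stand-ins for the interfaces in one-api/providers/base.
type Provider interface {
	Name() string
}

type ProviderFactory interface {
	Create(channelID int) Provider
}

type geminiProvider struct{}

func (geminiProvider) Name() string { return "gemini" }

type GeminiProviderFactory struct{}

func (GeminiProviderFactory) Create(channelID int) Provider { return geminiProvider{} }

// ChannelTypeGemini is a placeholder value, not the project's real constant.
const ChannelTypeGemini = 25

var providerFactories = map[int]ProviderFactory{}

func init() {
	// Mirrors the added line: without this registration the Gemini channel
	// type has no factory and lookups for it fail.
	providerFactories[ChannelTypeGemini] = GeminiProviderFactory{}
}

func main() {
	if factory, ok := providerFactories[ChannelTypeGemini]; ok {
		fmt.Println(factory.Create(1).Name()) // gemini
	}
}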
@@ -105,8 +105,10 @@ func (p *TencentProvider) ChatAction(request *types.ChatCompletionRequest, isMod
 			return
 		}
 
-		usage.PromptTokens = promptTokens
-		usage.CompletionTokens = common.CountTokenText(responseText, request.Model)
+		usage = &types.Usage{
+			PromptTokens:     promptTokens,
+			CompletionTokens: common.CountTokenText(responseText, request.Model),
+		}
 		usage.TotalTokens = promptTokens + usage.CompletionTokens
 
 	} else {