fix: update .gitignore and improve model handling in various adapters and billing ratios

closes #37
Laisky.Cai 2025-03-13 01:47:41 +00:00
parent d30b6840ad
commit a20f57a923
5 changed files with 28 additions and 8 deletions

.gitignore

@@ -13,4 +13,5 @@ cmd.md
.env
/one-api
temp
.DS_Store
.DS_Store
/__debug*


@@ -48,7 +48,7 @@ type OpenAIModels struct {
Parent *string `json:"parent"`
}
// BUG: When updating a custom channel, all of its custom models should also be synced into allModels
// BUG(#39): When updating a custom channel, all of its custom models should also be synced into allModels
var allModels []OpenAIModels
var modelsMap map[string]OpenAIModels
var channelId2Models map[int][]string
@@ -160,6 +160,14 @@ func ListModels(c *gin.Context) {
return
}
// fix(#39): Previously, to fix #31, I concatenated model_name with the adaptor name when returning models.
// But this caused an issue with custom channels, where the returned adaptor is "openai",
// so the adaptor name and the ownedBy field mismatch when matching against allModels.
// For example, with deepseek the adaptor is "openai" but ownedBy is "deepseek", causing a mismatch.
// Our current solution: for models from custom channels, don't concatenate adaptor name,
// just match by model name only. However, this may reintroduce the duplicate models bug
// mentioned in #31. A complete fix would require significant changes, so I'll leave it for now.
// Create a map for quick lookup of enabled model+channel combinations
// Only store the exact model:channel combinations from abilities
abilityMap := make(map[string]bool)
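
To make the comment above concrete, here is a minimal sketch of the matching rule it describes. It is illustrative only: the Ability type, the enabledKeys helper, and the "model:adaptor" key format are assumptions, not the repository's actual code.

// Sketch of the lookup described above (assumed names and key format).
package main

import "fmt"

// Ability is a stand-in for one enabled model+channel combination.
type Ability struct {
	Model   string
	Adaptor string // adaptor reported by the channel
	Custom  bool   // true if the ability comes from a custom channel
}

// enabledKeys builds a lookup set: custom channels are keyed by model name
// only, every other channel by the exact "model:adaptor" combination.
func enabledKeys(abilities []Ability) map[string]bool {
	keys := make(map[string]bool, len(abilities))
	for _, a := range abilities {
		if a.Custom {
			keys[a.Model] = true
		} else {
			keys[a.Model+":"+a.Adaptor] = true
		}
	}
	return keys
}

func main() {
	keys := enabledKeys([]Ability{
		{Model: "deepseek-chat", Adaptor: "openai", Custom: true},
		{Model: "gemini-2.0-flash", Adaptor: "gemini", Custom: false},
	})
	// The custom channel's model matches by name alone, even though its
	// adaptor ("openai") differs from its ownedBy value ("deepseek").
	fmt.Println(keys["deepseek-chat"])           // true
	fmt.Println(keys["gemini-2.0-flash:gemini"]) // true
}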


@@ -9,7 +9,7 @@ var ModelList = []string{
"gemini-1.5-pro", "gemini-1.5-pro-experimental",
"text-embedding-004", "aqa",
"gemini-2.0-flash", "gemini-2.0-flash-exp",
"gemini-2.0-flash-lite-preview-02-05",
"gemini-2.0-flash-lite",
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-pro-exp-02-05",
}


@@ -13,14 +13,20 @@ import (
"github.com/songquanpeng/one-api/relay/relaymode"
)
// ModelList is the list of models supported by Vertex AI.
//
// https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models
var ModelList = []string{
"gemini-pro", "gemini-pro-vision",
"gemini-exp-1206",
"gemini-1.5-pro-001", "gemini-1.5-pro-002",
"gemini-1.5-flash-001", "gemini-1.5-flash-002",
"gemini-2.0-flash-exp", "gemini-2.0-flash-001",
"gemini-2.0-flash-lite-preview-02-05",
"gemini-1.0-pro",
"gemini-1.0-pro-vision",
"gemini-1.5-pro", "gemini-1.5-pro-001", "gemini-1.5-pro-002",
"gemini-1.5-flash", "gemini-1.5-flash-001", "gemini-1.5-flash-002",
"gemini-2.0-flash", "gemini-2.0-flash-exp", "gemini-2.0-flash-001",
"gemini-2.0-flash-lite", "gemini-2.0-flash-lite-001",
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-pro-exp-02-05",
}
type Adaptor struct {


@@ -135,15 +135,20 @@ var ModelRatio = map[string]float64{
// "gemma-2-27b-it": 0,
"gemini-pro": 0.25 * MILLI_USD, // $0.00025 / 1k characters -> $0.001 / 1k tokens
"gemini-1.0-pro": 0.125 * MILLI_USD,
"gemini-1.0-pro-vision": 0.125 * MILLI_USD,
"gemini-1.5-pro": 1.25 * MILLI_USD,
"gemini-1.5-pro-001": 1.25 * MILLI_USD,
"gemini-1.5-pro-002": 1.25 * MILLI_USD,
"gemini-1.5-pro-experimental": 1.25 * MILLI_USD,
"gemini-1.5-flash": 0.075 * MILLI_USD,
"gemini-1.5-flash-001": 0.075 * MILLI_USD,
"gemini-1.5-flash-002": 0.075 * MILLI_USD,
"gemini-1.5-flash-8b": 0.0375 * MILLI_USD,
"gemini-2.0-flash-exp": 0.075 * MILLI_USD,
"gemini-2.0-flash": 0.15 * MILLI_USD,
"gemini-2.0-flash-exp": 0.075 * MILLI_USD,
"gemini-2.0-flash-001": 0.15 * MILLI_USD,
"gemini-2.0-flash-lite": 0.075 * MILLI_USD,
"gemini-2.0-flash-lite-001": 0.075 * MILLI_USD,
"gemini-2.0-flash-lite-preview-02-05": 0.075 * MILLI_USD,
"gemini-2.0-flash-thinking-exp-01-21": 0.075 * MILLI_USD,
"gemini-2.0-pro-exp-02-05": 1.25 * MILLI_USD,