- Refactor constant definitions and organization
- Clean up package-level variables and functions
- Introduce new `relaymode` and `apitype` packages for constant definitions
- Refactor and simplify code in several packages including `openai`, `relay/channel/baidu`, `relay/util`, `relay/controller`, `relay/channeltype`
- Add helper functions in `relay/channeltype` package to convert channel type constants to corresponding API type constants
- Remove deprecated functions such as `ResponseText2Usage` from `relay/channel/openai/helper.go`
- Modify code in `relay/util/validation.go` and related files to use new `validator.ValidateTextRequest` function
- Rename `util` package to `relaymode` and update related imports in several packages
256 lines · 8.2 KiB · Go
package openai

import (
	"fmt"
	"math"
	"strings"

	"github.com/Laisky/errors/v2"
	"github.com/pkoukk/tiktoken-go"

	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/image"
	"github.com/songquanpeng/one-api/common/logger"
	billingratio "github.com/songquanpeng/one-api/relay/billing/ratio"
	"github.com/songquanpeng/one-api/relay/model"
)

// tokenEncoderMap won't grow after initialization
var tokenEncoderMap = map[string]*tiktoken.Tiktoken{}
var defaultTokenEncoder *tiktoken.Tiktoken

func InitTokenEncoders() {
	logger.SysLog("initializing token encoders")
	gpt35TokenEncoder, err := tiktoken.EncodingForModel("gpt-3.5-turbo")
	if err != nil {
		logger.FatalLog(fmt.Sprintf("failed to get gpt-3.5-turbo token encoder: %s", err.Error()))
	}
	defaultTokenEncoder = gpt35TokenEncoder
	gpt4TokenEncoder, err := tiktoken.EncodingForModel("gpt-4")
	if err != nil {
		logger.FatalLog(fmt.Sprintf("failed to get gpt-4 token encoder: %s", err.Error()))
	}
	for model := range billingratio.ModelRatio {
		if strings.HasPrefix(model, "gpt-3.5") {
			tokenEncoderMap[model] = gpt35TokenEncoder
		} else if strings.HasPrefix(model, "gpt-4") {
			tokenEncoderMap[model] = gpt4TokenEncoder
		} else {
			tokenEncoderMap[model] = nil
		}
	}
	logger.SysLog("token encoders initialized")
}

// getTokenEncoder returns the cached encoder for the given model, lazily
// resolving and caching encoders for models that were registered without one.
// Models missing from the map fall back to the gpt-3.5-turbo encoder.
func getTokenEncoder(model string) *tiktoken.Tiktoken {
	tokenEncoder, ok := tokenEncoderMap[model]
	if ok && tokenEncoder != nil {
		return tokenEncoder
	}
	if ok {
		// The model is known but its encoder has not been resolved yet.
		tokenEncoder, err := tiktoken.EncodingForModel(model)
		if err != nil {
			logger.SysError(fmt.Sprintf("failed to get token encoder for model %s: %s, using encoder for gpt-3.5-turbo", model, err.Error()))
			tokenEncoder = defaultTokenEncoder
		}
		tokenEncoderMap[model] = tokenEncoder
		return tokenEncoder
	}
	return defaultTokenEncoder
}

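// For example, a model like "text-embedding-ada-002" (assuming it is
// registered in billingratio.ModelRatio) is neither a gpt-3.5 nor a gpt-4
// model, so InitTokenEncoders leaves its entry nil; the first call to
// getTokenEncoder resolves it through tiktoken.EncodingForModel and caches
// the result. A model absent from the map entirely (e.g. a hypothetical
// "my-custom-model") simply gets the gpt-3.5-turbo encoder.
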
func getTokenNum(tokenEncoder *tiktoken.Tiktoken, text string) int {
	if config.ApproximateTokenEnabled {
		// Cheap heuristic: assume roughly 0.38 tokens per byte of text
		// instead of running the full BPE encoding.
		return int(float64(len(text)) * 0.38)
	}
	return len(tokenEncoder.Encode(text, nil, nil))
}

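// A sketch of both paths above, assuming cl100k_base tokenization (the
// encoding used for gpt-3.5/gpt-4 models):
//
//	enc := getTokenEncoder("gpt-4")
//	getTokenNum(enc, "Hello, world!") // exact: 4 tokens ("Hello", ",", " world", "!")
//	// with ApproximateTokenEnabled: 13 bytes -> int(13 * 0.38) = 4
//
// The two happen to agree here; longer or non-English text can diverge
// substantially.
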
func CountTokenMessages(messages []model.Message, model string) int {
	tokenEncoder := getTokenEncoder(model)
	// Reference:
	// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
	// https://github.com/pkoukk/tiktoken-go/issues/6
	//
	// Every message follows <|start|>{role/name}\n{content}<|end|>\n
	var tokensPerMessage int
	var tokensPerName int
	if model == "gpt-3.5-turbo-0301" {
		tokensPerMessage = 4
		tokensPerName = -1 // If there's a name, the role is omitted
	} else {
		tokensPerMessage = 3
		tokensPerName = 1
	}
	tokenNum := 0
	for _, message := range messages {
		tokenNum += tokensPerMessage
		switch v := message.Content.(type) {
		case string:
			tokenNum += getTokenNum(tokenEncoder, v)
		case []any:
			for _, it := range v {
				// Use checked type assertions: content parts come from
				// untrusted request bodies, and a bare assertion would panic
				// on malformed input.
				m, ok := it.(map[string]any)
				if !ok {
					continue
				}
				switch m["type"] {
				case "text":
					if text, ok := m["text"].(string); ok {
						tokenNum += getTokenNum(tokenEncoder, text)
					}
				case "image_url":
					imageUrl, ok := m["image_url"].(map[string]any)
					if ok {
						url, _ := imageUrl["url"].(string)
						detail := ""
						if d, ok := imageUrl["detail"].(string); ok {
							detail = d
						}
						imageTokens, err := countImageTokens(url, detail)
						if err != nil {
							logger.SysError("error counting image tokens: " + err.Error())
						} else {
							tokenNum += imageTokens
						}
					}
				}
			}
		}
		tokenNum += getTokenNum(tokenEncoder, message.Role)
		if message.Name != nil {
			tokenNum += tokensPerName
			tokenNum += getTokenNum(tokenEncoder, *message.Name)
		}
	}
	tokenNum += 3 // Every reply is primed with <|start|>assistant<|message|>
	return tokenNum
}

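// A worked example of the bookkeeping above, assuming cl100k_base
// tokenization: a single {"role": "user", "content": "Hello!"} message on
// gpt-4 costs 3 (per-message overhead) + 2 ("Hello" + "!") + 1 ("user"),
// and the final reply priming adds 3 more, so CountTokenMessages returns 9.
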
// func countVisionTokenMessages(messages []VisionMessage, model string) (int, error) {
// 	tokenEncoder := getTokenEncoder(model)
// 	// Reference:
// 	// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
// 	// https://github.com/pkoukk/tiktoken-go/issues/6
// 	//
// 	// Every message follows <|start|>{role/name}\n{content}<|end|>\n
// 	var tokensPerMessage int
// 	var tokensPerName int
// 	if model == "gpt-3.5-turbo-0301" {
// 		tokensPerMessage = 4
// 		tokensPerName = -1 // If there's a name, the role is omitted
// 	} else {
// 		tokensPerMessage = 3
// 		tokensPerName = 1
// 	}
// 	tokenNum := 0
// 	for _, message := range messages {
// 		tokenNum += tokensPerMessage
// 		for _, cnt := range message.Content {
// 			switch cnt.Type {
// 			case OpenaiVisionMessageContentTypeText:
// 				tokenNum += getTokenNum(tokenEncoder, cnt.Text)
// 			case OpenaiVisionMessageContentTypeImageUrl:
// 				imgblob, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(cnt.ImageUrl.URL, "data:image/jpeg;base64,"))
// 				if err != nil {
// 					return 0, errors.Wrap(err, "failed to decode base64 image")
// 				}
//
// 				if imgtoken, err := CountVisionImageToken(imgblob, cnt.ImageUrl.Detail); err != nil {
// 					return 0, errors.Wrap(err, "failed to count vision image token")
// 				} else {
// 					tokenNum += imgtoken
// 				}
// 			}
// 		}
//
// 		tokenNum += getTokenNum(tokenEncoder, message.Role)
// 		if message.Name != nil {
// 			tokenNum += tokensPerName
// 			tokenNum += getTokenNum(tokenEncoder, *message.Name)
// 		}
// 	}
// 	tokenNum += 3 // Every reply is primed with <|start|>assistant<|message|>
// 	return tokenNum, nil
// }

const (
	lowDetailCost         = 85
	highDetailCostPerTile = 170
	additionalCost        = 85
)

// https://platform.openai.com/docs/guides/vision/calculating-costs
// https://github.com/openai/openai-cookbook/blob/05e3f9be4c7a2ae7ecf029a7c32065b024730ebe/examples/How_to_count_tokens_with_tiktoken.ipynb
func countImageTokens(url string, detail string) (_ int, err error) {
	var fetchSize = true
	var width, height int
	// Reference: https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding
	// How detail == "auto" works is undocumented; the docs only say the model
	// looks at the image input size and decides whether to use the low or
	// high setting.
	// According to the official guide, "low" disables the high-res model and
	// the model only receives a low-res 512px x 512px version of the image,
	// suggesting that an image is treated as low-res when it is smaller than
	// 512px x 512px and as high-res otherwise. That would give the following
	// logic:
	// if detail == "" || detail == "auto" {
	// 	width, height, err = image.GetImageSize(url)
	// 	if err != nil {
	// 		return 0, err
	// 	}
	// 	fetchSize = false
	// 	// not sure if this is correct
	// 	if width > 512 || height > 512 {
	// 		detail = "high"
	// 	} else {
	// 		detail = "low"
	// 	}
	// }

	// However, in testing it always seems to behave the same as "high".
	// The following image, which is 125x50, is still treated as high-res and
	// costs 255 tokens in the response of the non-stream chat completion API:
	// https://upload.wikimedia.org/wikipedia/commons/1/10/18_Infantry_Division_Messina.jpg
	if detail == "" || detail == "auto" {
		// assumed from testing; not sure if this is correct
		detail = "high"
	}
	switch detail {
	case "low":
		return lowDetailCost, nil
	case "high":
		if fetchSize {
			width, height, err = image.GetImageSize(url)
			if err != nil {
				return 0, err
			}
		}
		// Scale down so that the longer side is at most 2048px.
		if width > 2048 || height > 2048 { // max(width, height) > 2048
			ratio := float64(2048) / math.Max(float64(width), float64(height))
			width = int(float64(width) * ratio)
			height = int(float64(height) * ratio)
		}
		// Then scale down so that the shorter side is at most 768px.
		if width > 768 && height > 768 { // min(width, height) > 768
			ratio := float64(768) / math.Min(float64(width), float64(height))
			width = int(float64(width) * ratio)
			height = int(float64(height) * ratio)
		}
		// Count 512px x 512px tiles: each tile costs highDetailCostPerTile,
		// plus a flat additionalCost for the low-res base representation.
		numSquares := int(math.Ceil(float64(width)/512) * math.Ceil(float64(height)/512))
		result := numSquares*highDetailCostPerTile + additionalCost
		return result, nil
	default:
		return 0, errors.New("invalid detail option")
	}
}

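// A worked example of the high-detail cost above, matching the figures in
// OpenAI's vision pricing guide: a 1024x1024 image is under the 2048px cap,
// its shorter side (1024) exceeds 768, so it is scaled by 768/1024 to
// 768x768; that is ceil(768/512) * ceil(768/512) = 2*2 = 4 tiles, giving
// 4*170 + 85 = 765 tokens.
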
func CountTokenInput(input any, model string) int {
	switch v := input.(type) {
	case string:
		return CountTokenText(v, model)
	case []string:
		return CountTokenText(strings.Join(v, ""), model)
	}
	return 0
}

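// CountTokenInput mirrors the embeddings API, whose input may be a single
// string or a list of strings; list inputs are concatenated before counting.
// For instance (a sketch, assuming cl100k_base tokenization):
//
//	CountTokenInput("Hello, world!", "text-embedding-ada-002")                // 4
//	CountTokenInput([]string{"Hello,", " world!"}, "text-embedding-ada-002")  // also 4
//
// Note that joining the strings first can merge token boundaries, so the
// result approximates, rather than equals, the sum of per-item counts.
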
func CountTokenText(text string, model string) int {
	tokenEncoder := getTokenEncoder(model)
	return getTokenNum(tokenEncoder, text)
}