feat: improve error logging for token encoder initialization with offline usage guidance

JustSong 2025-02-02 17:24:23 +08:00
parent ececfe5ba0
commit c81ee68dd0


@@ -3,14 +3,16 @@ package openai
 import (
 	"errors"
 	"fmt"
+	"math"
+	"strings"
 	"github.com/pkoukk/tiktoken-go"
 	"github.com/songquanpeng/one-api/common/config"
 	"github.com/songquanpeng/one-api/common/image"
 	"github.com/songquanpeng/one-api/common/logger"
 	billingratio "github.com/songquanpeng/one-api/relay/billing/ratio"
 	"github.com/songquanpeng/one-api/relay/model"
-	"math"
-	"strings"
 )
 // tokenEncoderMap won't grow after initialization
@@ -21,7 +23,8 @@ func InitTokenEncoders() {
 	logger.SysLog("initializing token encoders")
 	gpt35TokenEncoder, err := tiktoken.EncodingForModel("gpt-3.5-turbo")
 	if err != nil {
-		logger.FatalLog(fmt.Sprintf("failed to get gpt-3.5-turbo token encoder: %s", err.Error()))
+		logger.FatalLog(fmt.Sprintf("failed to get gpt-3.5-turbo token encoder: %s, "+
+			"if you are running in an offline environment, please set TIKTOKEN_CACHE_DIR to use existing encoder files; see this link for more information: https://stackoverflow.com/questions/76106366/how-to-use-tiktoken-in-offline-mode-computer", err.Error()))
 	}
 	defaultTokenEncoder = gpt35TokenEncoder
 	gpt4oTokenEncoder, err := tiktoken.EncodingForModel("gpt-4o")
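
The new error message tells operators of offline deployments to set TIKTOKEN_CACHE_DIR, as described in the linked Stack Overflow answer. Below is a minimal sketch, not part of this commit, of how a service could point tiktoken-go at a pre-populated cache before InitTokenEncoders runs; the cache path and the standalone main function are hypothetical, and it assumes the encoding files were already downloaded into that directory on a machine with internet access.

// Hedged sketch: prepare an offline environment so that
// tiktoken.EncodingForModel can succeed without network access.
package main

import (
	"log"
	"os"
)

func main() {
	// Hypothetical directory that already contains the downloaded encoding files.
	cacheDir := "/opt/one-api/tiktoken-cache"
	if _, err := os.Stat(cacheDir); err != nil {
		log.Fatalf("tiktoken cache dir %q is not accessible: %v", cacheDir, err)
	}
	// The variable must be set before the first EncodingForModel call,
	// i.e. before InitTokenEncoders runs, so the library resolves encodings
	// from the local cache instead of downloading them.
	if err := os.Setenv("TIKTOKEN_CACHE_DIR", cacheDir); err != nil {
		log.Fatalf("failed to set TIKTOKEN_CACHE_DIR: %v", err)
	}
	// openai.InitTokenEncoders() would then be called from here.
}

In practice the variable is usually exported in the container or service environment (for example, TIKTOKEN_CACHE_DIR=/opt/one-api/tiktoken-cache ./one-api) rather than set in code, which is what the improved error message suggests.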