Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-10-29 21:03:41 +08:00)

Compare commits: v0.6.8-alp ... v0.6.8-alp (9 commits)
Commits:

* ec6ad24810
* c4fe57c165
* 274fcf3d76
* 0fc07ea558
* 1ce1e529ee
* d936817de9
* fecaece71b
* c135d74f13
* d0369b114f
.github/workflows/ci.yml (vendored, 16 lines changed)
```diff
@@ -36,21 +36,9 @@ jobs:
       # in the next step as well as the next job.
       - name: Test
         run: go test -cover -coverprofile=coverage.txt ./...
-
-      - name: Archive code coverage results
-        uses: actions/upload-artifact@v4
+      - uses: codecov/codecov-action@v4
         with:
-          name: code-coverage
-          path: coverage.txt # Make sure to use the same file name you chose for the "-coverprofile" in the "Test" step
-
-  code_coverage:
-    name: "Code coverage report"
-    runs-on: ubuntu-latest
-    needs: unit_tests # Depends on the artifact uploaded by the "unit_tests" job
-    steps:
-      - uses: codecov/codecov-action@v4
-        with:
-          token: ${{ secrets.CODECOV_TOKEN }}
+          token: ${{ secrets.CODECOV_TOKEN }}
 
   commit_lint:
     runs-on: ubuntu-latest
```
README.en.md (10 lines changed)
````diff
@@ -101,7 +101,7 @@ Nginx reference configuration:
 ```
 server{
    server_name openai.justsong.cn;  # Modify your domain name accordingly
 
    location / {
           client_max_body_size  64m;
           proxy_http_version 1.1;
````
````diff
@@ -132,12 +132,12 @@ The initial account username is `root` and password is `123456`.
 1. Download the executable file from [GitHub Releases](https://github.com/songquanpeng/one-api/releases/latest) or compile from source:
    ```shell
    git clone https://github.com/songquanpeng/one-api.git
 
    # Build the frontend
    cd one-api/web/default
    npm install
    npm run build
 
    # Build the backend
    cd ../..
    go mod download
````
```diff
@@ -287,7 +287,9 @@ If the channel ID is not provided, load balancing will be used to distribute the
 + Double-check that your interface address and API Key are correct.
 
 ## Related Projects
-[FastGPT](https://github.com/labring/FastGPT): Knowledge question answering system based on the LLM
+* [FastGPT](https://github.com/labring/FastGPT): Knowledge question answering system based on the LLM
+* [VChart](https://github.com/VisActor/VChart): More than just a cross-platform charting library, but also an expressive data storyteller.
+* [VMind](https://github.com/VisActor/VMind): Not just automatic, but also fantastic. Open-source solution for intelligent visualization.
 
 ## Note
 This project is an open-source project. Please use it in compliance with OpenAI's [Terms of Use](https://openai.com/policies/terms-of-use) and **applicable laws and regulations**. It must not be used for illegal purposes.
```
README.md (12 lines changed)
```diff
@@ -53,7 +53,7 @@ _✨ Access all major models through the standard OpenAI API format, ready to use out of the box ✨_
 
 > [!NOTE]
 > This project is open source. Users must comply with OpenAI's [Terms of Use](https://openai.com/policies/terms-of-use) and **applicable laws and regulations**, and must not use it for illegal purposes.
 >
 > In accordance with the [Interim Measures for the Administration of Generative AI Services](http://www.cac.gov.cn/2023-07/13/c_1690898327029107.htm), do not provide any unregistered generative AI services to the public in China.
 
 > [!WARNING]
```
````diff
@@ -144,7 +144,7 @@ Nginx reference configuration:
 ```
 server{
    server_name openai.justsong.cn;  # Modify your domain name accordingly
 
    location / {
           client_max_body_size  64m;
           proxy_http_version 1.1;
````
````diff
@@ -189,12 +189,12 @@ docker-compose ps
 1. Download the executable from [GitHub Releases](https://github.com/songquanpeng/one-api/releases/latest) or compile from source:
    ```shell
    git clone https://github.com/songquanpeng/one-api.git
 
    # Build the frontend
    cd one-api/web/default
    npm install
    npm run build
 
    # Build the backend
    cd ../..
    go mod download
````
````diff
@@ -321,7 +321,7 @@ Render can deploy the docker image directly without forking the repository: https://dashbo
 For example, for the official OpenAI library:
 ```bash
 OPENAI_API_KEY="sk-xxxxxx"
 OPENAI_API_BASE="https://<HOST>:<PORT>/v1"
 ```
 
 ```mermaid
````
```diff
@@ -448,6 +448,8 @@ https://openai.justsong.cn
 ## Related Projects
 * [FastGPT](https://github.com/labring/FastGPT): Knowledge base question answering system built on LLMs
 * [ChatGPT Next Web](https://github.com/Yidadaa/ChatGPT-Next-Web): One-click deployment of your own cross-platform ChatGPT application
+* [VChart](https://github.com/VisActor/VChart): More than just a cross-platform charting library, but also an expressive data storyteller.
+* [VMind](https://github.com/VisActor/VMind): Not just automatic, but also fantastic. Open-source solution for intelligent visualization.
 
 ## Note
```
common/config/config.go

```diff
@@ -145,6 +145,9 @@ var InitialRootToken = os.Getenv("INITIAL_ROOT_TOKEN")
 
 var GeminiVersion = env.String("GEMINI_VERSION", "v1")
 
+var OnlyOneLogFile = env.Bool("ONLY_ONE_LOG_FILE", false)
+
 var RelayProxy = env.String("RELAY_PROXY", "")
 var UserContentRequestProxy = env.String("USER_CONTENT_REQUEST_PROXY", "")
 var UserContentRequestTimeout = env.Int("USER_CONTENT_REQUEST_TIMEOUT", 30)
```
common/logger/logger.go

```diff
@@ -27,7 +27,12 @@ var setupLogOnce sync.Once
 func SetupLogger() {
     setupLogOnce.Do(func() {
         if LogDir != "" {
-            logPath := filepath.Join(LogDir, fmt.Sprintf("oneapi-%s.log", time.Now().Format("20060102")))
+            var logPath string
+            if config.OnlyOneLogFile {
+                logPath = filepath.Join(LogDir, "oneapi.log")
+            } else {
+                logPath = filepath.Join(LogDir, fmt.Sprintf("oneapi-%s.log", time.Now().Format("20060102")))
+            }
             fd, err := os.OpenFile(logPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
             if err != nil {
                 log.Fatal("failed to open log file")
```
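The new `ONLY_ONE_LOG_FILE` option collapses the per-day log files into a single `oneapi.log`. A minimal standalone sketch of the path selection (not the project's code; the `./logs` directory is illustrative):

```go
package main

import (
    "fmt"
    "os"
    "path/filepath"
    "time"
)

// logPath mirrors the branch added to SetupLogger: one fixed file when the
// flag is set, otherwise a date-stamped file per day.
func logPath(logDir string, onlyOneLogFile bool) string {
    if onlyOneLogFile {
        return filepath.Join(logDir, "oneapi.log")
    }
    return filepath.Join(logDir, fmt.Sprintf("oneapi-%s.log", time.Now().Format("20060102")))
}

func main() {
    only := os.Getenv("ONLY_ONE_LOG_FILE") == "true"
    fmt.Println(logPath("./logs", only))
}
```

Because the file is opened with `os.O_APPEND`, restarts keep appending to the same `oneapi.log` when the flag is on.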
common/message/email.go

```diff
@@ -6,11 +6,16 @@ import (
     "encoding/base64"
     "fmt"
     "github.com/songquanpeng/one-api/common/config"
+    "net"
     "net/smtp"
     "strings"
     "time"
 )
 
+func shouldAuth() bool {
+    return config.SMTPAccount != "" || config.SMTPToken != ""
+}
+
 func SendEmail(subject string, receiver string, content string) error {
     if receiver == "" {
         return fmt.Errorf("receiver is empty")
@@ -41,16 +46,24 @@ func SendEmail(subject string, receiver string, content string) error {
         "Date: %s\r\n"+
         "Content-Type: text/html; charset=UTF-8\r\n\r\n%s\r\n",
         receiver, config.SystemName, config.SMTPFrom, encodedSubject, messageId, time.Now().Format(time.RFC1123Z), content))
 
     auth := smtp.PlainAuth("", config.SMTPAccount, config.SMTPToken, config.SMTPServer)
     addr := fmt.Sprintf("%s:%d", config.SMTPServer, config.SMTPPort)
     to := strings.Split(receiver, ";")
 
-    if config.SMTPPort == 465 {
-        tlsConfig := &tls.Config{
-            InsecureSkipVerify: true,
-            ServerName:         config.SMTPServer,
+    if config.SMTPPort == 465 || !shouldAuth() {
+        // need advanced client
+        var conn net.Conn
+        var err error
+        if config.SMTPPort == 465 {
+            tlsConfig := &tls.Config{
+                InsecureSkipVerify: true,
+                ServerName:         config.SMTPServer,
+            }
+            conn, err = tls.Dial("tcp", fmt.Sprintf("%s:%d", config.SMTPServer, config.SMTPPort), tlsConfig)
+        } else {
+            conn, err = net.Dial("tcp", fmt.Sprintf("%s:%d", config.SMTPServer, config.SMTPPort))
         }
-        conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", config.SMTPServer, config.SMTPPort), tlsConfig)
         if err != nil {
             return err
         }
@@ -59,8 +72,10 @@ func SendEmail(subject string, receiver string, content string) error {
         return err
     }
     defer client.Close()
-    if err = client.Auth(auth); err != nil {
-        return err
+    if shouldAuth() {
+        if err = client.Auth(auth); err != nil {
+            return err
+        }
     }
     if err = client.Mail(config.SMTPFrom); err != nil {
         return err
```
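The net effect: SMTP AUTH is attempted only when an account or token is configured, and the unauthenticated path (or implicit-TLS port 465) builds the connection by hand via `net.Dial`/`tls.Dial`. A small sketch of the new decision (standalone, with illustrative inputs):

```go
package main

import "fmt"

// shouldAuth mirrors the helper added above: AUTH is attempted only when an
// SMTP account or token is configured.
func shouldAuth(smtpAccount, smtpToken string) bool {
    return smtpAccount != "" || smtpToken != ""
}

func main() {
    fmt.Println(shouldAuth("", ""))                 // false: skip client.Auth, use the manual dial path
    fmt.Println(shouldAuth("user@example.com", "")) // true: client.Auth is called as before
}
```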
main.go (22 lines changed)
```diff
@@ -27,27 +27,19 @@ func main() {
     common.Init()
     logger.SetupLogger()
     logger.SysLogf("One API %s started", common.Version)
-    if os.Getenv("GIN_MODE") != "debug" {
+
+    if os.Getenv("GIN_MODE") != gin.DebugMode {
         gin.SetMode(gin.ReleaseMode)
     }
     if config.DebugEnabled {
         logger.SysLog("running in debug mode")
     }
-    var err error
+
     // Initialize SQL Database
-    model.DB, err = model.InitDB("SQL_DSN")
-    if err != nil {
-        logger.FatalLog("failed to initialize database: " + err.Error())
-    }
-    if os.Getenv("LOG_SQL_DSN") != "" {
-        logger.SysLog("using secondary database for table logs")
-        model.LOG_DB, err = model.InitDB("LOG_SQL_DSN")
-        if err != nil {
-            logger.FatalLog("failed to initialize secondary database: " + err.Error())
-        }
-    } else {
-        model.LOG_DB = model.DB
-    }
+    model.InitDB()
+    model.InitLogDB()
+
+    var err error
     err = model.CreateRootAccountIfNeed()
     if err != nil {
         logger.FatalLog("database init error: " + err.Error())
```
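The mode check now compares against `gin.DebugMode` instead of the string literal `"debug"`. For reference, gin exports its mode names as constants (a trivial sketch; requires the gin dependency):

```go
package main

import (
    "fmt"

    "github.com/gin-gonic/gin"
)

func main() {
    // gin.DebugMode, gin.ReleaseMode and gin.TestMode are the canonical mode
    // strings; comparing against them avoids hard-coded literals drifting.
    fmt.Println(gin.DebugMode, gin.ReleaseMode, gin.TestMode) // debug release test
}
```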
model/main.go (219 lines changed)
```diff
@@ -1,6 +1,7 @@
 package model
 
 import (
+    "database/sql"
     "fmt"
     "github.com/songquanpeng/one-api/common"
    "github.com/songquanpeng/one-api/common/config"
```
```diff
@@ -60,90 +61,156 @@ func CreateRootAccountIfNeed() error {
 }
 
 func chooseDB(envName string) (*gorm.DB, error) {
-    if os.Getenv(envName) != "" {
-        dsn := os.Getenv(envName)
-        if strings.HasPrefix(dsn, "postgres://") {
-            // Use PostgreSQL
-            logger.SysLog("using PostgreSQL as database")
-            common.UsingPostgreSQL = true
-            return gorm.Open(postgres.New(postgres.Config{
-                DSN:                  dsn,
-                PreferSimpleProtocol: true, // disables implicit prepared statement usage
-            }), &gorm.Config{
-                PrepareStmt: true, // precompile SQL
-            })
-        }
+    dsn := os.Getenv(envName)
+
+    switch {
+    case strings.HasPrefix(dsn, "postgres://"):
+        // Use PostgreSQL
+        return openPostgreSQL(dsn)
+    case dsn != "":
         // Use MySQL
-        logger.SysLog("using MySQL as database")
-        common.UsingMySQL = true
-        return gorm.Open(mysql.Open(dsn), &gorm.Config{
-            PrepareStmt: true, // precompile SQL
-        })
+        return openMySQL(dsn)
+    default:
+        // Use SQLite
+        return openSQLite()
     }
-    // Use SQLite
-    logger.SysLog("SQL_DSN not set, using SQLite as database")
-    common.UsingSQLite = true
-    config := fmt.Sprintf("?_busy_timeout=%d", common.SQLiteBusyTimeout)
-    return gorm.Open(sqlite.Open(common.SQLitePath+config), &gorm.Config{
-        PrepareStmt: true, // precompile SQL
-    })
 }
 
-func InitDB(envName string) (db *gorm.DB, err error) {
-    db, err = chooseDB(envName)
-    if err == nil {
-        if config.DebugSQLEnabled {
-            db = db.Debug()
-        }
-        sqlDB, err := db.DB()
-        if err != nil {
-            return nil, err
-        }
-        sqlDB.SetMaxIdleConns(env.Int("SQL_MAX_IDLE_CONNS", 100))
-        sqlDB.SetMaxOpenConns(env.Int("SQL_MAX_OPEN_CONNS", 1000))
-        sqlDB.SetConnMaxLifetime(time.Second * time.Duration(env.Int("SQL_MAX_LIFETIME", 60)))
-
-        if !config.IsMasterNode {
-            return db, err
-        }
-        if common.UsingMySQL {
-            _, _ = sqlDB.Exec("DROP INDEX idx_channels_key ON channels;") // TODO: delete this line when most users have upgraded
-        }
-        logger.SysLog("database migration started")
-        err = db.AutoMigrate(&Channel{})
-        if err != nil {
-            return nil, err
-        }
-        err = db.AutoMigrate(&Token{})
-        if err != nil {
-            return nil, err
-        }
-        err = db.AutoMigrate(&User{})
-        if err != nil {
-            return nil, err
-        }
-        err = db.AutoMigrate(&Option{})
-        if err != nil {
-            return nil, err
-        }
-        err = db.AutoMigrate(&Redemption{})
-        if err != nil {
-            return nil, err
-        }
-        err = db.AutoMigrate(&Ability{})
-        if err != nil {
-            return nil, err
-        }
-        err = db.AutoMigrate(&Log{})
-        if err != nil {
-            return nil, err
-        }
-        logger.SysLog("database migrated")
-        return db, err
-    } else {
-        logger.FatalLog(err)
-    }
-    return db, err
+func openPostgreSQL(dsn string) (*gorm.DB, error) {
+    logger.SysLog("using PostgreSQL as database")
+    common.UsingPostgreSQL = true
+    return gorm.Open(postgres.New(postgres.Config{
+        DSN:                  dsn,
+        PreferSimpleProtocol: true, // disables implicit prepared statement usage
+    }), &gorm.Config{
+        PrepareStmt: true, // precompile SQL
+    })
+}
+
+func openMySQL(dsn string) (*gorm.DB, error) {
+    logger.SysLog("using MySQL as database")
+    common.UsingMySQL = true
+    return gorm.Open(mysql.Open(dsn), &gorm.Config{
+        PrepareStmt: true, // precompile SQL
+    })
+}
+
+func openSQLite() (*gorm.DB, error) {
+    logger.SysLog("SQL_DSN not set, using SQLite as database")
+    common.UsingSQLite = true
+    dsn := fmt.Sprintf("%s?_busy_timeout=%d", common.SQLitePath, common.SQLiteBusyTimeout)
+    return gorm.Open(sqlite.Open(dsn), &gorm.Config{
+        PrepareStmt: true, // precompile SQL
+    })
+}
+
+func InitDB() {
+    var err error
+    DB, err = chooseDB("SQL_DSN")
+    if err != nil {
+        logger.FatalLog("failed to initialize database: " + err.Error())
+        return
+    }
+
+    sqlDB := setDBConns(DB)
+
+    if !config.IsMasterNode {
+        return
+    }
+
+    if common.UsingMySQL {
+        _, _ = sqlDB.Exec("DROP INDEX idx_channels_key ON channels;") // TODO: delete this line when most users have upgraded
+    }
+
+    logger.SysLog("database migration started")
+    if err = migrateDB(); err != nil {
+        logger.FatalLog("failed to migrate database: " + err.Error())
+        return
+    }
+    logger.SysLog("database migrated")
+}
+
+func migrateDB() error {
+    var err error
+    if err = DB.AutoMigrate(&Channel{}); err != nil {
+        return err
+    }
+    if err = DB.AutoMigrate(&Token{}); err != nil {
+        return err
+    }
+    if err = DB.AutoMigrate(&User{}); err != nil {
+        return err
+    }
+    if err = DB.AutoMigrate(&Option{}); err != nil {
+        return err
+    }
+    if err = DB.AutoMigrate(&Redemption{}); err != nil {
+        return err
+    }
+    if err = DB.AutoMigrate(&Ability{}); err != nil {
+        return err
+    }
+    if err = DB.AutoMigrate(&Log{}); err != nil {
+        return err
+    }
+    return nil
+}
+
+func InitLogDB() {
+    if os.Getenv("LOG_SQL_DSN") == "" {
+        LOG_DB = DB
+        return
+    }
+
+    logger.SysLog("using secondary database for table logs")
+    var err error
+    LOG_DB, err = chooseDB("LOG_SQL_DSN")
+    if err != nil {
+        logger.FatalLog("failed to initialize secondary database: " + err.Error())
+        return
+    }
+
+    setDBConns(LOG_DB)
+
+    if !config.IsMasterNode {
+        return
+    }
+
+    logger.SysLog("secondary database migration started")
+    err = migrateLOGDB()
+    if err != nil {
+        logger.FatalLog("failed to migrate secondary database: " + err.Error())
+        return
+    }
+    logger.SysLog("secondary database migrated")
+}
+
+func migrateLOGDB() error {
+    var err error
+    if err = LOG_DB.AutoMigrate(&Log{}); err != nil {
+        return err
+    }
+    return nil
+}
+
+func setDBConns(db *gorm.DB) *sql.DB {
+    if config.DebugSQLEnabled {
+        db = db.Debug()
+    }
+
+    sqlDB, err := db.DB()
+    if err != nil {
+        logger.FatalLog("failed to connect database: " + err.Error())
+        return nil
+    }
+
+    sqlDB.SetMaxIdleConns(env.Int("SQL_MAX_IDLE_CONNS", 100))
+    sqlDB.SetMaxOpenConns(env.Int("SQL_MAX_OPEN_CONNS", 1000))
+    sqlDB.SetConnMaxLifetime(time.Second * time.Duration(env.Int("SQL_MAX_LIFETIME", 60)))
+    return sqlDB
+}
 
 func closeDB(db *gorm.DB) error {
```
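The DSN dispatch is now a single switch, and `InitDB`/`InitLogDB` own their error handling instead of returning the handle. A standalone sketch of the selection rule (illustrative DSNs, not the project's code):

```go
package main

import (
    "fmt"
    "strings"
)

// driverFor mirrors the switch in the refactored chooseDB: a postgres:// DSN
// selects PostgreSQL, any other non-empty DSN selects MySQL, and an empty
// DSN falls back to SQLite.
func driverFor(dsn string) string {
    switch {
    case strings.HasPrefix(dsn, "postgres://"):
        return "postgres"
    case dsn != "":
        return "mysql"
    default:
        return "sqlite"
    }
}

func main() {
    fmt.Println(driverFor("postgres://user:pass@localhost/oneapi")) // postgres
    fmt.Println(driverFor("user:pass@tcp(localhost:3306)/oneapi"))  // mysql
    fmt.Println(driverFor(""))                                      // sqlite
}
```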
relay/adaptor/anthropic/main.go

```diff
@@ -29,12 +29,30 @@ func stopReasonClaude2OpenAI(reason *string) string {
         return "stop"
     case "max_tokens":
         return "length"
+    case "tool_use":
+        return "tool_calls"
     default:
         return *reason
     }
 }
 
 func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
+    claudeTools := make([]Tool, 0, len(textRequest.Tools))
+
+    for _, tool := range textRequest.Tools {
+        if params, ok := tool.Function.Parameters.(map[string]any); ok {
+            claudeTools = append(claudeTools, Tool{
+                Name:        tool.Function.Name,
+                Description: tool.Function.Description,
+                InputSchema: InputSchema{
+                    Type:       params["type"].(string),
+                    Properties: params["properties"],
+                    Required:   params["required"],
+                },
+            })
+        }
+    }
+
     claudeRequest := Request{
         Model:     textRequest.Model,
         MaxTokens: textRequest.MaxTokens,
@@ -42,6 +60,24 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
         TopP:        textRequest.TopP,
         TopK:        textRequest.TopK,
         Stream:      textRequest.Stream,
+        Tools:       claudeTools,
+    }
+    if len(claudeTools) > 0 {
+        claudeToolChoice := struct {
+            Type string `json:"type"`
+            Name string `json:"name,omitempty"`
+        }{Type: "auto"} // default value https://docs.anthropic.com/en/docs/build-with-claude/tool-use#controlling-claudes-output
+        if choice, ok := textRequest.ToolChoice.(map[string]any); ok {
+            if function, ok := choice["function"].(map[string]any); ok {
+                claudeToolChoice.Type = "tool"
+                claudeToolChoice.Name = function["name"].(string)
+            }
+        } else if toolChoiceType, ok := textRequest.ToolChoice.(string); ok {
+            if toolChoiceType == "any" {
+                claudeToolChoice.Type = toolChoiceType
+            }
+        }
+        claudeRequest.ToolChoice = claudeToolChoice
     }
     if claudeRequest.MaxTokens == 0 {
         claudeRequest.MaxTokens = 4096
@@ -64,7 +100,24 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
         if message.IsStringContent() {
             content.Type = "text"
             content.Text = message.StringContent()
+            if message.Role == "tool" {
+                claudeMessage.Role = "user"
+                content.Type = "tool_result"
+                content.Content = content.Text
+                content.Text = ""
+                content.ToolUseId = message.ToolCallId
+            }
             claudeMessage.Content = append(claudeMessage.Content, content)
+            for i := range message.ToolCalls {
+                inputParam := make(map[string]any)
+                _ = json.Unmarshal([]byte(message.ToolCalls[i].Function.Arguments.(string)), &inputParam)
+                claudeMessage.Content = append(claudeMessage.Content, Content{
+                    Type:  "tool_use",
+                    Id:    message.ToolCalls[i].Id,
+                    Name:  message.ToolCalls[i].Function.Name,
+                    Input: inputParam,
+                })
+            }
             claudeRequest.Messages = append(claudeRequest.Messages, claudeMessage)
             continue
         }
@@ -97,16 +150,35 @@ func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCo
     var response *Response
     var responseText string
     var stopReason string
+    tools := make([]model.Tool, 0)
+
     switch claudeResponse.Type {
     case "message_start":
         return nil, claudeResponse.Message
     case "content_block_start":
         if claudeResponse.ContentBlock != nil {
             responseText = claudeResponse.ContentBlock.Text
+            if claudeResponse.ContentBlock.Type == "tool_use" {
+                tools = append(tools, model.Tool{
+                    Id:   claudeResponse.ContentBlock.Id,
+                    Type: "function",
+                    Function: model.Function{
+                        Name:      claudeResponse.ContentBlock.Name,
+                        Arguments: "",
+                    },
+                })
+            }
         }
     case "content_block_delta":
         if claudeResponse.Delta != nil {
             responseText = claudeResponse.Delta.Text
+            if claudeResponse.Delta.Type == "input_json_delta" {
+                tools = append(tools, model.Tool{
+                    Function: model.Function{
+                        Arguments: claudeResponse.Delta.PartialJson,
+                    },
+                })
+            }
         }
     case "message_delta":
         if claudeResponse.Usage != nil {
@@ -120,6 +192,10 @@ func StreamResponseClaude2OpenAI(claudeResponse *StreamResponse) (*openai.ChatCo
     }
     var choice openai.ChatCompletionsStreamResponseChoice
     choice.Delta.Content = responseText
+    if len(tools) > 0 {
+        choice.Delta.Content = nil // compatible with other OpenAI derivative applications, like LobeOpenAICompatibleFactory ...
+        choice.Delta.ToolCalls = tools
+    }
     choice.Delta.Role = "assistant"
     finishReason := stopReasonClaude2OpenAI(&stopReason)
     if finishReason != "null" {
@@ -136,12 +212,27 @@ func ResponseClaude2OpenAI(claudeResponse *Response) *openai.TextResponse {
     if len(claudeResponse.Content) > 0 {
         responseText = claudeResponse.Content[0].Text
     }
+    tools := make([]model.Tool, 0)
+    for _, v := range claudeResponse.Content {
+        if v.Type == "tool_use" {
+            args, _ := json.Marshal(v.Input)
+            tools = append(tools, model.Tool{
+                Id:   v.Id,
+                Type: "function", // compatible with other OpenAI derivative applications
+                Function: model.Function{
+                    Name:      v.Name,
+                    Arguments: string(args),
+                },
+            })
+        }
+    }
     choice := openai.TextResponseChoice{
         Index: 0,
         Message: model.Message{
-            Role:    "assistant",
-            Content: responseText,
-            Name:    nil,
+            Role:      "assistant",
+            Content:   responseText,
+            Name:      nil,
+            ToolCalls: tools,
         },
         FinishReason: stopReasonClaude2OpenAI(claudeResponse.StopReason),
     }
@@ -176,6 +267,7 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
     var usage model.Usage
     var modelName string
     var id string
+    var lastToolCallChoice openai.ChatCompletionsStreamResponseChoice
 
     for scanner.Scan() {
         data := scanner.Text()
@@ -196,9 +288,20 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
         if meta != nil {
             usage.PromptTokens += meta.Usage.InputTokens
             usage.CompletionTokens += meta.Usage.OutputTokens
-            modelName = meta.Model
-            id = fmt.Sprintf("chatcmpl-%s", meta.Id)
-            continue
+            if len(meta.Id) > 0 { // only message_start has an id, otherwise it's a finish_reason event.
+                modelName = meta.Model
+                id = fmt.Sprintf("chatcmpl-%s", meta.Id)
+                continue
+            } else { // finish_reason case
+                if len(lastToolCallChoice.Delta.ToolCalls) > 0 {
+                    lastArgs := &lastToolCallChoice.Delta.ToolCalls[len(lastToolCallChoice.Delta.ToolCalls)-1].Function
+                    if len(lastArgs.Arguments.(string)) == 0 { // compatible with OpenAI sending an empty object `{}` when no arguments.
+                        lastArgs.Arguments = "{}"
+                        response.Choices[len(response.Choices)-1].Delta.Content = nil
+                        response.Choices[len(response.Choices)-1].Delta.ToolCalls = lastToolCallChoice.Delta.ToolCalls
+                    }
+                }
+            }
         }
         if response == nil {
             continue
@@ -207,6 +310,12 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
         response.Id = id
         response.Model = modelName
         response.Created = createdTime
+
+        for _, choice := range response.Choices {
+            if len(choice.Delta.ToolCalls) > 0 {
+                lastToolCallChoice = choice
+            }
+        }
         err = render.ObjectData(c, response)
         if err != nil {
             logger.SysError(err.Error())
```
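A standalone sketch of the `tool_choice` mapping introduced in `ConvertRequest` (inputs are illustrative; the real code writes the result into the Claude request):

```go
package main

import "fmt"

// mapToolChoice mirrors the conversion above: an OpenAI tool_choice object
// {"type":"function","function":{"name":...}} becomes Claude's
// {"type":"tool","name":...}; the string "any" passes through; anything else
// falls back to Claude's default "auto".
func mapToolChoice(openaiChoice any) (ctype, name string) {
    ctype = "auto"
    if choice, ok := openaiChoice.(map[string]any); ok {
        if function, ok := choice["function"].(map[string]any); ok {
            ctype = "tool"
            name, _ = function["name"].(string)
        }
    } else if s, ok := openaiChoice.(string); ok && s == "any" {
        ctype = s
    }
    return ctype, name
}

func main() {
    fmt.Println(mapToolChoice(map[string]any{
        "type":     "function",
        "function": map[string]any{"name": "get_weather"},
    })) // tool get_weather
    fmt.Println(mapToolChoice("any")) // any
    fmt.Println(mapToolChoice(nil))   // auto
}
```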
relay/adaptor/anthropic/model.go

```diff
@@ -16,6 +16,12 @@ type Content struct {
     Type   string       `json:"type"`
     Text   string       `json:"text,omitempty"`
     Source *ImageSource `json:"source,omitempty"`
+    // tool_calls
+    Id        string `json:"id,omitempty"`
+    Name      string `json:"name,omitempty"`
+    Input     any    `json:"input,omitempty"`
+    Content   string `json:"content,omitempty"`
+    ToolUseId string `json:"tool_use_id,omitempty"`
 }
 
 type Message struct {
@@ -23,6 +29,18 @@ type Message struct {
     Content []Content `json:"content"`
 }
 
+type Tool struct {
+    Name        string      `json:"name"`
+    Description string      `json:"description,omitempty"`
+    InputSchema InputSchema `json:"input_schema"`
+}
+
+type InputSchema struct {
+    Type       string `json:"type"`
+    Properties any    `json:"properties,omitempty"`
+    Required   any    `json:"required,omitempty"`
+}
+
 type Request struct {
     Model    string    `json:"model"`
     Messages []Message `json:"messages"`
@@ -33,6 +51,8 @@ type Request struct {
     Temperature float64   `json:"temperature,omitempty"`
     TopP        float64   `json:"top_p,omitempty"`
     TopK        int       `json:"top_k,omitempty"`
+    Tools       []Tool    `json:"tools,omitempty"`
+    ToolChoice  any       `json:"tool_choice,omitempty"`
     //Metadata    `json:"metadata,omitempty"`
 }
@@ -61,6 +81,7 @@ type Response struct {
 type Delta struct {
     Type         string  `json:"type"`
     Text         string  `json:"text"`
+    PartialJson  string  `json:"partial_json,omitempty"`
     StopReason   *string `json:"stop_reason"`
     StopSequence *string `json:"stop_sequence"`
 }
```
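For reference, the JSON these new structs serialize to for Claude's `tools` array (local copies of the structs; the tool definition itself is illustrative):

```go
package main

import (
    "encoding/json"
    "fmt"
)

// Local copies of the adaptor's new types, only to show the wire format.
type InputSchema struct {
    Type       string `json:"type"`
    Properties any    `json:"properties,omitempty"`
    Required   any    `json:"required,omitempty"`
}

type Tool struct {
    Name        string      `json:"name"`
    Description string      `json:"description,omitempty"`
    InputSchema InputSchema `json:"input_schema"`
}

func main() {
    t := Tool{
        Name:        "get_weather",
        Description: "Get the current weather for a city",
        InputSchema: InputSchema{
            Type:       "object",
            Properties: map[string]any{"city": map[string]any{"type": "string"}},
            Required:   []string{"city"},
        },
    }
    b, _ := json.MarshalIndent(t, "", "  ")
    fmt.Println(string(b)) // {"name":"get_weather", ..., "input_schema":{"type":"object", ...}}
}
```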
relay/adaptor/aws/main.go

```diff
@@ -6,6 +6,7 @@ import (
     "encoding/json"
     "fmt"
     "github.com/songquanpeng/one-api/common/ctxkey"
+    "github.com/songquanpeng/one-api/relay/adaptor/openai"
     "io"
     "net/http"
@@ -143,6 +144,8 @@ func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.E
     c.Writer.Header().Set("Content-Type", "text/event-stream")
     var usage relaymodel.Usage
     var id string
+    var lastToolCallChoice openai.ChatCompletionsStreamResponseChoice
+
     c.Stream(func(w io.Writer) bool {
         event, ok := <-stream.Events()
         if !ok {
@@ -163,8 +166,19 @@ func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.E
             if meta != nil {
                 usage.PromptTokens += meta.Usage.InputTokens
                 usage.CompletionTokens += meta.Usage.OutputTokens
-                id = fmt.Sprintf("chatcmpl-%s", meta.Id)
-                return true
+                if len(meta.Id) > 0 { // only message_start has an id, otherwise it's a finish_reason event.
+                    id = fmt.Sprintf("chatcmpl-%s", meta.Id)
+                    return true
+                } else { // finish_reason case
+                    if len(lastToolCallChoice.Delta.ToolCalls) > 0 {
+                        lastArgs := &lastToolCallChoice.Delta.ToolCalls[len(lastToolCallChoice.Delta.ToolCalls)-1].Function
+                        if len(lastArgs.Arguments.(string)) == 0 { // compatible with OpenAI sending an empty object `{}` when no arguments.
+                            lastArgs.Arguments = "{}"
+                            response.Choices[len(response.Choices)-1].Delta.Content = nil
+                            response.Choices[len(response.Choices)-1].Delta.ToolCalls = lastToolCallChoice.Delta.ToolCalls
+                        }
+                    }
+                }
             }
             if response == nil {
                 return true
@@ -172,6 +186,12 @@ func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.E
             response.Id = id
             response.Model = c.GetString(ctxkey.OriginalModel)
             response.Created = createdTime
+
+            for _, choice := range response.Choices {
+                if len(choice.Delta.ToolCalls) > 0 {
+                    lastToolCallChoice = choice
+                }
+            }
             jsonStr, err := json.Marshal(response)
             if err != nil {
                 logger.SysError("error marshalling stream response: " + err.Error())
```
relay/adaptor/aws/model.go

```diff
@@ -9,9 +9,12 @@ type Request struct {
     // AnthropicVersion should be "bedrock-2023-05-31"
     AnthropicVersion string              `json:"anthropic_version"`
     Messages         []anthropic.Message `json:"messages"`
+    System           string              `json:"system,omitempty"`
     MaxTokens        int                 `json:"max_tokens,omitempty"`
     Temperature      float64             `json:"temperature,omitempty"`
     TopP             float64             `json:"top_p,omitempty"`
     TopK             int                 `json:"top_k,omitempty"`
     StopSequences    []string            `json:"stop_sequences,omitempty"`
+    Tools            []anthropic.Tool    `json:"tools,omitempty"`
+    ToolChoice       any                 `json:"tool_choice,omitempty"`
 }
```
relay/adaptor/xunfei/constants.go

```diff
@@ -6,4 +6,5 @@ var ModelList = []string{
     "SparkDesk-v2.1",
     "SparkDesk-v3.1",
     "SparkDesk-v3.5",
+    "SparkDesk-v4.0",
 }
```
relay/adaptor/xunfei/main.go

```diff
@@ -44,7 +44,7 @@ func requestOpenAI2Xunfei(request model.GeneralOpenAIRequest, xunfeiAppId string
     xunfeiRequest.Parameter.Chat.MaxTokens = request.MaxTokens
     xunfeiRequest.Payload.Message.Text = messages
 
-    if strings.HasPrefix(domain, "generalv3") {
+    if strings.HasPrefix(domain, "generalv3") || domain == "4.0Ultra" {
         functions := make([]model.Function, len(request.Tools))
         for i, tool := range request.Tools {
             functions[i] = tool.Function
@@ -290,6 +290,8 @@ func apiVersion2domain(apiVersion string) string {
         return "generalv3"
     case "v3.5":
         return "generalv3.5"
+    case "v4.0":
+        return "4.0Ultra"
     }
     return "general" + apiVersion
 }
```
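A sketch of the version-to-domain mapping with the new case (only the cases visible in this hunk are reproduced; the fallback matches the code above):

```go
package main

import "fmt"

// apiVersion2domain (partial): SparkDesk v4.0 maps to the "4.0Ultra" domain,
// which, together with the generalv3* domains, enables function/tool support.
func apiVersion2domain(apiVersion string) string {
    switch apiVersion {
    case "v3.1":
        return "generalv3"
    case "v3.5":
        return "generalv3.5"
    case "v4.0":
        return "4.0Ultra"
    }
    return "general" + apiVersion
}

func main() {
    fmt.Println(apiVersion2domain("v4.0")) // 4.0Ultra
    fmt.Println(apiVersion2domain("v3.5")) // generalv3.5
}
```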
relay/billing/ratio/model.go

```diff
@@ -125,6 +125,7 @@ var ModelRatio = map[string]float64{
     "SparkDesk-v2.1":            1.2858, // ¥0.018 / 1k tokens
     "SparkDesk-v3.1":            1.2858, // ¥0.018 / 1k tokens
     "SparkDesk-v3.5":            1.2858, // ¥0.018 / 1k tokens
+    "SparkDesk-v4.0":            1.2858, // ¥0.018 / 1k tokens
     "360GPT_S2_V9":              0.8572, // ¥0.012 / 1k tokens
     "embedding-bert-512-v1":     0.0715, // ¥0.001 / 1k tokens
     "embedding_s1_v1":           0.0715, // ¥0.001 / 1k tokens
```
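The new entry reuses the SparkDesk price of ¥0.018 per 1K tokens. Assuming the ratio table's usual convention that a ratio of 1 equals $0.002 (about ¥0.014) per 1K tokens, the value checks out:

```go
package main

import "fmt"

func main() {
    // ratio = model price per 1K tokens / base price per 1K tokens
    // ¥0.018 / ¥0.014 ≈ 1.2857, listed in the table as 1.2858.
    fmt.Printf("%.4f\n", 0.018/0.014) // 1.2857
}
```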
relay/controller/helper.go

```diff
@@ -40,78 +40,6 @@ func getAndValidateTextRequest(c *gin.Context, relayMode int) (*relaymodel.Gener
     return textRequest, nil
 }
 
-func getImageRequest(c *gin.Context, relayMode int) (*relaymodel.ImageRequest, error) {
-    imageRequest := &relaymodel.ImageRequest{}
-    err := common.UnmarshalBodyReusable(c, imageRequest)
-    if err != nil {
-        return nil, err
-    }
-    if imageRequest.N == 0 {
-        imageRequest.N = 1
-    }
-    if imageRequest.Size == "" {
-        imageRequest.Size = "1024x1024"
-    }
-    if imageRequest.Model == "" {
-        imageRequest.Model = "dall-e-2"
-    }
-    return imageRequest, nil
-}
-
-func isValidImageSize(model string, size string) bool {
-    if model == "cogview-3" {
-        return true
-    }
-    _, ok := billingratio.ImageSizeRatios[model][size]
-    return ok
-}
-
-func getImageSizeRatio(model string, size string) float64 {
-    ratio, ok := billingratio.ImageSizeRatios[model][size]
-    if !ok {
-        return 1
-    }
-    return ratio
-}
-
-func validateImageRequest(imageRequest *relaymodel.ImageRequest, meta *meta.Meta) *relaymodel.ErrorWithStatusCode {
-    // model validation
-    hasValidSize := isValidImageSize(imageRequest.Model, imageRequest.Size)
-    if !hasValidSize {
-        return openai.ErrorWrapper(errors.New("size not supported for this image model"), "size_not_supported", http.StatusBadRequest)
-    }
-    // check prompt length
-    if imageRequest.Prompt == "" {
-        return openai.ErrorWrapper(errors.New("prompt is required"), "prompt_missing", http.StatusBadRequest)
-    }
-    if len(imageRequest.Prompt) > billingratio.ImagePromptLengthLimitations[imageRequest.Model] {
-        return openai.ErrorWrapper(errors.New("prompt is too long"), "prompt_too_long", http.StatusBadRequest)
-    }
-    // Number of generated images validation
-    if !isWithinRange(imageRequest.Model, imageRequest.N) {
-        // channel not azure
-        if meta.ChannelType != channeltype.Azure {
-            return openai.ErrorWrapper(errors.New("invalid value of n"), "n_not_within_range", http.StatusBadRequest)
-        }
-    }
-    return nil
-}
-
-func getImageCostRatio(imageRequest *relaymodel.ImageRequest) (float64, error) {
-    if imageRequest == nil {
-        return 0, errors.New("imageRequest is nil")
-    }
-    imageCostRatio := getImageSizeRatio(imageRequest.Model, imageRequest.Size)
-    if imageRequest.Quality == "hd" && imageRequest.Model == "dall-e-3" {
-        if imageRequest.Size == "1024x1024" {
-            imageCostRatio *= 2
-        } else {
-            imageCostRatio *= 1.5
-        }
-    }
-    return imageCostRatio, nil
-}
-
 func getPromptTokens(textRequest *relaymodel.GeneralOpenAIRequest, relayMode int) int {
     switch relayMode {
     case relaymode.ChatCompletions:
```
relay/controller/image.go

```diff
@@ -7,6 +7,7 @@ import (
     "errors"
     "fmt"
     "github.com/gin-gonic/gin"
+    "github.com/songquanpeng/one-api/common"
     "github.com/songquanpeng/one-api/common/ctxkey"
     "github.com/songquanpeng/one-api/common/logger"
     "github.com/songquanpeng/one-api/model"
@@ -20,13 +21,84 @@ import (
     "net/http"
 )
 
-func isWithinRange(element string, value int) bool {
-    if _, ok := billingratio.ImageGenerationAmounts[element]; !ok {
-        return false
+func getImageRequest(c *gin.Context, relayMode int) (*relaymodel.ImageRequest, error) {
+    imageRequest := &relaymodel.ImageRequest{}
+    err := common.UnmarshalBodyReusable(c, imageRequest)
+    if err != nil {
+        return nil, err
     }
-    min := billingratio.ImageGenerationAmounts[element][0]
-    max := billingratio.ImageGenerationAmounts[element][1]
-    return value >= min && value <= max
+    if imageRequest.N == 0 {
+        imageRequest.N = 1
+    }
+    if imageRequest.Size == "" {
+        imageRequest.Size = "1024x1024"
+    }
+    if imageRequest.Model == "" {
+        imageRequest.Model = "dall-e-2"
+    }
+    return imageRequest, nil
+}
+
+func isValidImageSize(model string, size string) bool {
+    if model == "cogview-3" || billingratio.ImageSizeRatios[model] == nil {
+        return true
+    }
+    _, ok := billingratio.ImageSizeRatios[model][size]
+    return ok
+}
+
+func isValidImagePromptLength(model string, promptLength int) bool {
+    maxPromptLength, ok := billingratio.ImagePromptLengthLimitations[model]
+    return !ok || promptLength <= maxPromptLength
+}
+
+func isWithinRange(element string, value int) bool {
+    amounts, ok := billingratio.ImageGenerationAmounts[element]
+    return !ok || (value >= amounts[0] && value <= amounts[1])
+}
+
+func getImageSizeRatio(model string, size string) float64 {
+    if ratio, ok := billingratio.ImageSizeRatios[model][size]; ok {
+        return ratio
+    }
+    return 1
+}
+
+func validateImageRequest(imageRequest *relaymodel.ImageRequest, meta *meta.Meta) *relaymodel.ErrorWithStatusCode {
+    // check prompt length
+    if imageRequest.Prompt == "" {
+        return openai.ErrorWrapper(errors.New("prompt is required"), "prompt_missing", http.StatusBadRequest)
+    }
+
+    // model validation
+    if !isValidImageSize(imageRequest.Model, imageRequest.Size) {
+        return openai.ErrorWrapper(errors.New("size not supported for this image model"), "size_not_supported", http.StatusBadRequest)
+    }
+
+    if !isValidImagePromptLength(imageRequest.Model, len(imageRequest.Prompt)) {
+        return openai.ErrorWrapper(errors.New("prompt is too long"), "prompt_too_long", http.StatusBadRequest)
+    }
+
+    // Number of generated images validation
+    if !isWithinRange(imageRequest.Model, imageRequest.N) {
+        return openai.ErrorWrapper(errors.New("invalid value of n"), "n_not_within_range", http.StatusBadRequest)
+    }
+    return nil
+}
+
+func getImageCostRatio(imageRequest *relaymodel.ImageRequest) (float64, error) {
+    if imageRequest == nil {
+        return 0, errors.New("imageRequest is nil")
+    }
+    imageCostRatio := getImageSizeRatio(imageRequest.Model, imageRequest.Size)
+    if imageRequest.Quality == "hd" && imageRequest.Model == "dall-e-3" {
+        if imageRequest.Size == "1024x1024" {
+            imageCostRatio *= 2
+        } else {
+            imageCostRatio *= 1.5
+        }
+    }
+    return imageCostRatio, nil
+}
 
 func RelayImageHelper(c *gin.Context, relayMode int) *relaymodel.ErrorWithStatusCode {
```
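The rewritten validators treat models that are absent from the billing tables as unrestricted, instead of rejecting them. A self-contained sketch of that behavior (table contents are illustrative):

```go
package main

import "fmt"

var imageSizeRatios = map[string]map[string]float64{
    "dall-e-2": {"256x256": 1, "512x512": 1.125, "1024x1024": 1.25},
}

var imageGenerationAmounts = map[string][2]int{
    "dall-e-2": {1, 10},
}

// isValidImageSize: unknown models have no size table, so they pass.
func isValidImageSize(model, size string) bool {
    if model == "cogview-3" || imageSizeRatios[model] == nil {
        return true
    }
    _, ok := imageSizeRatios[model][size]
    return ok
}

// isWithinRange: unknown models have no count limits, so they pass.
func isWithinRange(model string, value int) bool {
    amounts, ok := imageGenerationAmounts[model]
    return !ok || (value >= amounts[0] && value <= amounts[1])
}

func main() {
    fmt.Println(isValidImageSize("dall-e-2", "640x640"))   // false: known model, unsupported size
    fmt.Println(isValidImageSize("some-new-model", "any")) // true: unknown model is not restricted
    fmt.Println(isWithinRange("dall-e-2", 11))             // false: above the 1..10 range
    fmt.Println(isWithinRange("some-new-model", 11))       // true
}
```

Note also that the Azure special case in the old `validateImageRequest` is gone; `n` is now validated uniformly for all channel types.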
relay/model/message.go

```diff
@@ -1,10 +1,11 @@
 package model
 
 type Message struct {
-    Role      string  `json:"role,omitempty"`
-    Content   any     `json:"content,omitempty"`
-    Name      *string `json:"name,omitempty"`
-    ToolCalls []Tool  `json:"tool_calls,omitempty"`
+    Role       string  `json:"role,omitempty"`
+    Content    any     `json:"content,omitempty"`
+    Name       *string `json:"name,omitempty"`
+    ToolCalls  []Tool  `json:"tool_calls,omitempty"`
+    ToolCallId string  `json:"tool_call_id,omitempty"`
 }
 
 func (m Message) IsStringContent() bool {
```
relay/model/tool.go

```diff
@@ -2,13 +2,13 @@ package model
 
 type Tool struct {
     Id       string   `json:"id,omitempty"`
-    Type     string   `json:"type"`
+    Type     string   `json:"type,omitempty"` // when splicing claude tools stream messages, it is empty
     Function Function `json:"function"`
 }
 
 type Function struct {
     Description string `json:"description,omitempty"`
-    Name        string `json:"name"`
+    Name        string `json:"name,omitempty"` // when splicing claude tools stream messages, it is empty
     Parameters  any    `json:"parameters,omitempty"` // request
     Arguments   any    `json:"arguments,omitempty"`  // response
 }
```
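With `Type` and `Name` now `omitempty`, the incremental tool-call deltas spliced from Claude's `input_json_delta` events marshal without empty `"type"`/`"name"` keys. A quick check with local copies of the structs:

```go
package main

import (
    "encoding/json"
    "fmt"
)

type Function struct {
    Name      string `json:"name,omitempty"`
    Arguments any    `json:"arguments,omitempty"`
}

type Tool struct {
    Id       string   `json:"id,omitempty"`
    Type     string   `json:"type,omitempty"`
    Function Function `json:"function"`
}

func main() {
    // A partial-arguments delta has no id, type or name yet.
    delta := Tool{Function: Function{Arguments: `{"city":`}}
    b, _ := json.Marshal(delta)
    fmt.Println(string(b)) // {"function":{"arguments":"{\"city\":"}}
}
```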
web/default/src/pages/Channel/EditChannel.js

```diff
@@ -78,7 +78,7 @@ const EditChannel = (props) => {
         localModels = ['chatglm_pro', 'chatglm_std', 'chatglm_lite'];
         break;
       case 18:
-        localModels = ['SparkDesk', 'SparkDesk-v1.1', 'SparkDesk-v2.1', 'SparkDesk-v3.1', 'SparkDesk-v3.5'];
+        localModels = ['SparkDesk', 'SparkDesk-v1.1', 'SparkDesk-v2.1', 'SparkDesk-v3.1', 'SparkDesk-v3.5', 'SparkDesk-v4.0'];
         break;
       case 19:
         localModels = ['360GPT_S2_V9', 'embedding-bert-512-v1', 'embedding_s1_v1', 'semantic_similarity_s1_v1'];
```
web/berry/src/views/Channel/type/Config.js

```diff
@@ -91,7 +91,7 @@ const typeConfig = {
       other: '版本号'
     },
     input: {
-      models: ['SparkDesk', 'SparkDesk-v1.1', 'SparkDesk-v2.1', 'SparkDesk-v3.1', 'SparkDesk-v3.5']
+      models: ['SparkDesk', 'SparkDesk-v1.1', 'SparkDesk-v2.1', 'SparkDesk-v3.1', 'SparkDesk-v3.5', 'SparkDesk-v4.0']
     },
     prompt: {
       key: '按照如下格式输入:APPID|APISecret|APIKey',
```