fix: implement handling for o1-mini and o1-preview models to disable system prompt and max_tokens

This commit is contained in:
Laisky.Cai 2024-11-27 06:11:34 +00:00
parent 26c57f3be6
commit 5449190b8e

View File

@ -4,6 +4,7 @@ import (
"fmt"
"io"
"net/http"
"slices"
"strings"
"github.com/Laisky/errors/v2"
@ -82,6 +83,21 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
}
request.StreamOptions.IncludeUsage = true
}
// o1-mini and o1-preview models do not support system prompts or the max_tokens parameter
if strings.HasPrefix(request.Model, "o1-mini") || strings.HasPrefix(request.Model, "o1-preview") {
request.MaxTokens = 0
request.Messages = func(raw []model.Message) (filtered []model.Message) {
for i := range raw {
if raw[i].Role != "system" {
filtered = append(filtered, raw[i])
}
}
return
}(request.Messages)
}
return request, nil
}