From 5449190b8eeaf5e13d6f93934cc0c6413993128f Mon Sep 17 00:00:00 2001
From: "Laisky.Cai"
Date: Wed, 27 Nov 2024 06:11:34 +0000
Subject: [PATCH] fix: implement handling for o1-mini and o1-preview models to disable system prompt and max_tokens

---
 relay/adaptor/openai/adaptor.go | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/relay/adaptor/openai/adaptor.go b/relay/adaptor/openai/adaptor.go
index 21f970c1..3f4cdb4a 100644
--- a/relay/adaptor/openai/adaptor.go
+++ b/relay/adaptor/openai/adaptor.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"slices"
 	"strings"
 
 	"github.com/Laisky/errors/v2"
@@ -82,6 +83,15 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 		}
 		request.StreamOptions.IncludeUsage = true
 	}
+
+	// o1-mini and o1-preview do not support system prompts or max_tokens.
+	if strings.HasPrefix(request.Model, "o1-mini") || strings.HasPrefix(request.Model, "o1-preview") {
+		request.MaxTokens = 0
+		request.Messages = slices.DeleteFunc(request.Messages, func(m model.Message) bool {
+			return m.Role == "system"
+		})
+	}
+
 	return request, nil
 }
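
Note (reviewer sketch, not part of the commit): the hunk above relies on slices.DeleteFunc from the Go 1.21+ standard library. Below is a minimal, self-contained illustration of the same stripping behavior; Message and stripForO1 are hypothetical stand-ins for model.Message and the adaptor branch, not identifiers from this repository.

package main

import (
	"fmt"
	"slices"
	"strings"
)

// Message is an illustrative stand-in for model.Message; only the
// fields needed for this sketch are included.
type Message struct {
	Role    string
	Content string
}

// stripForO1 mirrors the patch's branch in isolation: for o1-mini and
// o1-preview requests it zeroes max_tokens (so an omitempty field is
// not serialized) and removes system-role messages.
func stripForO1(modelName string, maxTokens int, msgs []Message) (int, []Message) {
	if strings.HasPrefix(modelName, "o1-mini") || strings.HasPrefix(modelName, "o1-preview") {
		maxTokens = 0
		msgs = slices.DeleteFunc(msgs, func(m Message) bool {
			return m.Role == "system"
		})
	}
	return maxTokens, msgs
}

func main() {
	msgs := []Message{
		{Role: "system", Content: "You are a helpful assistant."},
		{Role: "user", Content: "Hello"},
	}
	maxTokens, out := stripForO1("o1-mini", 1024, msgs)
	fmt.Println(maxTokens, out) // prints: 0 [{user Hello}]
}

One design note: slices.DeleteFunc filters in place by reusing the backing array rather than allocating a new slice, which matches the original closure's intent of simply discarding system messages before the request is relayed.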