feat: enhance error handling and reasoning mechanisms across middleware

- Improve error handling across multiple middleware and adapter components so that errors are returned to clients in a consistent JSON format (a minimal sketch of the idea follows this list).
- Pass the request context into the request-conversion functions and wrap conversion errors with descriptive context instead of discarding them.
- Add support for reasoning content in the message model and document the new customization options.
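
A minimal sketch of the consistent JSON error shape described in the first bullet. The ErrorResponse type, its field names, and the abortWithError helper are illustrative assumptions, not the repository's actual API; only gin's AbortWithStatusJSON is a real call.

package middleware

import "github.com/gin-gonic/gin"

// ErrorResponse is a hypothetical, uniform JSON error body; the real
// field names used by the middleware may differ.
type ErrorResponse struct {
	Error struct {
		Message string `json:"message"`
		Type    string `json:"type"`
	} `json:"error"`
}

// abortWithError writes every error in the same JSON shape instead of
// letting each handler invent its own response format.
func abortWithError(c *gin.Context, status int, errType string, err error) {
	var resp ErrorResponse
	resp.Error.Message = err.Error()
	resp.Error.Type = errType
	c.AbortWithStatusJSON(status, resp)
}

A handler would then call, for example, abortWithError(c, 400, "invalid_request_error", err) wherever it currently writes an ad hoc error body.
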
Author: Laisky.Cai
Date: 2025-02-26 05:38:21 +00:00
parent 07d9a8e144
commit f6cfe7cd4f
13 changed files with 320 additions and 87 deletions


@@ -21,7 +21,11 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 		return nil, errors.New("request is nil")
 	}
-	claudeReq := anthropic.ConvertRequest(*request)
+	claudeReq, err := anthropic.ConvertRequest(c, *request)
+	if err != nil {
+		return nil, errors.Wrap(err, "convert request")
+	}
 	c.Set(ctxkey.RequestModel, request.Model)
 	c.Set(ctxkey.ConvertedRequest, claudeReq)
 	return claudeReq, nil
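
The hunk above changes only the call site, but it implies the callee gained a context parameter and an error return. The standalone sketch below illustrates that shape; OpenAIRequest and ClaudeRequest are placeholder types, and the body is an assumption inferred from the call claudeReq, err := anthropic.ConvertRequest(c, *request).

package anthropic

import (
	"errors"

	"github.com/gin-gonic/gin"
)

// OpenAIRequest and ClaudeRequest stand in for the repository's actual
// request models; only the overall signature shape is the point here.
type OpenAIRequest struct {
	Model    string
	Messages []any
}

type ClaudeRequest struct {
	Model string
}

// ConvertRequest now receives the gin context and can fail with an
// error, which is what lets the caller wrap it with "convert request".
func ConvertRequest(c *gin.Context, req OpenAIRequest) (*ClaudeRequest, error) {
	if len(req.Messages) == 0 {
		return nil, errors.New("no messages in request")
	}
	// The context is available for per-request state, e.g. values stored
	// earlier with c.Set (an assumption about how it is used).
	_ = c
	return &ClaudeRequest{Model: req.Model}, nil
}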


@@ -88,7 +88,7 @@ func Handler(c *gin.Context, awsCli *bedrockruntime.Client, modelName string) (*
 		return utils.WrapErr(errors.Wrap(err, "unmarshal response")), nil
 	}
-	openaiResp := anthropic.ResponseClaude2OpenAI(claudeResponse)
+	openaiResp := anthropic.ResponseClaude2OpenAI(c, claudeResponse)
 	openaiResp.Model = modelName
 	usage := relaymodel.Usage{
 		PromptTokens:     claudeResponse.Usage.InputTokens,
@@ -159,7 +159,7 @@ func StreamHandler(c *gin.Context, awsCli *bedrockruntime.Client) (*relaymodel.E
 			return false
 		}
-		response, meta := anthropic.StreamResponseClaude2OpenAI(claudeResp)
+		response, meta := anthropic.StreamResponseClaude2OpenAI(c, claudeResp)
 		if meta != nil {
 			usage.PromptTokens += meta.Usage.InputTokens
 			usage.CompletionTokens += meta.Usage.OutputTokens
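
Both handler hunks make the same change: the Claude-to-OpenAI converters now receive the gin context. A plausible use, sketched below, is reading per-request values stored earlier with c.Set (the first hunk stores ctxkey.RequestModel); whether the real converters do exactly this is an assumption, and requestModelKey is a placeholder for the ctxkey constant.

package anthropic

import "github.com/gin-gonic/gin"

// requestModelKey is a placeholder for ctxkey.RequestModel, which the
// adaptor's ConvertRequest stores in the first hunk above.
const requestModelKey = "request_model"

// responseModelName shows how a converter that receives the gin context
// can recover the originally requested model name, falling back to the
// upstream value when nothing was stored.
func responseModelName(c *gin.Context, fallback string) string {
	if v, ok := c.Get(requestModelKey); ok {
		if name, ok := v.(string); ok {
			return name
		}
	}
	return fallback
}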