diff --git a/common/constants.go b/common/constants.go
index 5a53283..66cc10d 100644
--- a/common/constants.go
+++ b/common/constants.go
@@ -218,30 +218,30 @@ const (
 )
 
 var ChannelBaseURLs = []string{
-	"", // 0
-	"https://api.openai.com", // 1
-	"https://oa.api2d.net", // 2
-	"", // 3
-	"http://localhost:11434", // 4
-	"https://api.openai-sb.com", // 5
-	"https://api.openaimax.com", // 6
-	"https://api.ohmygpt.com", // 7
-	"", // 8
-	"https://api.caipacity.com", // 9
-	"https://api.aiproxy.io", // 10
-	"", // 11
-	"https://api.api2gpt.com", // 12
-	"https://api.aigc2d.com", // 13
-	"https://api.anthropic.com", // 14
-	"https://aip.baidubce.com", // 15
-	"https://open.bigmodel.cn", // 16
-	"https://dashscope.aliyuncs.com", // 17
-	"", // 18
-	"https://ai.360.cn", // 19
-	"https://openrouter.ai/api", // 20
-	"https://api.aiproxy.io", // 21
-	"https://fastgpt.run/api/openapi", // 22
-	"https://hunyuan.cloud.tencent.com", //23
+	"", // 0
+	"https://api.openai.com", // 1
+	"https://oa.api2d.net", // 2
+	"", // 3
+	"http://localhost:11434", // 4
+	"https://api.openai-sb.com", // 5
+	"https://api.openaimax.com", // 6
+	"https://api.ohmygpt.com", // 7
+	"", // 8
+	"https://api.caipacity.com", // 9
+	"https://api.aiproxy.io", // 10
+	"", // 11
+	"https://api.api2gpt.com", // 12
+	"https://api.aigc2d.com", // 13
+	"https://api.anthropic.com", // 14
+	"https://aip.baidubce.com", // 15
+	"https://open.bigmodel.cn", // 16
+	"https://dashscope.aliyuncs.com", // 17
+	"", // 18
+	"https://ai.360.cn", // 19
+	"https://openrouter.ai/api", // 20
+	"https://api.aiproxy.io", // 21
+	"https://fastgpt.run/api/openapi", // 22
+	"https://hunyuan.tencentcloudapi.com", //23
 	"https://generativelanguage.googleapis.com", //24
 	"https://api.moonshot.cn", //25
 	"https://open.bigmodel.cn", //26
diff --git a/controller/channel-test.go b/controller/channel-test.go
index e3cef38..000d7f2 100644
--- a/controller/channel-test.go
+++ b/controller/channel-test.go
@@ -121,7 +121,6 @@ func testChannel(channel *model.Channel, testModel string) (err error, openaiErr
 		return errors.New("usage is nil"), nil
 	}
 	result := w.Result()
-	// print result.Body
 	respBody, err := io.ReadAll(result.Body)
 	if err != nil {
 		return err, nil
diff --git a/relay/channel/tencent/adaptor.go b/relay/channel/tencent/adaptor.go
index 42d4f12..d79330e 100644
--- a/relay/channel/tencent/adaptor.go
+++ b/relay/channel/tencent/adaptor.go
@@ -17,6 +17,7 @@ import (
 
 type Adaptor struct {
 	Sign string
+	AppID int64
 	Action string
 	Version string
 	Timestamp int64
@@ -34,7 +35,7 @@ func (a *Adaptor) Init(info *relaycommon.RelayInfo, request dto.GeneralOpenAIReq
 }
 
 func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
-	return fmt.Sprintf("%s/hyllm/v1/chat/completions", info.BaseUrl), nil
+	return fmt.Sprintf("%s/", info.BaseUrl), nil
 }
 
 func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, info *relaycommon.RelayInfo) error {
@@ -52,11 +53,12 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *dto.Gen
 	}
 	apiKey := c.Request.Header.Get("Authorization")
 	apiKey = strings.TrimPrefix(apiKey, "Bearer ")
-	_, secretId, secretKey, err := parseTencentConfig(apiKey)
+	appId, secretId, secretKey, err := parseTencentConfig(apiKey)
+	a.AppID = appId
 	if err != nil {
 		return nil, err
 	}
-	tencentRequest := requestOpenAI2Tencent(*request)
+	tencentRequest := requestOpenAI2Tencent(a, *request)
 	// we have to calculate the sign here
 	a.Sign = getTencentSign(*tencentRequest, a, secretId, secretKey)
 	return tencentRequest, nil
diff --git a/relay/channel/tencent/dto.go b/relay/channel/tencent/dto.go
index 395ccbb..65c548a 100644
--- a/relay/channel/tencent/dto.go
+++ b/relay/channel/tencent/dto.go
@@ -30,17 +30,17 @@ type TencentChatRequest struct {
 	//
 	// 注意:
 	// 通过 SDK 调用时,流式和非流式调用需用**不同的方式**获取返回值,具体参考 SDK 中的注释或示例(在各语言 SDK 代码仓库的 examples/hunyuan/v20230901/ 目录中)。
-	Stream *bool `json:"Stream"`
+	Stream *bool `json:"Stream,omitempty"`
 	// 说明:
 	// 1. 影响输出文本的多样性,取值越大,生成文本的多样性越强。
 	// 2. 取值区间为 [0.0, 1.0],未传值时使用各模型推荐值。
 	// 3. 非必要不建议使用,不合理的取值会影响效果。
-	TopP *float64 `json:"TopP"`
+	TopP *float64 `json:"TopP,omitempty"`
 	// 说明:
 	// 1. 较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定。
 	// 2. 取值区间为 [0.0, 2.0],未传值时使用各模型推荐值。
 	// 3. 非必要不建议使用,不合理的取值会影响效果。
-	Temperature *float64 `json:"Temperature"`
+	Temperature *float64 `json:"Temperature,omitempty"`
 }
 
 type TencentError struct {
@@ -69,3 +69,7 @@ type TencentChatResponse struct {
 	Note string `json:"Note,omitempty"` // 注释
 	ReqID string `json:"Req_id,omitempty"` // 唯一请求 Id,每次请求都会返回。用于反馈接口入参
 }
+
+type TencentChatResponseSB struct {
+	Response TencentChatResponse `json:"Response,omitempty"`
+}
diff --git a/relay/channel/tencent/relay-tencent.go b/relay/channel/tencent/relay-tencent.go
index 9858011..3ea2376 100644
--- a/relay/channel/tencent/relay-tencent.go
+++ b/relay/channel/tencent/relay-tencent.go
@@ -22,7 +22,7 @@ import (
 
 // https://cloud.tencent.com/document/product/1729/97732
 
-func requestOpenAI2Tencent(request dto.GeneralOpenAIRequest) *TencentChatRequest {
+func requestOpenAI2Tencent(a *Adaptor, request dto.GeneralOpenAIRequest) *TencentChatRequest {
 	messages := make([]*TencentMessage, 0, len(request.Messages))
 	for i := 0; i < len(request.Messages); i++ {
 		message := request.Messages[i]
@@ -31,17 +31,23 @@ func requestOpenAI2Tencent(request dto.GeneralOpenAIRequest) *TencentChatRequest
 			Role: message.Role,
 		})
 	}
-	return &TencentChatRequest{
-		Temperature: &request.Temperature,
-		TopP: &request.TopP,
-		Stream: &request.Stream,
-		Messages: messages,
-		Model: &request.Model,
+	var req = TencentChatRequest{
+		Stream: &request.Stream,
+		Messages: messages,
+		Model: &request.Model,
 	}
+	if request.TopP != 0 {
+		req.TopP = &request.TopP
+	}
+	if request.Temperature != 0 {
+		req.Temperature = &request.Temperature
+	}
+	return &req
 }
 
 func responseTencent2OpenAI(response *TencentChatResponse) *dto.OpenAITextResponse {
 	fullTextResponse := dto.OpenAITextResponse{
+		Id: response.Id,
 		Object: "chat.completion",
 		Created: common.GetTimestamp(),
 		Usage: dto.Usage{
@@ -129,7 +135,7 @@ func tencentStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIError
 }
 
 func tencentHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
-	var TencentResponse TencentChatResponse
+	var tencentSb TencentChatResponseSB
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
@@ -138,20 +144,20 @@ func tencentHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithSt
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
 	}
-	err = json.Unmarshal(responseBody, &TencentResponse)
+	err = json.Unmarshal(responseBody, &tencentSb)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
 	}
-	if TencentResponse.Error.Code != 0 {
+	if tencentSb.Response.Error.Code != 0 {
 		return &dto.OpenAIErrorWithStatusCode{
 			Error: dto.OpenAIError{
-				Message: TencentResponse.Error.Message,
-				Code: TencentResponse.Error.Code,
+				Message: tencentSb.Response.Error.Message,
+				Code: tencentSb.Response.Error.Code,
 			},
 			StatusCode: resp.StatusCode,
 		}, nil
 	}
-	fullTextResponse := responseTencent2OpenAI(&TencentResponse)
+	fullTextResponse := responseTencent2OpenAI(&tencentSb.Response)
 	jsonResponse, err := json.Marshal(fullTextResponse)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
diff --git a/relay/common/relay_utils.go b/relay/common/relay_utils.go
index 726d22f..9ef9a8b 100644
--- a/relay/common/relay_utils.go
+++ b/relay/common/relay_utils.go
@@ -38,6 +38,7 @@ func RelayErrorHandler(resp *http.Response) (OpenAIErrorWithStatusCode *dto.Open
 	var textResponse dto.TextResponseWithError
 	err = json.Unmarshal(responseBody, &textResponse)
 	if err != nil {
+		OpenAIErrorWithStatusCode.Error.Message = fmt.Sprintf("error unmarshalling response body: %s", responseBody)
 		return
 	}
 	OpenAIErrorWithStatusCode.Error = textResponse.Error
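
Note on the new response wrapper added in relay/channel/tencent/dto.go: Tencent Cloud API 3.0 endpoints, including hunyuan.tencentcloudapi.com, nest the whole body under a top-level "Response" key, which is why tencentHandler now decodes into TencentChatResponseSB instead of TencentChatResponse directly. The following is a minimal, self-contained sketch of that decode path only; it uses trimmed-down copies of the DTOs (the TencentError field tags are assumed, since the patch does not show them) and a made-up error payload.

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down copies of the DTOs in relay/channel/tencent/dto.go, reduced to
// the fields this sketch needs; tag names on TencentError are assumptions.
type TencentError struct {
	Code    int    `json:"Code"`
	Message string `json:"Message"`
}

type TencentChatResponse struct {
	Error TencentError `json:"Error,omitempty"`
}

// The v2023-09-01 endpoint wraps every body in a top-level "Response" object,
// so the handler decodes into this wrapper first.
type TencentChatResponseSB struct {
	Response TencentChatResponse `json:"Response,omitempty"`
}

func main() {
	// Made-up error payload; the field values are illustrative only.
	body := []byte(`{"Response":{"Error":{"Code":4000,"Message":"invalid parameter"}}}`)

	var sb TencentChatResponseSB
	if err := json.Unmarshal(body, &sb); err != nil {
		panic(err)
	}
	// Mirrors the check in tencentHandler: a non-zero Error.Code means the
	// upstream call failed and the message is surfaced to the caller.
	if sb.Response.Error.Code != 0 {
		fmt.Println("upstream error:", sb.Response.Error.Message)
	}
}

On success, tencentHandler hands the inner sb.Response to responseTencent2OpenAI unchanged, so the rest of the conversion code keeps working against TencentChatResponse as before.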