diff --git a/controller/relay-claude.go b/controller/relay-claude.go
index e131263..ee7abc7 100644
--- a/controller/relay-claude.go
+++ b/controller/relay-claude.go
@@ -18,7 +18,7 @@ type ClaudeMetadata struct {
 type ClaudeRequest struct {
 	Model             string   `json:"model"`
 	Prompt            string   `json:"prompt"`
-	MaxTokensToSample int      `json:"max_tokens_to_sample"`
+	MaxTokensToSample uint     `json:"max_tokens_to_sample"`
 	StopSequences     []string `json:"stop_sequences,omitempty"`
 	Temperature       float64  `json:"temperature,omitempty"`
 	TopP              float64  `json:"top_p,omitempty"`
diff --git a/controller/relay-gemini.go b/controller/relay-gemini.go
index f68d8c1..d4ce18c 100644
--- a/controller/relay-gemini.go
+++ b/controller/relay-gemini.go
@@ -47,7 +47,7 @@ type GeminiChatGenerationConfig struct {
 	Temperature     float64  `json:"temperature,omitempty"`
 	TopP            float64  `json:"topP,omitempty"`
 	TopK            float64  `json:"topK,omitempty"`
-	MaxOutputTokens int      `json:"maxOutputTokens,omitempty"`
+	MaxOutputTokens uint     `json:"maxOutputTokens,omitempty"`
 	CandidateCount  int      `json:"candidateCount,omitempty"`
 	StopSequences   []string `json:"stopSequences,omitempty"`
 }
diff --git a/controller/relay-palm.go b/controller/relay-palm.go
index a7b0c1f..4a8826d 100644
--- a/controller/relay-palm.go
+++ b/controller/relay-palm.go
@@ -31,7 +31,7 @@ type PaLMChatRequest struct {
 	Temperature    float64 `json:"temperature,omitempty"`
 	CandidateCount int     `json:"candidateCount,omitempty"`
 	TopP           float64 `json:"topP,omitempty"`
-	TopK           int     `json:"topK,omitempty"`
+	TopK           uint    `json:"topK,omitempty"`
 }
 
 type PaLMError struct {
diff --git a/controller/relay-text.go b/controller/relay-text.go
index cd760f4..82104c4 100644
--- a/controller/relay-text.go
+++ b/controller/relay-text.go
@@ -233,7 +233,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	}
 	preConsumedTokens := common.PreConsumedQuota
 	if textRequest.MaxTokens != 0 {
-		preConsumedTokens = promptTokens + textRequest.MaxTokens
+		preConsumedTokens = promptTokens + int(textRequest.MaxTokens)
 	}
 	modelRatio := common.GetModelRatio(textRequest.Model)
 	groupRatio := common.GetGroupRatio(group)
diff --git a/controller/relay-xunfei.go b/controller/relay-xunfei.go
index 33383d8..b2b9158 100644
--- a/controller/relay-xunfei.go
+++ b/controller/relay-xunfei.go
@@ -33,7 +33,7 @@ type XunfeiChatRequest struct {
 			Domain      string  `json:"domain,omitempty"`
 			Temperature float64 `json:"temperature,omitempty"`
 			TopK        int     `json:"top_k,omitempty"`
-			MaxTokens   int     `json:"max_tokens,omitempty"`
+			MaxTokens   uint    `json:"max_tokens,omitempty"`
 			Auditing    bool    `json:"auditing,omitempty"`
 		} `json:"chat"`
 	} `json:"parameter"`
diff --git a/controller/relay.go b/controller/relay.go
index a535a96..9254fb2 100644
--- a/controller/relay.go
+++ b/controller/relay.go
@@ -53,7 +53,7 @@ type GeneralOpenAIRequest struct {
 	Messages    []Message `json:"messages,omitempty"`
 	Prompt      any       `json:"prompt,omitempty"`
 	Stream      bool      `json:"stream,omitempty"`
-	MaxTokens   int       `json:"max_tokens,omitempty"`
+	MaxTokens   uint      `json:"max_tokens,omitempty"`
 	Temperature float64   `json:"temperature,omitempty"`
 	TopP        float64   `json:"top_p,omitempty"`
 	N           int       `json:"n,omitempty"`
@@ -91,14 +91,14 @@ type AudioRequest struct {
 
 type ChatRequest struct {
 	Model     string    `json:"model"`
 	Messages  []Message `json:"messages"`
-	MaxTokens int       `json:"max_tokens"`
+	MaxTokens uint      `json:"max_tokens"`
 }
 
 type TextRequest struct {
 	Model     string    `json:"model"`
 	Messages  []Message `json:"messages"`
 	Prompt    string    `json:"prompt"`
-	MaxTokens int       `json:"max_tokens"`
+	MaxTokens uint      `json:"max_tokens"`
 	//Stream bool `json:"stream"`
 }