Compare commits

...

4 Commits

Author    SHA1        Message                                              Date
JustSong  519cb030f7  chore: update input label                            2023-05-16 16:23:07 +08:00
JustSong  58fe923c85  perf: use max_tokens to reduce token consuming       2023-05-16 16:22:25 +08:00
JustSong  c9ac5e391f  feat: support max_tokens now (#52)                   2023-05-16 16:18:35 +08:00
JustSong  69cf1de7bd  feat: disable operations for root user (close #76)   2023-05-16 15:38:03 +08:00
4 changed files with 27 additions and 15 deletions

View File

@@ -210,11 +210,12 @@ func testChannel(channel *model.Channel, request *ChatRequest) error {
 func buildTestRequest(c *gin.Context) *ChatRequest {
     model_ := c.Query("model")
     testRequest := &ChatRequest{
-        Model: model_,
+        Model:     model_,
+        MaxTokens: 1,
     }
     testMessage := Message{
         Role:    "user",
-        Content: "echo hi",
+        Content: "hi",
     }
     testRequest.Messages = append(testRequest.Messages, testMessage)
     return testRequest
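
With the ChatRequest change in the next file, the channel test now sends a one-word prompt and caps the completion at a single token, so each health check consumes almost no quota. A minimal, self-contained sketch of the resulting payload (types trimmed to the fields used here; the model name is a placeholder for what buildTestRequest reads from c.Query("model")):

package main

import (
    "encoding/json"
    "fmt"
)

type Message struct {
    Role    string `json:"role"`
    Content string `json:"content"`
}

type ChatRequest struct {
    Model     string    `json:"model"`
    Messages  []Message `json:"messages"`
    MaxTokens int       `json:"max_tokens"`
}

func main() {
    testRequest := &ChatRequest{
        Model:     "gpt-3.5-turbo", // placeholder; the real code takes this from c.Query("model")
        MaxTokens: 1,               // cap the completion so the test costs next to nothing
    }
    testRequest.Messages = append(testRequest.Messages, Message{Role: "user", Content: "hi"})
    body, _ := json.Marshal(testRequest)
    fmt.Println(string(body))
    // {"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"hi"}],"max_tokens":1}
}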

View File

@@ -21,14 +21,16 @@ type Message struct {
 }
 type ChatRequest struct {
-    Model    string    `json:"model"`
-    Messages []Message `json:"messages"`
+    Model     string    `json:"model"`
+    Messages  []Message `json:"messages"`
+    MaxTokens int       `json:"max_tokens"`
 }
 type TextRequest struct {
-    Model    string    `json:"model"`
-    Messages []Message `json:"messages"`
-    Prompt   string    `json:"prompt"`
+    Model     string    `json:"model"`
+    Messages  []Message `json:"messages"`
+    Prompt    string    `json:"prompt"`
+    MaxTokens int       `json:"max_tokens"`
     //Stream bool `json:"stream"`
 }
@@ -128,8 +130,17 @@ func relayHelper(c *gin.Context) error {
         model_ = strings.TrimSuffix(model_, "-0314")
         fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/%s", baseURL, model_, task)
     }
+    var promptText string
+    for _, message := range textRequest.Messages {
+        promptText += fmt.Sprintf("%s: %s\n", message.Role, message.Content)
+    }
+    promptTokens := countToken(promptText) + 3
+    preConsumedTokens := common.PreConsumedQuota
+    if textRequest.MaxTokens != 0 {
+        preConsumedTokens = promptTokens + textRequest.MaxTokens
+    }
     ratio := common.GetModelRatio(textRequest.Model)
-    preConsumedQuota := int(float64(common.PreConsumedQuota) * ratio)
+    preConsumedQuota := int(float64(preConsumedTokens) * ratio)
     if consumeQuota {
         err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
         if err != nil {
@@ -176,12 +187,8 @@ func relayHelper(c *gin.Context) error {
         completionRatio = 2
     }
     if isStream {
-        var promptText string
-        for _, message := range textRequest.Messages {
-            promptText += fmt.Sprintf("%s: %s\n", message.Role, message.Content)
-        }
         completionText := fmt.Sprintf("%s: %s\n", "assistant", streamResponseText)
-        quota = countToken(promptText) + countToken(completionText)*completionRatio + 3
+        quota = promptTokens + countToken(completionText)*completionRatio
     } else {
         quota = textResponse.Usage.PromptTokens + textResponse.Usage.CompletionTokens*completionRatio
     }
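
In short, relayHelper now counts the prompt tokens once, up front; when the client supplies max_tokens, the pre-consumed quota becomes (promptTokens + MaxTokens) * ratio instead of the flat common.PreConsumedQuota * ratio, and the streaming branch reuses promptTokens rather than re-counting the prompt. A rough, self-contained sketch of that arithmetic, with a stand-in countToken and made-up constants in place of the project's real implementations:

package main

import (
    "fmt"
    "strings"
)

const PreConsumedQuota = 500 // stand-in for common.PreConsumedQuota

// countToken is a placeholder: the real function uses a tokenizer, not word counting.
func countToken(text string) int {
    return len(strings.Fields(text))
}

func main() {
    // Made-up request values standing in for textRequest.
    messages := []struct{ Role, Content string }{
        {"user", "Summarize this document for me, please."},
    }
    maxTokens := 200 // textRequest.MaxTokens as sent by the client
    ratio := 1.0     // stand-in for common.GetModelRatio(textRequest.Model)

    var promptText string
    for _, message := range messages {
        promptText += fmt.Sprintf("%s: %s\n", message.Role, message.Content)
    }
    promptTokens := countToken(promptText) + 3

    preConsumedTokens := PreConsumedQuota
    if maxTokens != 0 {
        preConsumedTokens = promptTokens + maxTokens
    }
    preConsumedQuota := int(float64(preConsumedTokens) * ratio)
    fmt.Println(promptTokens, preConsumedTokens, preConsumedQuota) // prints: 10 210 210
}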

View File

@@ -336,7 +336,7 @@ const SystemSetting = () => {
         </Header>
         <Form.Group widths={3}>
           <Form.Input
-            label='最长应时间'
+            label='最长响应时间'
             name='ChannelDisableThreshold'
             onChange={handleInputChange}
             autoComplete='new-password'

View File

@@ -234,6 +234,7 @@ const UsersTable = () => {
             onClick={() => {
               manageUser(user.username, 'promote', idx);
             }}
+            disabled={user.role === 100}
           >
             提升
           </Button>
@@ -243,12 +244,13 @@ const UsersTable = () => {
             onClick={() => {
               manageUser(user.username, 'demote', idx);
             }}
+            disabled={user.role === 100}
           >
             降级
           </Button>
           <Popup
             trigger={
-              <Button size='small' negative>
+              <Button size='small' negative disabled={user.role === 100}>
                 删除
               </Button>
             }
@@ -274,6 +276,7 @@ const UsersTable = () => {
                 idx
               );
             }}
+            disabled={user.role === 100}
           >
             {user.status === 1 ? '禁用' : '启用'}
           </Button>
@@ -281,6 +284,7 @@ const UsersTable = () => {
             size={'small'}
             as={Link}
             to={'/user/edit/' + user.id}
+            disabled={user.role === 100}
           >
             编辑
           </Button>