feat: improve logging logic; add request duration and is-stream fields

CaIon 2024-01-21 15:01:59 +08:00
parent 800f494698
commit cbdce181af
8 changed files with 52 additions and 11 deletions

View File

@@ -12,6 +12,7 @@ import (
 	"one-api/common"
 	"one-api/model"
 	"strings"
+	"time"
 )
 
 var availableVoices = []string{
@@ -24,12 +25,12 @@ var availableVoices = []string{
 }
 
 func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	tokenId := c.GetInt("token_id")
 	channelType := c.GetInt("channel")
 	channelId := c.GetInt("channel_id")
 	userId := c.GetInt("id")
 	group := c.GetString("group")
+	startTime := time.Now()
 
 	var audioRequest AudioRequest
 	if !strings.HasPrefix(c.Request.URL.Path, "/v1/audio/transcriptions") {
@@ -154,6 +155,7 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 	defer func(ctx context.Context) {
 		go func() {
+			useTimeSeconds := time.Now().Unix() - startTime.Unix()
 			quota := 0
 			var promptTokens = 0
 			if strings.HasPrefix(audioRequest.Model, "tts-1") {
@@ -178,7 +180,7 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 			if quota != 0 {
 				tokenName := c.GetString("token_name")
 				logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
-				model.RecordConsumeLog(ctx, userId, channelId, promptTokens, 0, audioRequest.Model, tokenName, quota, logContent, tokenId, userQuota)
+				model.RecordConsumeLog(ctx, userId, channelId, promptTokens, 0, audioRequest.Model, tokenName, quota, logContent, tokenId, userQuota, int(useTimeSeconds), false)
 				model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
 				channelId := c.GetInt("channel_id")
 				model.UpdateChannelUsedQuota(channelId, quota)
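The timing pattern introduced here, and repeated in the image and text relay helpers below, is: capture a timestamp when the handler starts, then compute a whole-second delta inside the deferred accounting goroutine. A minimal standalone sketch of just that pattern, with a hypothetical recordConsume stand-in for model.RecordConsumeLog:

    package main

    import (
    	"fmt"
    	"time"
    )

    // recordConsume is a hypothetical stand-in for model.RecordConsumeLog,
    // reduced to the two fields this commit introduces.
    func recordConsume(useTimeSeconds int, isStream bool) {
    	fmt.Printf("use_time=%ds is_stream=%v\n", useTimeSeconds, isStream)
    }

    func handleRelay() {
    	startTime := time.Now() // captured before any upstream work

    	defer func() {
    		go func() {
    			// Unix-second subtraction, as in the commit: resolution is
    			// whole seconds, so sub-second requests log as use_time=0.
    			useTimeSeconds := time.Now().Unix() - startTime.Unix()
    			recordConsume(int(useTimeSeconds), false)
    		}()
    	}()

    	time.Sleep(1500 * time.Millisecond) // simulate the upstream call
    }

    func main() {
    	handleRelay()
    	time.Sleep(100 * time.Millisecond) // give the goroutine time to log
    }

The second-level granularity is a deliberate trade-off: time.Since(startTime) would give sub-second precision, but the log column only stores whole seconds.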

View File

@@ -12,6 +12,7 @@ import (
 	"one-api/common"
 	"one-api/model"
 	"strings"
+	"time"
 )
 
 func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
@@ -21,6 +22,7 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 	userId := c.GetInt("id")
 	consumeQuota := c.GetBool("consume_quota")
 	group := c.GetString("group")
+	startTime := time.Now()
 
 	var imageRequest ImageRequest
 	if consumeQuota {
@@ -169,6 +171,7 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 	var textResponse ImageResponse
 	defer func(ctx context.Context) {
+		useTimeSeconds := time.Now().Unix() - startTime.Unix()
 		if consumeQuota {
 			if resp.StatusCode != http.StatusOK {
 				return
@@ -184,7 +187,7 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 			if quota != 0 {
 				tokenName := c.GetString("token_name")
 				logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
-				model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageRequest.Model, tokenName, quota, logContent, tokenId, userQuota)
+				model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageRequest.Model, tokenName, quota, logContent, tokenId, userQuota, int(useTimeSeconds), false)
 				model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
 				channelId := c.GetInt("channel_id")
 				model.UpdateChannelUsedQuota(channelId, quota)

View File

@@ -478,7 +478,7 @@ func relayMidjourneySubmit(c *gin.Context, relayMode int) *MidjourneyResponse {
 	if quota != 0 {
 		tokenName := c.GetString("token_name")
 		logContent := fmt.Sprintf("模型固定价格 %.2f,分组倍率 %.2f,操作 %s", modelPrice, groupRatio, midjRequest.Action)
-		model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageModel, tokenName, quota, logContent, tokenId, userQuota)
+		model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageModel, tokenName, quota, logContent, tokenId, userQuota, 0, false)
 		model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
 		channelId := c.GetInt("channel_id")
 		model.UpdateChannelUsedQuota(channelId, quota)

View File

@@ -508,9 +508,9 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 		useTimeSeconds := time.Now().Unix() - startTime.Unix()
 		var logContent string
 		if modelPrice == -1 {
-			logContent = fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f,用时 %d秒", modelRatio, groupRatio, useTimeSeconds)
+			logContent = fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
 		} else {
-			logContent = fmt.Sprintf("模型价格 %.2f,分组倍率 %.2f,用时 %d秒", modelPrice, groupRatio, useTimeSeconds)
+			logContent = fmt.Sprintf("模型价格 %.2f,分组倍率 %.2f", modelPrice, groupRatio)
 		}
 		logModel := textRequest.Model
 		if strings.HasPrefix(logModel, "gpt-4-gizmo") {
@@ -518,7 +518,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 			logContent += fmt.Sprintf(",模型 %s", textRequest.Model)
 		}
-		model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens, logModel, tokenName, quota, logContent, tokenId, userQuota)
+		model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens, logModel, tokenName, quota, logContent, tokenId, userQuota, int(useTimeSeconds), isStream)
 		model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
 		model.UpdateChannelUsedQuota(channelId, quota)
 		//if quota != 0 {

View File

@@ -20,6 +20,8 @@ type Log struct {
 	Quota            int    `json:"quota" gorm:"default:0"`
 	PromptTokens     int    `json:"prompt_tokens" gorm:"default:0"`
 	CompletionTokens int    `json:"completion_tokens" gorm:"default:0"`
+	UseTime          int    `json:"use_time" gorm:"bigint;default:0"`
+	IsStream         bool   `json:"is_stream" gorm:"default:false"`
 	ChannelId        int    `json:"channel" gorm:"index"`
 	TokenId          int    `json:"token_id" gorm:"default:0;index"`
 }
@@ -55,7 +57,7 @@ func RecordLog(userId int, logType int, content string) {
 	}
 }
 
-func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptTokens int, completionTokens int, modelName string, tokenName string, quota int, content string, tokenId int, userQuota int) {
+func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptTokens int, completionTokens int, modelName string, tokenName string, quota int, content string, tokenId int, userQuota int, useTimeSeconds int, isStream bool) {
 	common.LogInfo(ctx, fmt.Sprintf("record consume log: userId=%d, 用户调用前余额=%d, channelId=%d, promptTokens=%d, completionTokens=%d, modelName=%s, tokenName=%s, quota=%d, content=%s", userId, userQuota, channelId, promptTokens, completionTokens, modelName, tokenName, quota, content))
 	if !common.LogConsumeEnabled {
 		return
@@ -74,6 +76,8 @@ func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptToke
 		Quota:     quota,
 		ChannelId: channelId,
 		TokenId:   tokenId,
+		UseTime:   useTimeSeconds,
+		IsStream:  isStream,
 	}
 	err := DB.Create(log).Error
 	if err != nil {
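The json tags on the two new columns are the contract with the frontend change below: the new LogsTable column binds dataIndex 'use_time' and reads record.is_stream. A minimal sketch confirming the wire format, using only encoding/json. (As an aside, GORM normally spells column types as type:bigint; a bare bigint key does not appear to be a tag GORM recognizes, so the column type likely falls back to the default mapping for int.)

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Trimmed view of the Log model: only the two new fields, with the
    // same struct tags as the commit.
    type Log struct {
    	UseTime  int  `json:"use_time" gorm:"bigint;default:0"`
    	IsStream bool `json:"is_stream" gorm:"default:false"`
    }

    func main() {
    	entry := Log{UseTime: 42, IsStream: true}
    	out, _ := json.Marshal(entry)
    	fmt.Println(string(out)) // {"use_time":42,"is_stream":true}
    }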

View File

@@ -54,5 +54,5 @@
     "singleQuote": true,
     "jsxSingleQuote": true
   },
-  "proxy": "https://nekoapi.com"
+  "proxy": "http://localhost:3000"
 }

View File

@@ -2,7 +2,7 @@ import React, {useEffect, useState} from 'react';
 import {Label} from 'semantic-ui-react';
 import {API, copy, isAdmin, showError, showSuccess, timestamp2string} from '../helpers';
-import {Table, Avatar, Tag, Form, Button, Layout, Select, Popover, Modal, Spin} from '@douyinfe/semi-ui';
+import {Table, Avatar, Tag, Form, Button, Layout, Select, Popover, Modal, Spin, Space} from '@douyinfe/semi-ui';
 import {ITEMS_PER_PAGE} from '../constants';
 import {renderNumber, renderQuota, stringToColor} from '../helpers/render';
 import {
@@ -56,6 +56,25 @@ function renderType(type) {
     }
 }
 
+function renderIsStream(bool) {
+    if (bool) {
+        return <Tag color='blue' size='large'>流</Tag>;
+    } else {
+        return <Tag color='purple' size='large'>非流</Tag>;
+    }
+}
+
+function renderUseTime(type) {
+    const time = parseInt(type);
+    if (time < 101) {
+        return <Tag color='green' size='large'> {time} s </Tag>;
+    } else if (time < 300) {
+        return <Tag color='orange' size='large'> {time} s </Tag>;
+    } else {
+        return <Tag color='red' size='large'> {time} s </Tag>;
+    }
+}
+
 const LogsTable = () => {
     const columns = [
         {
@@ -142,6 +161,20 @@ const LogsTable = () => {
                 );
             },
         },
+        {
+            title: '用时',
+            dataIndex: 'use_time',
+            render: (text, record, index) => {
+                return (
+                    <div>
+                        <Space>
+                            {renderUseTime(text)}
+                            {renderIsStream(record.is_stream)}
+                        </Space>
+                    </div>
+                );
+            },
+        },
         {
             title: '提示',
             dataIndex: 'prompt_tokens',

View File

@@ -1,5 +1,4 @@
 import React from 'react';
-import { Header, Segment } from 'semantic-ui-react';
 import LogsTable from '../../components/LogsTable';
 
 const Token = () => (