Mirror of https://github.com/linux-do/new-api.git
Synced 2025-09-18 00:16:37 +08:00
feat: improve logging logic; add request time and is-stream fields
This commit is contained in:
parent
800f494698
commit
cbdce181af
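
This commit widens model.RecordConsumeLog with two trailing parameters, the elapsed request time in seconds and a stream flag, and threads them through every consume-log call site (the Midjourney path passes 0 and false, since it does not track a start time yet). A minimal sketch of the new call pattern, assuming the surrounding handler context; the names are taken from the hunks below:

    // Measure wall-clock duration around the relayed request, then record it
    // together with whether the response was streamed.
    startTime := time.Now()
    // ... relay the upstream request ...
    useTimeSeconds := time.Now().Unix() - startTime.Unix()
    model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens,
        logModel, tokenName, quota, logContent, tokenId, userQuota,
        int(useTimeSeconds), isStream)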
@@ -12,6 +12,7 @@ import (
 	"one-api/common"
 	"one-api/model"
 	"strings"
+	"time"
 )
 
 var availableVoices = []string{
@@ -24,12 +25,12 @@ var availableVoices = []string{
 }
 
 func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 
 	tokenId := c.GetInt("token_id")
 	channelType := c.GetInt("channel")
 	channelId := c.GetInt("channel_id")
 	userId := c.GetInt("id")
 	group := c.GetString("group")
+	startTime := time.Now()
 
 	var audioRequest AudioRequest
 	if !strings.HasPrefix(c.Request.URL.Path, "/v1/audio/transcriptions") {
@@ -154,6 +155,7 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 
 	defer func(ctx context.Context) {
 		go func() {
+			useTimeSeconds := time.Now().Unix() - startTime.Unix()
 			quota := 0
 			var promptTokens = 0
 			if strings.HasPrefix(audioRequest.Model, "tts-1") {
@@ -178,7 +180,7 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 			if quota != 0 {
 				tokenName := c.GetString("token_name")
 				logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
-				model.RecordConsumeLog(ctx, userId, channelId, promptTokens, 0, audioRequest.Model, tokenName, quota, logContent, tokenId, userQuota)
+				model.RecordConsumeLog(ctx, userId, channelId, promptTokens, 0, audioRequest.Model, tokenName, quota, logContent, tokenId, userQuota, int(useTimeSeconds), false)
 				model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
 				channelId := c.GetInt("channel_id")
 				model.UpdateChannelUsedQuota(channelId, quota)
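A note on the duration math used above: subtracting Unix() timestamps truncates both endpoints to whole seconds, so the result can be off by up to a second in either direction. An equivalent single-truncation alternative, shown only as a sketch of a design choice the commit did not take:

    // Truncate once, after computing the full-resolution duration.
    useTimeSeconds := int(time.Since(startTime).Seconds())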
@@ -12,6 +12,7 @@ import (
 	"one-api/common"
 	"one-api/model"
 	"strings"
+	"time"
 )
 
 func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
@@ -21,6 +22,7 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 	userId := c.GetInt("id")
 	consumeQuota := c.GetBool("consume_quota")
 	group := c.GetString("group")
+	startTime := time.Now()
 
 	var imageRequest ImageRequest
 	if consumeQuota {
@@ -169,6 +171,7 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 
 	var textResponse ImageResponse
 	defer func(ctx context.Context) {
+		useTimeSeconds := time.Now().Unix() - startTime.Unix()
 		if consumeQuota {
 			if resp.StatusCode != http.StatusOK {
 				return
@@ -184,7 +187,7 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 		if quota != 0 {
 			tokenName := c.GetString("token_name")
 			logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
-			model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageRequest.Model, tokenName, quota, logContent, tokenId, userQuota)
+			model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageRequest.Model, tokenName, quota, logContent, tokenId, userQuota, int(useTimeSeconds), false)
 			model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
 			channelId := c.GetInt("channel_id")
 			model.UpdateChannelUsedQuota(channelId, quota)
@@ -478,7 +478,7 @@ func relayMidjourneySubmit(c *gin.Context, relayMode int) *MidjourneyResponse {
 		if quota != 0 {
 			tokenName := c.GetString("token_name")
 			logContent := fmt.Sprintf("模型固定价格 %.2f,分组倍率 %.2f,操作 %s", modelPrice, groupRatio, midjRequest.Action)
-			model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageModel, tokenName, quota, logContent, tokenId, userQuota)
+			model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageModel, tokenName, quota, logContent, tokenId, userQuota, 0, false)
 			model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
 			channelId := c.GetInt("channel_id")
 			model.UpdateChannelUsedQuota(channelId, quota)
@@ -508,9 +508,9 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	useTimeSeconds := time.Now().Unix() - startTime.Unix()
 	var logContent string
 	if modelPrice == -1 {
-		logContent = fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f,用时 %d秒", modelRatio, groupRatio, useTimeSeconds)
+		logContent = fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
 	} else {
-		logContent = fmt.Sprintf("模型价格 %.2f,分组倍率 %.2f,用时 %d秒", modelPrice, groupRatio, useTimeSeconds)
+		logContent = fmt.Sprintf("模型价格 %.2f,分组倍率 %.2f", modelPrice, groupRatio)
 	}
 	logModel := textRequest.Model
 	if strings.HasPrefix(logModel, "gpt-4-gizmo") {
@@ -518,7 +518,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 		logContent += fmt.Sprintf(",模型 %s", textRequest.Model)
 	}
 
-	model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens, logModel, tokenName, quota, logContent, tokenId, userQuota)
+	model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens, logModel, tokenName, quota, logContent, tokenId, userQuota, int(useTimeSeconds), isStream)
 	model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
 	model.UpdateChannelUsedQuota(channelId, quota)
 	//if quota != 0 {
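The isStream argument passed above is not defined in these hunks; it presumably already exists in relayTextHelper. A hypothetical sketch of the usual derivation (textRequest.Stream is an assumption, not shown in this diff):

    // Hypothetical: the stream flag comes from the parsed request body.
    isStream := textRequest.Stream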
@@ -20,6 +20,8 @@ type Log struct {
 	Quota            int  `json:"quota" gorm:"default:0"`
 	PromptTokens     int  `json:"prompt_tokens" gorm:"default:0"`
 	CompletionTokens int  `json:"completion_tokens" gorm:"default:0"`
+	UseTime          int  `json:"use_time" gorm:"bigint;default:0"`
+	IsStream         bool `json:"is_stream" gorm:"default:false"`
 	ChannelId        int  `json:"channel" gorm:"index"`
 	TokenId          int  `json:"token_id" gorm:"default:0;index"`
 }
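The two new columns require a schema change; assuming the project runs gorm auto-migration at startup, as one-api-style codebases typically do, re-running AutoMigrate adds them. A minimal sketch under that assumption (migrateLogs is an invented helper name):

    // Sketch: gorm creates the use_time and is_stream columns if missing.
    func migrateLogs() error {
        return DB.AutoMigrate(&Log{})
    }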
@@ -55,7 +57,7 @@ func RecordLog(userId int, logType int, content string) {
 	}
 }
 
-func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptTokens int, completionTokens int, modelName string, tokenName string, quota int, content string, tokenId int, userQuota int) {
+func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptTokens int, completionTokens int, modelName string, tokenName string, quota int, content string, tokenId int, userQuota int, useTimeSeconds int, isStream bool) {
 	common.LogInfo(ctx, fmt.Sprintf("record consume log: userId=%d, 用户调用前余额=%d, channelId=%d, promptTokens=%d, completionTokens=%d, modelName=%s, tokenName=%s, quota=%d, content=%s", userId, userQuota, channelId, promptTokens, completionTokens, modelName, tokenName, quota, content))
 	if !common.LogConsumeEnabled {
 		return
@@ -74,6 +76,8 @@ func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptToke
 		Quota:     quota,
 		ChannelId: channelId,
 		TokenId:   tokenId,
+		UseTime:   useTimeSeconds,
+		IsStream:  isStream,
 	}
 	err := DB.Create(log).Error
 	if err != nil {
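Once persisted, the fields are queryable like any other column. A hypothetical example of the kind of query they enable (the helper name and the 300-second threshold are invented for illustration):

    // Hypothetical: list streamed requests that took longer than five minutes.
    func slowStreamLogs() ([]*Log, error) {
        var logs []*Log
        err := DB.Where("is_stream = ? AND use_time > ?", true, 300).Find(&logs).Error
        return logs, err
    }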
@@ -54,5 +54,5 @@
 		"singleQuote": true,
 		"jsxSingleQuote": true
 	},
-	"proxy": "https://nekoapi.com"
+	"proxy": "http://localhost:3000"
 }
@@ -2,7 +2,7 @@ import React, {useEffect, useState} from 'react';
 import {Label} from 'semantic-ui-react';
 import {API, copy, isAdmin, showError, showSuccess, timestamp2string} from '../helpers';
 
-import {Table, Avatar, Tag, Form, Button, Layout, Select, Popover, Modal, Spin} from '@douyinfe/semi-ui';
+import {Table, Avatar, Tag, Form, Button, Layout, Select, Popover, Modal, Spin, Space} from '@douyinfe/semi-ui';
 import {ITEMS_PER_PAGE} from '../constants';
 import {renderNumber, renderQuota, stringToColor} from '../helpers/render';
 import {
@@ -56,6 +56,25 @@ function renderType(type) {
 	}
 }
 
+function renderIsStream(bool) {
+	if (bool) {
+		return <Tag color='blue' size='large'>流</Tag>;
+	} else {
+		return <Tag color='purple' size='large'>非流</Tag>;
+	}
+}
+
+function renderUseTime(type) {
+	const time = parseInt(type);
+	if (time < 101) {
+		return <Tag color='green' size='large'> {time} s </Tag>;
+	} else if (time < 300) {
+		return <Tag color='orange' size='large'> {time} s </Tag>;
+	} else {
+		return <Tag color='red' size='large'> {time} s </Tag>;
+	}
+}
+
 const LogsTable = () => {
 	const columns = [
 		{
@@ -142,6 +161,20 @@ const LogsTable = () => {
 			);
 		},
 	},
+	{
+		title: '用时',
+		dataIndex: 'use_time',
+		render: (text, record, index) => {
+			return (
+				<div>
+					<Space>
+						{renderUseTime(text)}
+						{renderIsStream(record.is_stream)}
+					</Space>
+				</div>
+			);
+		},
+	},
 	{
 		title: '提示',
 		dataIndex: 'prompt_tokens',
@@ -1,5 +1,4 @@
 import React from 'react';
-import { Header, Segment } from 'semantic-ui-react';
 import LogsTable from '../../components/LogsTable';
 
 const Token = () => (