diff --git a/controller/relay-audio.go b/controller/relay-audio.go
index ce2c706..63f8563 100644
--- a/controller/relay-audio.go
+++ b/controller/relay-audio.go
@@ -12,6 +12,7 @@ import (
     "one-api/common"
     "one-api/model"
     "strings"
+    "time"
 )
 
 var availableVoices = []string{
@@ -24,12 +25,12 @@ var availableVoices = []string{
 }
 
 func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
-    tokenId := c.GetInt("token_id")
     channelType := c.GetInt("channel")
     channelId := c.GetInt("channel_id")
     userId := c.GetInt("id")
     group := c.GetString("group")
+    startTime := time.Now()
 
     var audioRequest AudioRequest
     if !strings.HasPrefix(c.Request.URL.Path, "/v1/audio/transcriptions") {
@@ -154,6 +155,7 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 
     defer func(ctx context.Context) {
         go func() {
+            useTimeSeconds := time.Now().Unix() - startTime.Unix()
             quota := 0
             var promptTokens = 0
             if strings.HasPrefix(audioRequest.Model, "tts-1") {
@@ -178,7 +180,7 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
             if quota != 0 {
                 tokenName := c.GetString("token_name")
                 logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
-                model.RecordConsumeLog(ctx, userId, channelId, promptTokens, 0, audioRequest.Model, tokenName, quota, logContent, tokenId, userQuota)
+                model.RecordConsumeLog(ctx, userId, channelId, promptTokens, 0, audioRequest.Model, tokenName, quota, logContent, tokenId, userQuota, int(useTimeSeconds), false)
                 model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
                 channelId := c.GetInt("channel_id")
                 model.UpdateChannelUsedQuota(channelId, quota)
diff --git a/controller/relay-image.go b/controller/relay-image.go
index d7361cb..39f3308 100644
--- a/controller/relay-image.go
+++ b/controller/relay-image.go
@@ -12,6 +12,7 @@ import (
     "one-api/common"
     "one-api/model"
     "strings"
+    "time"
 )
 
 func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
@@ -21,6 +22,7 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
     userId := c.GetInt("id")
     consumeQuota := c.GetBool("consume_quota")
     group := c.GetString("group")
+    startTime := time.Now()
 
     var imageRequest ImageRequest
     if consumeQuota {
@@ -169,6 +171,7 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
     var textResponse ImageResponse
 
     defer func(ctx context.Context) {
+        useTimeSeconds := time.Now().Unix() - startTime.Unix()
         if consumeQuota {
             if resp.StatusCode != http.StatusOK {
                 return
@@ -184,7 +187,7 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
         if quota != 0 {
             tokenName := c.GetString("token_name")
             logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
-            model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageRequest.Model, tokenName, quota, logContent, tokenId, userQuota)
+            model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageRequest.Model, tokenName, quota, logContent, tokenId, userQuota, int(useTimeSeconds), false)
             model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
             channelId := c.GetInt("channel_id")
             model.UpdateChannelUsedQuota(channelId, quota)
diff --git a/controller/relay-mj.go b/controller/relay-mj.go
index 3e3d2cc..d334e92 100644
--- a/controller/relay-mj.go
+++ b/controller/relay-mj.go
@@ -478,7 +478,7 @@ func relayMidjourneySubmit(c *gin.Context, relayMode int) *MidjourneyResponse {
         if quota != 0 {
             tokenName := c.GetString("token_name")
             logContent := fmt.Sprintf("模型固定价格 %.2f,分组倍率 %.2f,操作 %s", modelPrice, groupRatio, midjRequest.Action)
-            model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageModel, tokenName, quota, logContent, tokenId, userQuota)
+            model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageModel, tokenName, quota, logContent, tokenId, userQuota, 0, false)
             model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
             channelId := c.GetInt("channel_id")
             model.UpdateChannelUsedQuota(channelId, quota)
diff --git a/controller/relay-text.go b/controller/relay-text.go
index c68966e..6538028 100644
--- a/controller/relay-text.go
+++ b/controller/relay-text.go
@@ -508,9 +508,9 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
         useTimeSeconds := time.Now().Unix() - startTime.Unix()
         var logContent string
         if modelPrice == -1 {
-            logContent = fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f,用时 %d秒", modelRatio, groupRatio, useTimeSeconds)
+            logContent = fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
         } else {
-            logContent = fmt.Sprintf("模型价格 %.2f,分组倍率 %.2f,用时 %d秒", modelPrice, groupRatio, useTimeSeconds)
+            logContent = fmt.Sprintf("模型价格 %.2f,分组倍率 %.2f", modelPrice, groupRatio)
         }
         logModel := textRequest.Model
         if strings.HasPrefix(logModel, "gpt-4-gizmo") {
@@ -518,7 +518,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
             logContent += fmt.Sprintf(",模型 %s", textRequest.Model)
         }
 
-        model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens, logModel, tokenName, quota, logContent, tokenId, userQuota)
+        model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens, logModel, tokenName, quota, logContent, tokenId, userQuota, int(useTimeSeconds), isStream)
         model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
         model.UpdateChannelUsedQuota(channelId, quota)
         //if quota != 0 {
diff --git a/model/log.go b/model/log.go
index b4d1188..480d50c 100644
--- a/model/log.go
+++ b/model/log.go
@@ -20,6 +20,8 @@ type Log struct {
     Quota            int    `json:"quota" gorm:"default:0"`
     PromptTokens     int    `json:"prompt_tokens" gorm:"default:0"`
     CompletionTokens int    `json:"completion_tokens" gorm:"default:0"`
+    UseTime          int    `json:"use_time" gorm:"bigint;default:0"`
+    IsStream         bool   `json:"is_stream" gorm:"default:false"`
     ChannelId        int    `json:"channel" gorm:"index"`
     TokenId          int    `json:"token_id" gorm:"default:0;index"`
 }
@@ -55,7 +57,7 @@ func RecordLog(userId int, logType int, content string) {
     }
 }
 
-func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptTokens int, completionTokens int, modelName string, tokenName string, quota int, content string, tokenId int, userQuota int) {
+func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptTokens int, completionTokens int, modelName string, tokenName string, quota int, content string, tokenId int, userQuota int, useTimeSeconds int, isStream bool) {
     common.LogInfo(ctx, fmt.Sprintf("record consume log: userId=%d, 用户调用前余额=%d, channelId=%d, promptTokens=%d, completionTokens=%d, modelName=%s, tokenName=%s, quota=%d, content=%s", userId, userQuota, channelId, promptTokens, completionTokens, modelName, tokenName, quota, content))
     if !common.LogConsumeEnabled {
         return
@@ -74,6 +76,8 @@ func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptToke
         Quota:            quota,
         ChannelId:        channelId,
         TokenId:          tokenId,
+        UseTime:          useTimeSeconds,
+        IsStream:         isStream,
     }
     err := DB.Create(log).Error
     if err != nil {
diff --git a/web/package.json b/web/package.json
index 46e0274..8de3b15 100644
--- a/web/package.json
+++ b/web/package.json
@@ -54,5 +54,5 @@
"singleQuote": true, "jsxSingleQuote": true }, - "proxy": "https://nekoapi.com" + "proxy": "http://localhost:3000" } diff --git a/web/src/components/LogsTable.js b/web/src/components/LogsTable.js index c4c72f0..8d1b991 100644 --- a/web/src/components/LogsTable.js +++ b/web/src/components/LogsTable.js @@ -2,7 +2,7 @@ import React, {useEffect, useState} from 'react'; import {Label} from 'semantic-ui-react'; import {API, copy, isAdmin, showError, showSuccess, timestamp2string} from '../helpers'; -import {Table, Avatar, Tag, Form, Button, Layout, Select, Popover, Modal, Spin} from '@douyinfe/semi-ui'; +import {Table, Avatar, Tag, Form, Button, Layout, Select, Popover, Modal, Spin, Space} from '@douyinfe/semi-ui'; import {ITEMS_PER_PAGE} from '../constants'; import {renderNumber, renderQuota, stringToColor} from '../helpers/render'; import { @@ -56,6 +56,25 @@ function renderType(type) { } } +function renderIsStream(bool) { + if (bool) { + return ; + } else { + return 非流; + } +} + +function renderUseTime(type) { + const time = parseInt(type); + if (time < 101) { + return {time} s ; + } else if (time < 300) { + return {time} s ; + } else { + return {time} s ; + } +} + const LogsTable = () => { const columns = [ { @@ -142,6 +161,20 @@ const LogsTable = () => { ); }, }, + { + title: '用时', + dataIndex: 'use_time', + render: (text, record, index) => { + return ( +
+            <Space>
+              {renderUseTime(text)}
+              {renderIsStream(record.is_stream)}
+            </Space>
+          </div>
+        );
+      },
+    },
     {
       title: '提示',
       dataIndex: 'prompt_tokens',
diff --git a/web/src/pages/Log/index.js b/web/src/pages/Log/index.js
index 98bf69c..4ee109c 100644
--- a/web/src/pages/Log/index.js
+++ b/web/src/pages/Log/index.js
@@ -1,5 +1,4 @@
 import React from 'react';
-import { Header, Segment } from 'semantic-ui-react';
 import LogsTable from '../../components/LogsTable';
 
 const Token = () => (