Compare commits

...

13 Commits

Author | SHA1 | Message | Date
JustSong | 9d0bec83df | chore: update prompt | 2023-06-11 09:55:50 +08:00
JustSong | f97a9ce597 | fix: correct OpenAI error code's type | 2023-06-11 09:49:57 +08:00
JustSong | 4339f45f74 | feat: support /v1/moderations now (close #117) | 2023-06-11 09:37:36 +08:00
JustSong | e398e0756b | docs: update README | 2023-06-10 20:43:32 +08:00
JustSong | 64db39320a | feat: now able to check all user's log | 2023-06-10 20:40:23 +08:00
JustSong | 0b4bf30908 | docs: update README | 2023-06-10 16:34:14 +08:00
JustSong | d29c273073 | chore: add more log types | 2023-06-10 16:31:40 +08:00
JustSong | 74f508e847 | feat: now user can check its topup & consume history (close #78, close #95) | 2023-06-10 16:04:04 +08:00
JustSong | 145bb14cb2 | fix: fix not using proxy when update balance | 2023-06-09 18:57:27 +08:00
JustSong | 8901f03864 | feat: support set proxy for channel OpenAI (close #139) | 2023-06-09 18:30:01 +08:00
JustSong | 813bf0bd66 | refactor: enable model configuration on default group (close #143) | 2023-06-09 18:05:51 +08:00
JustSong | 45e9fd66e7 | feat: able to check topup history & consumption history (#78, #95) | 2023-06-09 16:59:00 +08:00
JustSong | e0d0674f81 | fix: fix redemption code's quota not updated | 2023-06-08 15:19:55 +08:00
23 changed files with 561 additions and 26 deletions

View File

@@ -44,11 +44,13 @@ _✨ All in one 的 OpenAI 接口,整合各种 API 访问方式,开箱即用
<a href="https://iamazing.cn/page/reward">赞赏支持</a>
</p>
> **Warning**:使用 Docker 拉取的最新镜像可能是 `alpha` 版本,如果追求稳定性请手动指定版本。
> **Note**:使用 Docker 拉取的最新镜像可能是 `alpha` 版本,如果追求稳定性请手动指定版本。
> **Warning**:从 `v0.3` 版本升级到 `v0.4` 版本需要手动迁移数据库,请手动执行[数据库迁移脚本](./bin/migration_v0.3-v0.4.sql)。
## 功能
1. 支持多种 API 访问渠道,欢迎 PR 或提 issue 添加更多渠道:
+ [x] OpenAI 官方通道
+ [x] OpenAI 官方通道(支持配置代理)
+ [x] **Azure OpenAI API**
+ [x] [API2D](https://api2d.com/r/197971)
+ [x] [OhMyGPT](https://aigptx.top?aff=uFpUl2Kf)
@@ -57,7 +59,7 @@ _✨ All in one 的 OpenAI 接口,整合各种 API 访问方式,开箱即用
+ [x] [OpenAI Max](https://openaimax.com)
+ [x] [OpenAI-SB](https://openai-sb.com)
+ [x] [CloseAI](https://console.openai-asia.com/r/2412)
+ [x] 自定义渠道:例如使用自行搭建的 OpenAI 代理
+ [x] 自定义渠道:例如各种未收录的第三方代理服务
2. 支持通过**负载均衡**的方式访问多个渠道。
3. 支持 **stream 模式**,可以通过流式传输实现打字机效果。
4. 支持**多机部署**[详见此处](#多机部署)。
@@ -66,16 +68,17 @@ _✨ All in one 的 OpenAI 接口,整合各种 API 访问方式,开箱即用
7. 支持**通道管理**,批量创建通道。
8. 支持**用户分组**以及**渠道分组**。
9. 支持渠道**设置模型列表**。
10. 支持发布公告,设置充值链接,设置新用户初始额度
11. 支持丰富的**自定义**设置,
10. 支持**查看额度明细**
11. 支持发布公告,设置充值链接,设置新用户初始额度。
12. 支持丰富的**自定义**设置,
1. 支持自定义系统名称、logo 以及页脚。
2. 支持自定义首页和关于页面,可以选择使用 HTML & Markdown 代码进行自定义,或者使用一个单独的网页通过 iframe 嵌入。
12. 支持通过系统访问令牌访问管理 API。
13. 支持用户管理,支持**多种用户登录注册方式**
13. 支持通过系统访问令牌访问管理 API。
14. 支持用户管理,支持**多种用户登录注册方式**
+ 邮箱登录注册以及通过邮箱进行密码重置。
+ [GitHub 开放授权](https://github.com/settings/applications/new)。
+ 微信公众号授权(需要额外部署 [WeChat Server](https://github.com/songquanpeng/wechat-server))。
14. 未来其他大模型开放 API 后,将第一时间支持,并将其封装成同样的 API 访问方式。
15. 未来其他大模型开放 API 后,将第一时间支持,并将其封装成同样的 API 访问方式。
## 部署
### 基于 Docker 进行部署
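
The feature list above is the core pitch: clients talk to a single OpenAI-compatible endpoint and one-api handles channel selection, proxying, and load balancing behind it. As a minimal sketch (the base URL and the token are placeholders for your own deployment and a token created in the console, not values taken from this diff), a streaming chat completion against the relay could look like this:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Placeholder deployment address and relay token; replace with your own.
	base := "http://localhost:3000"
	token := "sk-your-one-api-token"

	// Any OpenAI-compatible client works unchanged; this sketch enables the
	// stream mode advertised in the feature list above.
	body := []byte(`{"model": "gpt-3.5-turbo", "stream": true,
		"messages": [{"role": "user", "content": "Hello"}]}`)

	req, _ := http.NewRequest(http.MethodPost, base+"/v1/chat/completions", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Stream responses arrive as server-sent events, one "data: {...}" chunk per line.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "data: ") && line != "data: [DONE]" {
			fmt.Println(line)
		}
	}
}
```

Non-streaming calls are the same request without `"stream": true`; either way the relay forwards the request to whichever channel the distributor selects.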

View File

@@ -0,0 +1,17 @@
INSERT INTO abilities (`group`, model, channel_id, enabled)
SELECT c.`group`, m.model, c.id, 1
FROM channels c
CROSS JOIN (
SELECT 'gpt-3.5-turbo' AS model UNION ALL
SELECT 'gpt-3.5-turbo-0301' AS model UNION ALL
SELECT 'gpt-4' AS model UNION ALL
SELECT 'gpt-4-0314' AS model
) AS m
WHERE c.status = 1
AND NOT EXISTS (
SELECT 1
FROM abilities a
WHERE a.`group` = c.`group`
AND a.model = m.model
AND a.channel_id = c.id
);
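
The script above is the database migration referenced in the README warning for the v0.3 to v0.4 upgrade: for every enabled channel it creates one (group, model, channel_id) row in abilities for each of the four listed models, skipping combinations that already exist. This matters because GetRandomSatisfiedChannel (see the model/ability.go hunk further down) no longer short-circuits the default group, so channels without ability rows would stop being selected. Below is a minimal sketch of applying the script from Go with GORM; the DSN and database name are assumptions, and piping the file into the mysql client works just as well.

```go
package main

import (
	"os"

	"gorm.io/driver/mysql"
	"gorm.io/gorm"
)

func main() {
	// Hypothetical DSN; point it at the database your one-api instance uses.
	dsn := "root:password@tcp(127.0.0.1:3306)/one-api?charset=utf8mb4&parseTime=True"
	db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})
	if err != nil {
		panic(err)
	}

	// The migration is a single INSERT ... SELECT statement, so Exec can run it as-is.
	script, err := os.ReadFile("bin/migration_v0.3-v0.4.sql")
	if err != nil {
		panic(err)
	}
	if err := db.Exec(string(script)).Error; err != nil {
		panic(err)
	}
}
```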

View File

@@ -25,6 +25,7 @@ var OptionMap map[string]string
var OptionMapRWMutex sync.RWMutex
var ItemsPerPage = 10
var MaxRecentItems = 100
var PasswordLoginEnabled = true
var PasswordRegisterEnabled = true

View File

@@ -26,8 +26,8 @@ var ModelRatio = map[string]float64{
"ada": 10,
"text-embedding-ada-002": 0.2,
"text-search-ada-doc-001": 10,
"text-moderation-stable": 10,
"text-moderation-latest": 10,
"text-moderation-stable": 0.1,
"text-moderation-latest": 0.1,
}
func ModelRatio2JSONString() string {
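
The hunk above drops the price multiplier for the two moderation models from 10 to 0.1. ModelRatio feeds the quota arithmetic in controller/relay.go further down: quota is prompt tokens plus completion tokens times the completion ratio, scaled by the model ratio, with a new guard that bills at least 1 point when the ratio is non-zero. A small illustrative sketch of that arithmetic (the numbers are made up, and any other multipliers the full code applies are omitted):

```go
package main

import "fmt"

func main() {
	// Illustrative values, not taken from the diff: 200 prompt tokens and no
	// completion tokens, as a moderation response carries none.
	promptTokens := 200
	completionTokens := 0
	completionRatio := 1
	ratio := 0.1 // text-moderation-* after this change

	quota := promptTokens + completionTokens*completionRatio
	quota = int(float64(quota) * ratio)
	if ratio != 0 && quota <= 0 {
		quota = 1 // the new guard in relay.go: never bill zero for a non-free model
	}
	fmt.Println(quota) // 20
}
```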

View File

@@ -41,7 +41,9 @@ func updateChannelBalance(channel *model.Channel) (float64, error) {
baseURL := common.ChannelBaseURLs[channel.Type]
switch channel.Type {
case common.ChannelTypeOpenAI:
// do nothing
if channel.BaseURL != "" {
baseURL = channel.BaseURL
}
case common.ChannelTypeAzure:
return 0, errors.New("尚未实现")
case common.ChannelTypeCustom:

View File

@@ -27,6 +27,8 @@ func testChannel(channel *model.Channel, request *ChatRequest) error {
} else {
if channel.Type == common.ChannelTypeCustom {
requestURL = channel.BaseURL
} else if channel.Type == common.ChannelTypeOpenAI && channel.BaseURL != "" {
requestURL = channel.BaseURL
}
requestURL += "/v1/chat/completions"
}
@@ -57,7 +59,7 @@ func testChannel(channel *model.Channel, request *ChatRequest) error {
return err
}
if response.Usage.CompletionTokens == 0 {
return errors.New(fmt.Sprintf("type %s, code %s, message %s", response.Error.Type, response.Error.Code, response.Error.Message))
return errors.New(fmt.Sprintf("type %s, code %v, message %s", response.Error.Type, response.Error.Code, response.Error.Message))
}
return nil
}

controller/log.go Normal file · 86 lines
View File

@@ -0,0 +1,86 @@
package controller
import (
"github.com/gin-gonic/gin"
"one-api/common"
"one-api/model"
"strconv"
)
func GetAllLogs(c *gin.Context) {
p, _ := strconv.Atoi(c.Query("p"))
if p < 0 {
p = 0
}
logType, _ := strconv.Atoi(c.Query("type"))
logs, err := model.GetAllLogs(logType, p*common.ItemsPerPage, common.ItemsPerPage)
if err != nil {
c.JSON(200, gin.H{
"success": false,
"message": err.Error(),
})
return
}
c.JSON(200, gin.H{
"success": true,
"message": "",
"data": logs,
})
}
func GetUserLogs(c *gin.Context) {
p, _ := strconv.Atoi(c.Query("p"))
if p < 0 {
p = 0
}
userId := c.GetInt("id")
logType, _ := strconv.Atoi(c.Query("type"))
logs, err := model.GetUserLogs(userId, logType, p*common.ItemsPerPage, common.ItemsPerPage)
if err != nil {
c.JSON(200, gin.H{
"success": false,
"message": err.Error(),
})
return
}
c.JSON(200, gin.H{
"success": true,
"message": "",
"data": logs,
})
}
func SearchAllLogs(c *gin.Context) {
keyword := c.Query("keyword")
logs, err := model.SearchAllLogs(keyword)
if err != nil {
c.JSON(200, gin.H{
"success": false,
"message": err.Error(),
})
return
}
c.JSON(200, gin.H{
"success": true,
"message": "",
"data": logs,
})
}
func SearchUserLogs(c *gin.Context) {
keyword := c.Query("keyword")
userId := c.GetInt("id")
logs, err := model.SearchUserLogs(userId, keyword)
if err != nil {
c.JSON(200, gin.H{
"success": false,
"message": err.Error(),
})
return
}
c.JSON(200, gin.H{
"success": true,
"message": "",
"data": logs,
})
}

View File

@@ -161,6 +161,24 @@ func init() {
Root: "text-ada-001",
Parent: nil,
},
{
Id: "text-moderation-latest",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-moderation-latest",
Parent: nil,
},
{
Id: "text-moderation-stable",
Object: "model",
Created: 1677649963,
OwnedBy: "openai",
Permission: permission,
Root: "text-moderation-stable",
Parent: nil,
},
}
openAIModelsMap = make(map[string]OpenAIModels)
for _, model := range openAIModels {

View File

@@ -24,6 +24,7 @@ const (
RelayModeChatCompletions
RelayModeCompletions
RelayModeEmbeddings
RelayModeModeration
)
// https://platform.openai.com/docs/api-reference/chat
@@ -37,6 +38,7 @@ type GeneralOpenAIRequest struct {
Temperature float64 `json:"temperature"`
TopP float64 `json:"top_p"`
N int `json:"n"`
Input string `json:"input"`
}
type ChatRequest struct {
@@ -63,7 +65,7 @@ type OpenAIError struct {
Message string `json:"message"`
Type string `json:"type"`
Param string `json:"param"`
Code string `json:"code"`
Code any `json:"code"`
}
type OpenAIErrorWithStatusCode struct {
@@ -100,11 +102,13 @@ func Relay(c *gin.Context) {
relayMode = RelayModeCompletions
} else if strings.HasPrefix(c.Request.URL.Path, "/v1/embeddings") {
relayMode = RelayModeEmbeddings
} else if strings.HasPrefix(c.Request.URL.Path, "/v1/moderations") {
relayMode = RelayModeModeration
}
err := relayHelper(c, relayMode)
if err != nil {
if err.StatusCode == http.StatusTooManyRequests {
err.OpenAIError.Message = "负载已,请稍后再试,或升级账户以提升服务质量。"
err.OpenAIError.Message = "当前分组负载已饱和,请稍后再试,或升级账户以提升服务质量。"
}
c.JSON(err.StatusCode, gin.H{
"error": err.OpenAIError,
@@ -143,10 +147,17 @@ func relayHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
}
}
if relayMode == RelayModeModeration && textRequest.Model == "" {
textRequest.Model = "text-moderation-latest"
}
baseURL := common.ChannelBaseURLs[channelType]
requestURL := c.Request.URL.String()
if channelType == common.ChannelTypeCustom {
baseURL = c.GetString("base_url")
} else if channelType == common.ChannelTypeOpenAI {
if c.GetString("base_url") != "" {
baseURL = c.GetString("base_url")
}
}
fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)
if channelType == common.ChannelTypeAzure {
@@ -176,6 +187,8 @@ func relayHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
promptTokens = countTokenMessages(textRequest.Messages, textRequest.Model)
case RelayModeCompletions:
promptTokens = countTokenText(textRequest.Prompt, textRequest.Model)
case RelayModeModeration:
promptTokens = countTokenText(textRequest.Input, textRequest.Model)
}
preConsumedTokens := common.PreConsumedQuota
if textRequest.MaxTokens != 0 {
@@ -235,11 +248,16 @@ func relayHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
quota = textResponse.Usage.PromptTokens + textResponse.Usage.CompletionTokens*completionRatio
}
quota = int(float64(quota) * ratio)
if ratio != 0 && quota <= 0 {
quota = 1
}
quotaDelta := quota - preConsumedQuota
err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
if err != nil {
common.SysError("Error consuming token remain quota: " + err.Error())
}
userId := c.GetInt("id")
model.RecordLog(userId, model.LogTypeConsume, fmt.Sprintf("使用模型 %s 消耗 %d 点额度", textRequest.Model, quota))
}
}()
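
Together with the relay-router change further down that switches /moderations from RelayNotImplemented to Relay, the hunks above make /v1/moderations a first-class relay path: the request's input field is used for prompt token counting, an empty model falls back to a default moderation model, and consumption is written to the new log table. A minimal client sketch, again with a placeholder base URL and token:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder deployment address and relay token.
	base := "http://localhost:3000"
	token := "sk-your-one-api-token"

	// "model" may be omitted; the relay falls back to a default moderation model.
	body := []byte(`{"input": "Sample text to classify"}`)

	req, _ := http.NewRequest(http.MethodPost, base+"/v1/moderations", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	data, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(data))
}
```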

View File

@@ -2,6 +2,7 @@ package controller
import (
"encoding/json"
"fmt"
"github.com/gin-contrib/sessions"
"github.com/gin-gonic/gin"
"net/http"
@@ -351,6 +352,9 @@ func UpdateUser(c *gin.Context) {
})
return
}
if originUser.Quota != updatedUser.Quota {
model.RecordLog(originUser.Id, model.LogTypeManage, fmt.Sprintf("管理员将用户额度从 %d 点修改为 %d 点", originUser.Quota, updatedUser.Quota))
}
c.JSON(http.StatusOK, gin.H{
"success": true,
"message": "",

View File

@@ -7,6 +7,7 @@ import (
"one-api/common"
"one-api/model"
"strconv"
"strings"
)
type ModelRequest struct {
@@ -64,6 +65,11 @@ func Distribute() func(c *gin.Context) {
c.Abort()
return
}
if strings.HasPrefix(c.Request.URL.Path, "/v1/moderations") {
if modelRequest.Model == "" {
modelRequest.Model = "text-moderation-stable"
}
}
userId := c.GetInt("id")
userGroup, _ := model.GetUserGroup(userId)
channel, err = model.GetRandomSatisfiedChannel(userGroup, modelRequest.Model)
@@ -82,11 +88,9 @@ func Distribute() func(c *gin.Context) {
c.Set("channel_id", channel.Id)
c.Set("channel_name", channel.Name)
c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
if channel.Type == common.ChannelTypeCustom || channel.Type == common.ChannelTypeAzure {
c.Set("base_url", channel.BaseURL)
if channel.Type == common.ChannelTypeAzure {
c.Set("api_version", channel.Other)
}
c.Set("base_url", channel.BaseURL)
if channel.Type == common.ChannelTypeAzure {
c.Set("api_version", channel.Other)
}
c.Next()
}

View File

@@ -13,9 +13,6 @@ type Ability struct {
}
func GetRandomSatisfiedChannel(group string, model string) (*Channel, error) {
if group == "default" {
return GetRandomChannel()
}
ability := Ability{}
var err error = nil
if common.UsingSQLite {

model/log.go Normal file · 67 lines
View File

@@ -0,0 +1,67 @@
package model
import (
"gorm.io/gorm"
"one-api/common"
)
type Log struct {
Id int `json:"id"`
UserId int `json:"user_id" gorm:"index"`
CreatedAt int64 `json:"created_at" gorm:"bigint"`
Type int `json:"type" gorm:"index"`
Content string `json:"content"`
}
const (
LogTypeUnknown = iota
LogTypeTopup
LogTypeConsume
LogTypeManage
LogTypeSystem
)
func RecordLog(userId int, logType int, content string) {
log := &Log{
UserId: userId,
CreatedAt: common.GetTimestamp(),
Type: logType,
Content: content,
}
err := DB.Create(log).Error
if err != nil {
common.SysError("failed to record log: " + err.Error())
}
}
func GetAllLogs(logType int, startIdx int, num int) (logs []*Log, err error) {
var tx *gorm.DB
if logType == LogTypeUnknown {
tx = DB
} else {
tx = DB.Where("type = ?", logType)
}
err = tx.Order("id desc").Limit(num).Offset(startIdx).Find(&logs).Error
return logs, err
}
func GetUserLogs(userId int, logType int, startIdx int, num int) (logs []*Log, err error) {
var tx *gorm.DB
if logType == LogTypeUnknown {
tx = DB.Where("user_id = ?", userId)
} else {
tx = DB.Where("user_id = ? and type = ?", userId, logType)
}
err = tx.Order("id desc").Limit(num).Offset(startIdx).Omit("id").Find(&logs).Error
return logs, err
}
func SearchAllLogs(keyword string) (logs []*Log, err error) {
err = DB.Where("type = ? or content LIKE ?", keyword, keyword+"%").Order("id desc").Limit(common.MaxRecentItems).Find(&logs).Error
return logs, err
}
func SearchUserLogs(userId int, keyword string) (logs []*Log, err error) {
err = DB.Where("user_id = ? and type = ?", userId, keyword).Order("id desc").Limit(common.MaxRecentItems).Omit("id").Find(&logs).Error
return logs, err
}

View File

@@ -79,6 +79,10 @@ func InitDB() (err error) {
if err != nil {
return err
}
err = db.AutoMigrate(&Log{})
if err != nil {
return err
}
err = createRootAccountIfNeed()
return err
} else {

View File

@@ -2,6 +2,7 @@ package model
import (
"errors"
"fmt"
"one-api/common"
)
@@ -65,6 +66,7 @@ func Redeem(key string, userId int) (quota int, err error) {
if err != nil {
common.SysError("更新兑换码状态失败:" + err.Error())
}
RecordLog(userId, LogTypeTopup, fmt.Sprintf("通过兑换码充值 %d 点额度", redemption.Quota))
}()
return redemption.Quota, nil
}
@@ -83,7 +85,7 @@ func (redemption *Redemption) SelectUpdate() error {
// Update Make sure your token's fields is completed, because this will update non-zero values
func (redemption *Redemption) Update() error {
var err error
err = DB.Model(redemption).Select("name", "status", "redeemed_time").Updates(redemption).Error
err = DB.Model(redemption).Select("name", "status", "quota", "redeemed_time").Updates(redemption).Error
return err
}

View File

@@ -2,6 +2,7 @@ package model
import (
"errors"
"fmt"
"gorm.io/gorm"
"one-api/common"
"strings"
@@ -73,8 +74,14 @@ func (user *User) Insert() error {
}
user.Quota = common.QuotaForNewUser
user.AccessToken = common.GetUUID()
err = DB.Create(user).Error
return err
result := DB.Create(user)
if result.Error != nil {
return result.Error
}
if common.QuotaForNewUser > 0 {
RecordLog(user.Id, LogTypeSystem, fmt.Sprintf("新用户注册赠送 %d 点额度", common.QuotaForNewUser))
}
return nil
}
func (user *User) Update(updatePassword bool) error {

View File

@@ -93,5 +93,10 @@ func SetApiRouter(router *gin.Engine) {
redemptionRoute.PUT("/", controller.UpdateRedemption)
redemptionRoute.DELETE("/:id", controller.DeleteRedemption)
}
logRoute := apiRouter.Group("/log")
logRoute.GET("/", middleware.AdminAuth(), controller.GetAllLogs)
logRoute.GET("/search", middleware.AdminAuth(), controller.SearchAllLogs)
logRoute.GET("/self", middleware.UserAuth(), controller.GetUserLogs)
logRoute.GET("/self/search", middleware.UserAuth(), controller.SearchUserLogs)
}
}
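
These routes back the new log page. /api/log/ and /api/log/search are admin-only; /api/log/self/ and /api/log/self/search return the current user's entries. p is a zero-based page index (the handlers multiply it by ItemsPerPage), and type matches the LogType constants in model/log.go. Below is a hedged sketch of fetching your own consumption entries; the authentication shown is an assumption: the web UI relies on the login session, and the README's system-access-token feature suggests the token can be sent in the Authorization header instead.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder deployment and user access token (assumption: the management
	// API accepts it via the Authorization header, per the README feature list).
	base := "http://localhost:3000"
	accessToken := "your-system-access-token"

	// type=2 corresponds to LogTypeConsume in model/log.go; p is the page index.
	req, _ := http.NewRequest(http.MethodGet, base+"/api/log/self/?p=0&type=2", nil)
	req.Header.Set("Authorization", accessToken)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	data, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(data))
}
```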

View File

@@ -37,6 +37,6 @@ func SetRelayRouter(router *gin.Engine) {
relayV1Router.POST("/fine-tunes/:id/cancel", controller.RelayNotImplemented)
relayV1Router.GET("/fine-tunes/:id/events", controller.RelayNotImplemented)
relayV1Router.DELETE("/models/:model", controller.RelayNotImplemented)
relayV1Router.POST("/moderations", controller.RelayNotImplemented)
relayV1Router.POST("/moderations", controller.Relay)
}
}

View File

@@ -22,6 +22,7 @@ import EditChannel from './pages/Channel/EditChannel';
import Redemption from './pages/Redemption';
import EditRedemption from './pages/Redemption/EditRedemption';
import TopUp from './pages/TopUp';
import Log from './pages/Log';
const Home = lazy(() => import('./pages/Home'));
const About = lazy(() => import('./pages/About'));
@@ -250,6 +251,14 @@ function App() {
</PrivateRoute>
}
/>
<Route
path='/log'
element={
<PrivateRoute>
<Log />
</PrivateRoute>
}
/>
<Route
path='/about'
element={

View File

@@ -41,6 +41,11 @@ const headerButtons = [
icon: 'user',
admin: true,
},
{
name: '日志',
to: '/log',
icon: 'book',
},
{
name: '设置',
to: '/setting',

View File

@@ -0,0 +1,256 @@
import React, { useEffect, useState } from 'react';
import { Button, Label, Pagination, Select, Table } from 'semantic-ui-react';
import { API, isAdmin, showError, timestamp2string } from '../helpers';
import { ITEMS_PER_PAGE } from '../constants';
function renderTimestamp(timestamp) {
return (
<>
{timestamp2string(timestamp)}
</>
);
}
const MODE_OPTIONS = [
{ key: 'all', text: '全部用户', value: 'all' },
{ key: 'self', text: '当前用户', value: 'self' },
];
const LOG_OPTIONS = [
{ key: '0', text: '全部', value: 0 },
{ key: '1', text: '充值', value: 1 },
{ key: '2', text: '消费', value: 2 },
{ key: '3', text: '管理', value: 3 },
{ key: '4', text: '系统', value: 4 }
];
function renderType(type) {
switch (type) {
case 1:
return <Label basic color='green'> 充值 </Label>;
case 2:
return <Label basic color='olive'> 消费 </Label>;
case 3:
return <Label basic color='orange'> 管理 </Label>;
case 4:
return <Label basic color='purple'> 系统 </Label>;
default:
return <Label basic color='black'> 未知 </Label>;
}
}
const LogsTable = () => {
const [logs, setLogs] = useState([]);
const [loading, setLoading] = useState(true);
const [activePage, setActivePage] = useState(1);
const [searchKeyword, setSearchKeyword] = useState('');
const [searching, setSearching] = useState(false);
const [logType, setLogType] = useState(0);
const [mode, setMode] = useState('self'); // all, self
const showModePanel = isAdmin();
const loadLogs = async (startIdx) => {
let url = `/api/log/self/?p=${startIdx}&type=${logType}`;
if (mode === 'all') {
url = `/api/log/?p=${startIdx}&type=${logType}`;
}
const res = await API.get(url);
const { success, message, data } = res.data;
if (success) {
if (startIdx === 0) {
setLogs(data);
} else {
let newLogs = logs;
newLogs.push(...data);
setLogs(newLogs);
}
} else {
showError(message);
}
setLoading(false);
};
const onPaginationChange = (e, { activePage }) => {
(async () => {
if (activePage === Math.ceil(logs.length / ITEMS_PER_PAGE) + 1) {
// In this case we have to load more data and then append them.
await loadLogs(activePage - 1);
}
setActivePage(activePage);
})();
};
const refresh = async () => {
setLoading(true);
await loadLogs(0);
};
useEffect(() => {
loadLogs(0)
.then()
.catch((reason) => {
showError(reason);
});
}, []);
useEffect(() => {
refresh().then();
}, [mode, logType]);
const searchLogs = async () => {
if (searchKeyword === '') {
// if keyword is blank, load files instead.
await loadLogs(0);
setActivePage(1);
return;
}
setSearching(true);
const res = await API.get(`/api/log/self/search?keyword=${searchKeyword}`);
const { success, message, data } = res.data;
if (success) {
setLogs(data);
setActivePage(1);
} else {
showError(message);
}
setSearching(false);
};
const handleKeywordChange = async (e, { value }) => {
setSearchKeyword(value.trim());
};
const sortLog = (key) => {
if (logs.length === 0) return;
setLoading(true);
let sortedLogs = [...logs];
sortedLogs.sort((a, b) => {
return ('' + a[key]).localeCompare(b[key]);
});
if (sortedLogs[0].id === logs[0].id) {
sortedLogs.reverse();
}
setLogs(sortedLogs);
setLoading(false);
};
return (
<>
<Table basic>
<Table.Header>
<Table.Row>
<Table.HeaderCell
style={{ cursor: 'pointer' }}
onClick={() => {
sortLog('created_time');
}}
width={3}
>
时间
</Table.HeaderCell>
{
showModePanel && (
<Table.HeaderCell
style={{ cursor: 'pointer' }}
onClick={() => {
sortLog('user_id');
}}
width={1}
>
用户
</Table.HeaderCell>
)
}
<Table.HeaderCell
style={{ cursor: 'pointer' }}
onClick={() => {
sortLog('type');
}}
width={2}
>
类型
</Table.HeaderCell>
<Table.HeaderCell
style={{ cursor: 'pointer' }}
onClick={() => {
sortLog('content');
}}
width={showModePanel ? 10 : 11}
>
详情
</Table.HeaderCell>
</Table.Row>
</Table.Header>
<Table.Body>
{logs
.slice(
(activePage - 1) * ITEMS_PER_PAGE,
activePage * ITEMS_PER_PAGE
)
.map((log, idx) => {
if (log.deleted) return <></>;
return (
<Table.Row key={log.created_at}>
<Table.Cell>{renderTimestamp(log.created_at)}</Table.Cell>
{
showModePanel && (
<Table.Cell><Label>{log.user_id}</Label></Table.Cell>
)
}
<Table.Cell>{renderType(log.type)}</Table.Cell>
<Table.Cell>{log.content}</Table.Cell>
</Table.Row>
);
})}
</Table.Body>
<Table.Footer>
<Table.Row>
<Table.HeaderCell colSpan={showModePanel ? '5' : '4'}>
{
showModePanel && (
<Select
placeholder='选择模式'
options={MODE_OPTIONS}
style={{ marginRight: '8px' }}
name='mode'
value={mode}
onChange={(e, { name, value }) => {
setMode(value);
}}
/>
)
}
<Select
placeholder='选择明细分类'
options={LOG_OPTIONS}
style={{ marginRight: '8px' }}
name='logType'
value={logType}
onChange={(e, { name, value }) => {
setLogType(value);
}}
/>
<Button size='small' onClick={refresh} loading={loading}>刷新</Button>
<Pagination
floated='right'
activePage={activePage}
onPageChange={onPaginationChange}
size='small'
siblingRange={1}
totalPages={
Math.ceil(logs.length / ITEMS_PER_PAGE) +
(logs.length % ITEMS_PER_PAGE === 0 ? 1 : 0)
}
/>
</Table.HeaderCell>
</Table.Row>
</Table.Footer>
</Table>
</>
);
};
export default LogsTable;

View File

@@ -198,6 +198,20 @@ const EditChannel = () => {
handleInputChange(null, { name: 'models', value: fullModels });
}}>填入所有模型</Button>
</div>
{
inputs.type === 1 && (
<Form.Field>
<Form.Input
label='代理'
name='base_url'
placeholder={'请输入 OpenAI API 代理地址,如果不需要请留空,格式为:https://api.openai.com'}
onChange={handleInputChange}
value={inputs.base_url}
autoComplete='new-password'
/>
</Form.Field>
)
}
{
batch ? <Form.Field>
<Form.TextArea

View File

@@ -0,0 +1,14 @@
import React from 'react';
import { Header, Segment } from 'semantic-ui-react';
import LogsTable from '../../components/LogsTable';
const Token = () => (
<>
<Segment>
<Header as='h3'>额度明细</Header>
<LogsTable />
</Segment>
</>
);
export default Token;