Mirror of https://github.com/linux-do/new-api.git (synced 2025-11-18 03:23:42 +08:00)

Compare commits: v0.2.2.0-a...v0.2.4.0-a (111 commits)
| SHA1 |
|---|
| 1291504fdc |
| 54f17d6002 |
| fcb8506679 |
| fa902cca4c |
| 0c8696816d |
| 1e0053985a |
| 36fac2baa2 |
| 7e26238231 |
| bfbbe67fcd |
| 0867d36fc7 |
| 24722a8ee2 |
| c86bff38ac |
| 3cd25c7e53 |
| f07ae8139b |
| 6aa1f2fcbe |
| e2663a5c66 |
| d860289601 |
| cf8fe63fb6 |
| 1568d6481a |
| d05a786b4c |
| 01160658a5 |
| f421699e1b |
| f0c884cb55 |
| 51e0754ade |
| 1ab93717bb |
| 6fe643b1c1 |
| d6c1e3f37c |
| 774ce7195c |
| dbaa9390d3 |
| 84da88506f |
| 98a991306d |
| a3de309175 |
| de81eba90b |
| 1deb935f1d |
| 0caa639df7 |
| ea0c99ac1b |
| afc2289bdf |
| 472145aed6 |
| f956e4489f |
| 095121673d |
| 039fda91f2 |
| e0df8bbbda |
| 5e07ff85eb |
| 71dcf43c71 |
| 7003a4ed94 |
| e3b885b7f3 |
| 55962acf7c |
| d33b802dac |
| 63d68ce7bf |
| 95ac7c343b |
| b1019be733 |
| 93858c32d9 |
| ff044de42a |
| a3b3e6cc38 |
| 7b5830522a |
| 9dcec2772d |
| 8faf5d2517 |
| a3a6733fb5 |
| 0f11461af3 |
| a5b84ba524 |
| c222bc8752 |
| 3dd2a5bfc5 |
| 9f18641d7e |
| ced67b9bb3 |
| eda3bd1c9d |
| 9a9fd34cba |
| 475dea96d2 |
| 0ddb67f9a2 |
| 470f3a1d51 |
| 65ae70919b |
| 256ccfa989 |
| 6c059d5bf2 |
| acbc3649d6 |
| 5715fcf8fb |
| 98c347e048 |
| b283365ebc |
| 698af0786d |
| 21839ed13b |
| 71547849bc |
| 39f6812a2b |
| 5ac3d25f54 |
| fd19798c92 |
| 12667ad17d |
| e8800415b8 |
| ecd06cf2f8 |
| db575a1c25 |
| 2dbf50dc07 |
| d8c006046f |
| b427f0278f |
| 6fb1fbfe96 |
| 4641d44615 |
| 968ef1e5fa |
| 88bc295855 |
| 76f6b41bb2 |
| a9d9877bce |
| 003745abcb |
| 96468ce64f |
| 9886cdd527 |
| 83dd62982e |
| 1cff3c100a |
| d7a343e2f6 |
| 637801fba5 |
| 2bf404507f |
| 675de89c69 |
| 16b9aacb06 |
| cad380eb16 |
| 234e39ddeb |
| 7fb6420e66 |
| 5425b5bfc3 |
| 21f32605c8 |
| 1c6fd87909 |
214 LICENSE

@@ -1,21 +1,201 @@
MIT License
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

Copyright (c) 2024 Calcium-Ion
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
1. Definitions.

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@@ -57,11 +57,11 @@
2. In channel management, add a channel and set the channel type to **Midjourney Proxy** (or **Midjourney Proxy Plus** for the plus version); refer to the model list above for the models
3. Fill in the address with the midjourney-proxy deployment address, e.g. http://localhost:8080
3. Fill in **Proxy** with the midjourney-proxy deployment address, e.g. http://localhost:8080
4. Fill in the key with the midjourney-proxy key; if no key was set, anything will do

### Connecting to an upstream new api

1. In channel management, add a channel, set the channel type to **Midjourney Proxy Plus**, and refer to the model list above for the models
2. Fill in the address with the upstream new api address, e.g. http://localhost:3000
2. Fill in **Proxy** with the upstream new api address, e.g. http://localhost:3000
3. Fill in the key with the upstream new api key
16 README.md

@@ -56,17 +56,28 @@
4. [Ollama](https://github.com/ollama/ollama?tab=readme-ov-file): when adding the channel, the key can be anything; the default request address is [http://localhost:11434](http://localhost:11434), and it can be changed in the channel if needed
5. The [Midjourney-Proxy(Plus)](https://github.com/novicezk/midjourney-proxy) interface, [integration docs](Midjourney.md)
6. [01.AI (LingYiWanWu)](https://platform.lingyiwanwu.com/)
7. Custom channels, with support for entering a full request URL

You can add the custom model gpt-4-gizmo-* to a channel. It is not an official OpenAI model but a third-party one, and it cannot be called with an official key.

## Channel retry
Channel retry has been implemented; the retry count can be set in `Settings -> Operation Settings -> General Settings`. Enabling the cache is recommended.
Channel retry has been implemented; the retry count can be set in `Settings -> Operation Settings -> General Settings`. **Enabling the cache is recommended**.
If retry is enabled, the first retry uses the same priority, the second retry uses the next priority, and so on.
### How to configure the cache
1. `REDIS_CONN_STRING`: once set, Redis is used as the cache.
   + Example: `REDIS_CONN_STRING=redis://default:redispw@localhost:49153`
2. `MEMORY_CACHE_ENABLED`: enables the in-memory cache (not needed if `REDIS_CONN_STRING` is set); it introduces a slight delay in user quota updates. Allowed values are `true` and `false`; if unset it defaults to `false`.
   + Example: `MEMORY_CACHE_ENABLED=true`
### Why are some requests not retried
These error codes are never retried: 400, 504, 524
### I want 400 to be retried as well
In `Channel -> Edit`, set `Status Code Rewrite` to
```json
{
"400": "500"
}
```
This turns 400 errors into 500 errors, which are then retried.

## Deployment
@@ -88,6 +99,9 @@ docker run --name new-api -d --restart always -p 3000:3000 -e TZ=Asia/Shanghai -
docker run --name new-api -d --restart always -p 3000:3000 -e SQL_DSN="root:123456@tcp(BT-panel server address:BT-panel database port)/BT-panel database name" -e TZ=Asia/Shanghai -v /www/wwwroot/new-api:/data calciumion/new-api:latest
# Note: the database must allow remote access, and only the server IP should be allowed to connect
```
### Default account and password
Default account: root, password: 123456

## Midjourney API setup docs
[Integration docs](Midjourney.md)
@@ -208,6 +208,9 @@ const (
ChannelTypeLingYiWanWu = 31
ChannelTypeAws = 33
ChannelTypeCohere = 34
ChannelTypeMiniMax = 35

ChannelTypeDummy // this one is only for count, do not add any channel after this
)

var ChannelBaseURLs = []string{
@@ -246,4 +249,5 @@ var ChannelBaseURLs = []string{
"", //32
"", //33
"https://api.cohere.ai", //34
"https://api.minimax.chat", //35
}
@@ -16,7 +16,22 @@ func SafeGoroutine(f func()) {
}()
}

func SafeSend(ch chan bool, value bool) (closed bool) {
func SafeSendBool(ch chan bool, value bool) (closed bool) {
defer func() {
// Recover from panic if one occured. A panic would mean the channel was closed.
if recover() != nil {
closed = true
}
}()

// This will panic if the channel is closed.
ch <- value

// If the code reaches here, then the channel was not closed.
return false
}

func SafeSendString(ch chan string, value string) (closed bool) {
defer func() {
// Recover from panic if one occured. A panic would mean the channel was closed.
if recover() != nil {
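A minimal usage sketch (not part of the diff) of why the panic-recovery trick in `SafeSendBool` matters: a sender may race with a receiver that has already closed the channel, and the helper turns the resulting "send on closed channel" panic into a boolean. The `stopChan` name and the timing are assumptions for illustration only.

```go
package main

import (
	"fmt"
	"time"

	"one-api/common" // assumes this sketch is compiled inside the one-api module
)

func main() {
	stopChan := make(chan bool, 1)

	// Consumer that may give up early and close the channel.
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(stopChan)
	}()

	time.Sleep(20 * time.Millisecond)
	// A plain `stopChan <- true` here would panic; SafeSendBool reports closure instead.
	if closed := common.SafeSendBool(stopChan, true); closed {
		fmt.Println("channel already closed, signal dropped")
	}
}
```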
@@ -5,6 +5,13 @@ import (
"strings"
)

// from songquanpeng/one-api
const (
USD2RMB = 7.3 // tentatively 1 USD = 7.3 RMB
USD = 500 // $0.002 = 1 -> $1 = 500
RMB = USD / USD2RMB
)

// modelRatio
// https://platform.openai.com/docs/models/model-endpoint-compatibility
// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Blfmc9dlf
@@ -13,9 +20,11 @@ import (
// 1 === $0.002 / 1K tokens
// 1 === ¥0.014 / 1k tokens

var DefaultModelRatio = map[string]float64{
var defaultModelRatio = map[string]float64{
//"midjourney": 50,
"gpt-4-gizmo-*": 15,
"gpt-4-all": 15,
"gpt-4o-all": 15,
"gpt-4": 15,
//"gpt-4-0314": 15, //deprecated
"gpt-4-0613": 15,
@@ -27,93 +36,123 @@ var DefaultModelRatio = map[string]float64{
"gpt-4-turbo-preview": 5, // $0.01 / 1K tokens
"gpt-4-vision-preview": 5, // $0.01 / 1K tokens
"gpt-4-1106-vision-preview": 5, // $0.01 / 1K tokens
"gpt-4o": 2.5, // $0.01 / 1K tokens
"gpt-4o-2024-05-13": 2.5, // $0.01 / 1K tokens
"gpt-4-turbo": 5, // $0.01 / 1K tokens
"gpt-4-turbo-2024-04-09": 5, // $0.01 / 1K tokens
"gpt-3.5-turbo": 0.25, // $0.0015 / 1K tokens
//"gpt-3.5-turbo-0301": 0.75, //deprecated
"gpt-3.5-turbo-0613": 0.75,
"gpt-3.5-turbo-16k": 1.5, // $0.003 / 1K tokens
"gpt-3.5-turbo-16k-0613": 1.5,
"gpt-3.5-turbo-instruct": 0.75, // $0.0015 / 1K tokens
"gpt-3.5-turbo-1106": 0.5, // $0.001 / 1K tokens
"gpt-3.5-turbo-0125": 0.25,
"babbage-002": 0.2, // $0.0004 / 1K tokens
"davinci-002": 1, // $0.002 / 1K tokens
"text-ada-001": 0.2,
"text-babbage-001": 0.25,
"text-curie-001": 1,
"text-davinci-002": 10,
"text-davinci-003": 10,
"text-davinci-edit-001": 10,
"code-davinci-edit-001": 10,
"whisper-1": 15, // $0.006 / minute -> $0.006 / 150 words -> $0.006 / 200 tokens -> $0.03 / 1k tokens
"tts-1": 7.5, // 1k characters -> $0.015
"tts-1-1106": 7.5, // 1k characters -> $0.015
"tts-1-hd": 15, // 1k characters -> $0.03
"tts-1-hd-1106": 15, // 1k characters -> $0.03
"davinci": 10,
"curie": 10,
"babbage": 10,
"ada": 10,
"text-embedding-3-small": 0.01,
"text-embedding-3-large": 0.065,
"text-embedding-ada-002": 0.05,
"text-search-ada-doc-001": 10,
"text-moderation-stable": 0.1,
"text-moderation-latest": 0.1,
"dall-e-2": 8,
"dall-e-3": 16,
"claude-instant-1": 0.4, // $0.8 / 1M tokens
"claude-2.0": 4, // $8 / 1M tokens
"claude-2.1": 4, // $8 / 1M tokens
"claude-3-haiku-20240307": 0.125, // $0.25 / 1M tokens
"claude-3-sonnet-20240229": 1.5, // $3 / 1M tokens
"claude-3-opus-20240229": 7.5, // $15 / 1M tokens
"ERNIE-Bot": 0.8572, // ¥0.012 / 1k tokens
"ERNIE-Bot-turbo": 0.5715, // ¥0.008 / 1k tokens
"ERNIE-Bot-4": 8.572, // ¥0.12 / 1k tokens
"Embedding-V1": 0.1429, // ¥0.002 / 1k tokens
"PaLM-2": 1,
"gemini-pro": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
"gemini-pro-vision": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
"gemini-1.0-pro-vision-001": 1,
"gemini-1.0-pro-001": 1,
"gemini-1.5-pro-latest": 1,
"gemini-1.0-pro-latest": 1,
"gemini-1.0-pro-vision-latest": 1,
"gemini-ultra": 1,
"chatglm_turbo": 0.3572, // ¥0.005 / 1k tokens
"chatglm_pro": 0.7143, // ¥0.01 / 1k tokens
"chatglm_std": 0.3572, // ¥0.005 / 1k tokens
"chatglm_lite": 0.1429, // ¥0.002 / 1k tokens
"glm-4": 7.143, // ¥0.1 / 1k tokens
"glm-4v": 7.143, // ¥0.1 / 1k tokens
"glm-3-turbo": 0.3572,
"qwen-turbo": 0.8572, // ¥0.012 / 1k tokens
"qwen-plus": 10, // ¥0.14 / 1k tokens
"text-embedding-v1": 0.05, // ¥0.0007 / 1k tokens
"SparkDesk-v1.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v2.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v3.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v3.5": 1.2858, // ¥0.018 / 1k tokens
"360GPT_S2_V9": 0.8572, // ¥0.012 / 1k tokens
"embedding-bert-512-v1": 0.0715, // ¥0.001 / 1k tokens
"embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens
"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
"hunyuan": 7.143, // ¥0.1 / 1k tokens // https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0
"gpt-3.5-turbo-0613": 0.75,
"gpt-3.5-turbo-16k": 1.5, // $0.003 / 1K tokens
"gpt-3.5-turbo-16k-0613": 1.5,
"gpt-3.5-turbo-instruct": 0.75, // $0.0015 / 1K tokens
"gpt-3.5-turbo-1106": 0.5, // $0.001 / 1K tokens
"gpt-3.5-turbo-0125": 0.25,
"babbage-002": 0.2, // $0.0004 / 1K tokens
"davinci-002": 1, // $0.002 / 1K tokens
"text-ada-001": 0.2,
"text-babbage-001": 0.25,
"text-curie-001": 1,
//"text-davinci-002": 10,
//"text-davinci-003": 10,
"text-davinci-edit-001": 10,
"code-davinci-edit-001": 10,
"whisper-1": 15, // $0.006 / minute -> $0.006 / 150 words -> $0.006 / 200 tokens -> $0.03 / 1k tokens
"tts-1": 7.5, // 1k characters -> $0.015
"tts-1-1106": 7.5, // 1k characters -> $0.015
"tts-1-hd": 15, // 1k characters -> $0.03
"tts-1-hd-1106": 15, // 1k characters -> $0.03
"davinci": 10,
"curie": 10,
"babbage": 10,
"ada": 10,
"text-embedding-3-small": 0.01,
"text-embedding-3-large": 0.065,
"text-embedding-ada-002": 0.05,
"text-search-ada-doc-001": 10,
"text-moderation-stable": 0.1,
"text-moderation-latest": 0.1,
"claude-instant-1": 0.4, // $0.8 / 1M tokens
"claude-2.0": 4, // $8 / 1M tokens
"claude-2.1": 4, // $8 / 1M tokens
"claude-3-haiku-20240307": 0.125, // $0.25 / 1M tokens
"claude-3-sonnet-20240229": 1.5, // $3 / 1M tokens
"claude-3-opus-20240229": 7.5, // $15 / 1M tokens
"ERNIE-Bot": 0.8572, // ¥0.012 / 1k tokens //renamed to ERNIE-3.5-8K
"ERNIE-Bot-turbo": 0.5715, // ¥0.008 / 1k tokens //renamed to ERNIE-Lite-8K
"ERNIE-Bot-4": 8.572, // ¥0.12 / 1k tokens //renamed to ERNIE-4.0-8K
"ERNIE-4.0-8K": 8.572, // ¥0.12 / 1k tokens
"ERNIE-3.5-8K": 0.8572, // ¥0.012 / 1k tokens
"ERNIE-Speed-8K": 0.2858, // ¥0.004 / 1k tokens
"ERNIE-Speed-128K": 0.2858, // ¥0.004 / 1k tokens
"ERNIE-Lite-8K": 0.2143, // ¥0.003 / 1k tokens
"ERNIE-Tiny-8K": 0.0715, // ¥0.001 / 1k tokens
"ERNIE-Character-8K": 0.2858, // ¥0.004 / 1k tokens
"ERNIE-Functions-8K": 0.2858, // ¥0.004 / 1k tokens
"Embedding-V1": 0.1429, // ¥0.002 / 1k tokens
"PaLM-2": 1,
"gemini-pro": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
"gemini-pro-vision": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
"gemini-1.0-pro-vision-001": 1,
"gemini-1.0-pro-001": 1,
"gemini-1.5-pro-latest": 1,
"gemini-1.5-flash-latest": 1,
"gemini-1.0-pro-latest": 1,
"gemini-1.0-pro-vision-latest": 1,
"gemini-ultra": 1,
"chatglm_turbo": 0.3572, // ¥0.005 / 1k tokens
"chatglm_pro": 0.7143, // ¥0.01 / 1k tokens
"chatglm_std": 0.3572, // ¥0.005 / 1k tokens
"chatglm_lite": 0.1429, // ¥0.002 / 1k tokens
"glm-4": 7.143, // ¥0.1 / 1k tokens
"glm-4v": 7.143, // ¥0.1 / 1k tokens
"glm-3-turbo": 0.3572,
"qwen-turbo": 0.8572, // ¥0.012 / 1k tokens
"qwen-plus": 10, // ¥0.14 / 1k tokens
"text-embedding-v1": 0.05, // ¥0.0007 / 1k tokens
"SparkDesk-v1.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v2.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v3.1": 1.2858, // ¥0.018 / 1k tokens
"SparkDesk-v3.5": 1.2858, // ¥0.018 / 1k tokens
"360GPT_S2_V9": 0.8572, // ¥0.012 / 1k tokens
"360gpt-turbo": 0.0858, // ¥0.0012 / 1k tokens
"360gpt-turbo-responsibility-8k": 0.8572, // ¥0.012 / 1k tokens
"360gpt-pro": 0.8572, // ¥0.012 / 1k tokens
"embedding-bert-512-v1": 0.0715, // ¥0.001 / 1k tokens
"embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens
"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
"hunyuan": 7.143, // ¥0.1 / 1k tokens // https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0
// https://platform.lingyiwanwu.com/docs#-计费单元
// USD prices already converted at a rate of 7.2
"yi-34b-chat-0205": 0.018,
"yi-34b-chat-200k": 0.0864,
"yi-vl-plus": 0.0432,
"yi-34b-chat-0205": 0.18,
"yi-34b-chat-200k": 0.864,
"yi-vl-plus": 0.432,
"yi-large": 20.0 / 1000 * RMB,
"yi-medium": 2.5 / 1000 * RMB,
"yi-vision": 6.0 / 1000 * RMB,
"yi-medium-200k": 12.0 / 1000 * RMB,
"yi-spark": 1.0 / 1000 * RMB,
"yi-large-rag": 25.0 / 1000 * RMB,
"yi-large-turbo": 12.0 / 1000 * RMB,
"yi-large-preview": 20.0 / 1000 * RMB,
"yi-large-rag-preview": 25.0 / 1000 * RMB,
"command": 0.5,
"command-nightly": 0.5,
"command-light": 0.5,
"command-light-nightly": 0.5,
"command-r": 0.25,
"command-r-plus ": 1.5,
"deepseek-chat": 0.07,
"deepseek-coder": 0.07,
// Perplexity online models charge extra for search; adjust if needed, search fees are not counted here
"llama-3-sonar-small-32k-chat": 0.2 / 1000 * USD,
"llama-3-sonar-small-32k-online": 0.2 / 1000 * USD,
"llama-3-sonar-large-32k-chat": 1 / 1000 * USD,
"llama-3-sonar-large-32k-online": 1 / 1000 * USD,
}

var DefaultModelPrice = map[string]float64{
var defaultModelPrice = map[string]float64{
"dall-e-3": 0.04,
"gpt-4-gizmo-*": 0.1,
"mj_imagine": 0.1,
"mj_variation": 0.1,
@@ -135,9 +174,15 @@ var DefaultModelPrice = map[string]float64{
var modelPrice map[string]float64 = nil
var modelRatio map[string]float64 = nil

var CompletionRatio map[string]float64 = nil
var defaultCompletionRatio = map[string]float64{
"gpt-4-gizmo-*": 2,
"gpt-4-all": 2,
}

func ModelPrice2JSONString() string {
if modelPrice == nil {
modelPrice = DefaultModelPrice
modelPrice = defaultModelPrice
}
jsonBytes, err := json.Marshal(modelPrice)
if err != nil {
@@ -151,9 +196,10 @@ func UpdateModelPriceByJSONString(jsonStr string) error {
return json.Unmarshal([]byte(jsonStr), &modelPrice)
}

func GetModelPrice(name string, printErr bool) float64 {
// GetModelPrice returns the price of a model; if the model does not exist it returns -1, false
func GetModelPrice(name string, printErr bool) (float64, bool) {
if modelPrice == nil {
modelPrice = DefaultModelPrice
modelPrice = defaultModelPrice
}
if strings.HasPrefix(name, "gpt-4-gizmo") {
name = "gpt-4-gizmo-*"
@@ -163,14 +209,21 @@ func GetModelPrice(name string, printErr bool) float64 {
if printErr {
SysError("model price not found: " + name)
}
return -1
return -1, false
}
return price
return price, true
}

func GetModelPriceMap() map[string]float64 {
if modelPrice == nil {
modelPrice = defaultModelPrice
}
return modelPrice
}

func ModelRatio2JSONString() string {
if modelRatio == nil {
modelRatio = DefaultModelRatio
modelRatio = defaultModelRatio
}
jsonBytes, err := json.Marshal(modelRatio)
if err != nil {
@@ -186,7 +239,7 @@ func UpdateModelRatioByJSONString(jsonStr string) error {

func GetModelRatio(name string) float64 {
if modelRatio == nil {
modelRatio = DefaultModelRatio
modelRatio = defaultModelRatio
}
if strings.HasPrefix(name, "gpt-4-gizmo") {
name = "gpt-4-gizmo-*"
@@ -199,7 +252,38 @@ func GetModelRatio(name string) float64 {
return ratio
}

func DefaultModelRatio2JSONString() string {
jsonBytes, err := json.Marshal(defaultModelRatio)
if err != nil {
SysError("error marshalling model ratio: " + err.Error())
}
return string(jsonBytes)
}

func GetDefaultModelRatioMap() map[string]float64 {
return defaultModelRatio
}

func CompletionRatio2JSONString() string {
if CompletionRatio == nil {
CompletionRatio = defaultCompletionRatio
}
jsonBytes, err := json.Marshal(CompletionRatio)
if err != nil {
SysError("error marshalling completion ratio: " + err.Error())
}
return string(jsonBytes)
}

func UpdateCompletionRatioByJSONString(jsonStr string) error {
CompletionRatio = make(map[string]float64)
return json.Unmarshal([]byte(jsonStr), &CompletionRatio)
}

func GetCompletionRatio(name string) float64 {
if strings.HasPrefix(name, "gpt-4-gizmo") {
name = "gpt-4-gizmo-*"
}
if strings.HasPrefix(name, "gpt-3.5") {
if name == "gpt-3.5-turbo" || strings.HasSuffix(name, "0125") {
// https://openai.com/blog/new-embedding-models-and-api-updates
@@ -211,8 +295,8 @@ func GetCompletionRatio(name string) float64 {
}
return 4.0 / 3.0
}
if strings.HasPrefix(name, "gpt-4") {
if strings.HasPrefix(name, "gpt-4-turbo") || strings.HasSuffix(name, "preview") {
if strings.HasPrefix(name, "gpt-4") && !strings.HasSuffix(name, "-all") && !strings.HasSuffix(name, "-gizmo-*") {
if strings.HasPrefix(name, "gpt-4-turbo") || strings.HasSuffix(name, "preview") || strings.HasPrefix(name, "gpt-4o") {
return 3
}
return 2
@@ -240,9 +324,35 @@ func GetCompletionRatio(name string) float64 {
return 2
}
}
if strings.HasPrefix(name, "deepseek") {
return 2
}
if strings.HasPrefix(name, "ERNIE-Speed-") {
return 2
} else if strings.HasPrefix(name, "ERNIE-Lite-") {
return 2
} else if strings.HasPrefix(name, "ERNIE-Character") {
return 2
} else if strings.HasPrefix(name, "ERNIE-Functions") {
return 2
}
switch name {
case "llama2-70b-4096":
return 0.8 / 0.7
return 0.8 / 0.64
case "llama3-8b-8192":
return 2
case "llama3-70b-8192":
return 0.79 / 0.59
}
if ratio, ok := CompletionRatio[name]; ok {
return ratio
}
return 1
}

func GetCompletionRatioMap() map[string]float64 {
if CompletionRatio == nil {
CompletionRatio = defaultCompletionRatio
}
return CompletionRatio
}
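To make the ratio system above concrete, here is a hedged, illustrative sketch of how a caller might combine the helpers. It is not the project's actual billing code: `estimateQuota` and `QuotaPerUnit` are assumptions used only to show the arithmetic (per-request price for fixed-price models, otherwise prompt tokens plus completion tokens weighted by the completion ratio, all scaled by the model and group ratios).

```go
package main

import (
	"fmt"

	"one-api/common" // assumes this sketch is compiled inside the one-api module
)

// estimateQuota is illustrative only; the real accounting lives in the relay code.
func estimateQuota(model string, promptTokens, completionTokens int, groupRatio float64) float64 {
	// Fixed-price models (e.g. mj_imagine, dall-e-3) bypass token ratios entirely.
	if price, usePrice := common.GetModelPrice(model, false); usePrice {
		return price * common.QuotaPerUnit * groupRatio // QuotaPerUnit is assumed to exist in common
	}
	ratio := common.GetModelRatio(model)                 // 1 == $0.002 per 1K tokens
	completionRatio := common.GetCompletionRatio(model)  // output tokens are weighted more heavily
	tokens := float64(promptTokens) + float64(completionTokens)*completionRatio
	return tokens * ratio * groupRatio
}

func main() {
	// gpt-4o: model ratio 2.5, completion ratio 3 under the new gpt-4o branch.
	fmt.Println(estimateQuota("gpt-4o", 1000, 500, 1.0))
}
```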
@@ -1,5 +1,13 @@
package common

import (
"bytes"
"fmt"
goahocorasick "github.com/anknown/ahocorasick"
"one-api/constant"
"strings"
)

func SundaySearch(text string, pattern string) bool {
// build the shift table
offset := make(map[rune]int)
@@ -48,3 +56,25 @@ func RemoveDuplicate(s []string) []string {
}
return result
}

func InitAc() *goahocorasick.Machine {
m := new(goahocorasick.Machine)
dict := readRunes()
if err := m.Build(dict); err != nil {
fmt.Println(err)
return nil
}
return m
}

func readRunes() [][]rune {
var dict [][]rune

for _, word := range constant.SensitiveWords {
word = strings.ToLower(word)
l := bytes.TrimSpace([]byte(word))
dict = append(dict, bytes.Runes(l))
}

return dict
}
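A small, hedged usage sketch (not from the diff) of the Aho-Corasick machine that `InitAc` builds from `constant.SensitiveWords`. The `MultiPatternSearch` call and the `Term` fields are assumed from the `github.com/anknown/ahocorasick` package; the input text is invented.

```go
package main

import (
	"fmt"
	"strings"

	"one-api/common" // assumes this sketch is compiled inside the one-api module
)

func main() {
	m := common.InitAc() // machine built from constant.SensitiveWords
	if m == nil {
		return
	}
	// Lower-case the input to match readRunes, which lower-cases the dictionary.
	text := strings.ToLower("this is a test_sensitive prompt")
	// Second argument false = collect every match instead of stopping at the first.
	for _, term := range m.MultiPatternSearch([]rune(text), false) {
		fmt.Printf("hit %q at rune offset %d\n", string(term.Word), term.Pos)
	}
}
```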
@@ -1,6 +1,7 @@
package common

import (
"encoding/json"
"fmt"
"github.com/google/uuid"
"html/template"
@@ -241,3 +242,28 @@ func RandomSleep() {
// Sleep for 0-3000 ms
time.Sleep(time.Duration(rand.Intn(3000)) * time.Millisecond)
}

func MapToJsonStr(m map[string]interface{}) string {
bytes, err := json.Marshal(m)
if err != nil {
return ""
}
return string(bytes)
}

func MapToJsonStrFloat(m map[string]float64) string {
bytes, err := json.Marshal(m)
if err != nil {
return ""
}
return string(bytes)
}

func StrToMap(str string) map[string]interface{} {
m := make(map[string]interface{})
err := json.Unmarshal([]byte(str), &m)
if err != nil {
return nil
}
return m
}
@@ -1,6 +1,7 @@
package constant

var MjNotifyEnabled = false
var MjAccountFilterEnabled = false
var MjModeClearEnabled = false
var MjForwardUrlEnabled = true
@@ -16,7 +16,7 @@ var StreamCacheQueueLength = 0
// SensitiveWords sensitive-word list
// var SensitiveWords []string
var SensitiveWords = []string{
"test",
"test_sensitive",
}

func SensitiveWordsToString() string {
@@ -53,7 +53,7 @@ func testChannel(channel *model.Channel, testModel string) (err error, openaiErr
}

meta := relaycommon.GenRelayInfo(c)
apiType := constant.ChannelType2APIType(channel.Type)
apiType, _ := constant.ChannelType2APIType(channel.Type)
adaptor := relay.GetAdaptor(apiType)
if adaptor == nil {
return fmt.Errorf("invalid api type: %d, adaptor is nil", apiType), nil
@@ -64,7 +64,21 @@ func testChannel(channel *model.Channel, testModel string) (err error, openaiErr
} else {
testModel = adaptor.GetModelList()[0]
}
} else {
modelMapping := *channel.ModelMapping
if modelMapping != "" && modelMapping != "{}" {
modelMap := make(map[string]string)
err := json.Unmarshal([]byte(modelMapping), &modelMap)
if err != nil {
openaiErr := service.OpenAIErrorWrapperLocal(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError).Error
return err, &openaiErr
}
if modelMap[testModel] != "" {
testModel = modelMap[testModel]
}
}
}

request := buildTestRequest()
request.Model = testModel
meta.UpstreamModelName = testModel
@@ -208,7 +222,7 @@ func testAllChannels(notify bool) error {
if isChannelEnabled && service.ShouldDisableChannel(openaiErr, -1) && ban {
service.DisableChannel(channel.Id, channel.Name, err.Error())
}
if !isChannelEnabled && service.ShouldEnableChannel(err, openaiErr) {
if !isChannelEnabled && service.ShouldEnableChannel(err, openaiErr, channel.Status) {
service.EnableChannel(channel.Id, channel.Name)
}
channel.UpdateResponseTime(milliseconds)
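The new block in `testChannel` applies the channel's model mapping before building the test request. The standalone sketch below shows the same override with an invented mapping value; the `"gpt-4o"` entry is an example, not taken from the diff.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Example channel model mapping: requests for "gpt-4o" are sent upstream
	// as "gpt-4o-2024-05-13" (invented values for illustration).
	modelMapping := `{"gpt-4o": "gpt-4o-2024-05-13"}`

	testModel := "gpt-4o"
	modelMap := make(map[string]string)
	if err := json.Unmarshal([]byte(modelMapping), &modelMap); err == nil {
		if modelMap[testModel] != "" {
			testModel = modelMap[testModel] // same override the new testChannel code applies
		}
	}
	fmt.Println(testModel) // gpt-4o-2024-05-13
}
```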
@@ -1,6 +1,8 @@
package controller

import (
"encoding/json"
"fmt"
"github.com/gin-gonic/gin"
"net/http"
"one-api/common"
@@ -9,6 +11,34 @@ import (
"strings"
)

type OpenAIModel struct {
ID string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
OwnedBy string `json:"owned_by"`
Permission []struct {
ID string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
AllowCreateEngine bool `json:"allow_create_engine"`
AllowSampling bool `json:"allow_sampling"`
AllowLogprobs bool `json:"allow_logprobs"`
AllowSearchIndices bool `json:"allow_search_indices"`
AllowView bool `json:"allow_view"`
AllowFineTuning bool `json:"allow_fine_tuning"`
Organization string `json:"organization"`
Group string `json:"group"`
IsBlocking bool `json:"is_blocking"`
} `json:"permission"`
Root string `json:"root"`
Parent string `json:"parent"`
}

type OpenAIModelsResponse struct {
Data []OpenAIModel `json:"data"`
Success bool `json:"success"`
}

func GetAllChannels(c *gin.Context) {
p, _ := strconv.Atoi(c.Query("p"))
pageSize, _ := strconv.Atoi(c.Query("page_size"))
@@ -35,6 +65,65 @@ func GetAllChannels(c *gin.Context) {
return
}

func FetchUpstreamModels(c *gin.Context) {
id, err := strconv.Atoi(c.Param("id"))
if err != nil {
c.JSON(http.StatusOK, gin.H{
"success": false,
"message": err.Error(),
})
return
}
channel, err := model.GetChannelById(id, true)
if err != nil {
c.JSON(http.StatusOK, gin.H{
"success": false,
"message": err.Error(),
})
return
}
if channel.Type != common.ChannelTypeOpenAI {
c.JSON(http.StatusOK, gin.H{
"success": false,
"message": "仅支持 OpenAI 类型渠道",
})
return
}
url := fmt.Sprintf("%s/v1/models", *channel.BaseURL)
body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
if err != nil {
c.JSON(http.StatusOK, gin.H{
"success": false,
"message": err.Error(),
})
}
result := OpenAIModelsResponse{}
err = json.Unmarshal(body, &result)
if err != nil {
c.JSON(http.StatusOK, gin.H{
"success": false,
"message": err.Error(),
})
}
if !result.Success {
c.JSON(http.StatusOK, gin.H{
"success": false,
"message": "上游返回错误",
})
}

var ids []string
for _, model := range result.Data {
ids = append(ids, model.ID)
}

c.JSON(http.StatusOK, gin.H{
"success": true,
"message": "",
"data": ids,
})
}

func FixChannelsAbilities(c *gin.Context) {
count, err := model.FixAbility()
if err != nil {
@@ -86,7 +86,7 @@ func UpdateMidjourneyTaskBulk() {
continue
}
// set the timeout
timeout := time.Second * 5
timeout := time.Second * 15
ctx, cancel := context.WithTimeout(context.Background(), timeout)
// create a new request using the context with timeout
req = req.WithContext(ctx)

@@ -147,7 +147,7 @@ func SendEmailVerification(c *gin.Context) {
}
}
if common.EmailAliasRestrictionEnabled {
containsSpecialSymbols := strings.Contains(localPart, "+") || strings.Count(localPart, ".") > 1
containsSpecialSymbols := strings.Contains(localPart, "+") || strings.Contains(localPart, ".")
if containsSpecialSymbols {
c.JSON(http.StatusOK, gin.H{
"success": false,
@@ -4,49 +4,28 @@ import (
"fmt"
"github.com/gin-gonic/gin"
"net/http"
"one-api/common"
"one-api/constant"
"one-api/dto"
"one-api/model"
"one-api/relay"
"one-api/relay/channel/ai360"
"one-api/relay/channel/moonshot"
"one-api/relay/channel/lingyiwanwu"
"one-api/relay/channel/minimax"
"one-api/relay/channel/moonshot"
relaycommon "one-api/relay/common"
relayconstant "one-api/relay/constant"
)

// https://platform.openai.com/docs/api-reference/models/list

type OpenAIModelPermission struct {
Id string `json:"id"`
Object string `json:"object"`
Created int `json:"created"`
AllowCreateEngine bool `json:"allow_create_engine"`
AllowSampling bool `json:"allow_sampling"`
AllowLogprobs bool `json:"allow_logprobs"`
AllowSearchIndices bool `json:"allow_search_indices"`
AllowView bool `json:"allow_view"`
AllowFineTuning bool `json:"allow_fine_tuning"`
Organization string `json:"organization"`
Group *string `json:"group"`
IsBlocking bool `json:"is_blocking"`
}
var openAIModels []dto.OpenAIModels
var openAIModelsMap map[string]dto.OpenAIModels
var channelId2Models map[int][]string

type OpenAIModels struct {
Id string `json:"id"`
Object string `json:"object"`
Created int `json:"created"`
OwnedBy string `json:"owned_by"`
Permission []OpenAIModelPermission `json:"permission"`
Root string `json:"root"`
Parent *string `json:"parent"`
}

var openAIModels []OpenAIModels
var openAIModelsMap map[string]OpenAIModels

func init() {
var permission []OpenAIModelPermission
permission = append(permission, OpenAIModelPermission{
func getPermission() []dto.OpenAIModelPermission {
var permission []dto.OpenAIModelPermission
permission = append(permission, dto.OpenAIModelPermission{
Id: "modelperm-LwHkVFn8AcMItP432fKKDIKJ",
Object: "model_permission",
Created: 1626777600,
@@ -60,7 +39,12 @@ func init() {
Group: nil,
IsBlocking: false,
})
return permission
}

func init() {
// https://platform.openai.com/docs/models/model-endpoint-compatibility
permission := getPermission()
for i := 0; i < relayconstant.APITypeDummy; i++ {
if i == relayconstant.APITypeAIProxyLibrary {
continue
@@ -69,7 +53,7 @@ func init
channelName := adaptor.GetChannelName()
modelNames := adaptor.GetModelList()
for _, modelName := range modelNames {
openAIModels = append(openAIModels, OpenAIModels{
openAIModels = append(openAIModels, dto.OpenAIModels{
Id: modelName,
Object: "model",
Created: 1626777600,
@@ -81,40 +65,51 @@
}
}
for _, modelName := range ai360.ModelList {
openAIModels = append(openAIModels, OpenAIModels{
openAIModels = append(openAIModels, dto.OpenAIModels{
Id: modelName,
Object: "model",
Created: 1626777600,
OwnedBy: "360",
OwnedBy: ai360.ChannelName,
Permission: permission,
Root: modelName,
Parent: nil,
})
}
for _, modelName := range moonshot.ModelList {
openAIModels = append(openAIModels, OpenAIModels{
openAIModels = append(openAIModels, dto.OpenAIModels{
Id: modelName,
Object: "model",
Created: 1626777600,
OwnedBy: "moonshot",
OwnedBy: moonshot.ChannelName,
Permission: permission,
Root: modelName,
Parent: nil,
})
}
for _, modelName := range lingyiwanwu.ModelList {
openAIModels = append(openAIModels, OpenAIModels{
openAIModels = append(openAIModels, dto.OpenAIModels{
Id: modelName,
Object: "model",
Created: 1626777600,
OwnedBy: "lingyiwanwu",
OwnedBy: lingyiwanwu.ChannelName,
Permission: permission,
Root: modelName,
Parent: nil,
})
}
for _, modelName := range minimax.ModelList {
openAIModels = append(openAIModels, dto.OpenAIModels{
Id: modelName,
Object: "model",
Created: 1626777600,
OwnedBy: minimax.ChannelName,
Permission: permission,
Root: modelName,
Parent: nil,
})
}
for modelName, _ := range constant.MidjourneyModel2Action {
openAIModels = append(openAIModels, OpenAIModels{
openAIModels = append(openAIModels, dto.OpenAIModels{
Id: modelName,
Object: "model",
Created: 1626777600,
@@ -124,9 +119,20 @@
Parent: nil,
})
}
openAIModelsMap = make(map[string]OpenAIModels)
for _, model := range openAIModels {
openAIModelsMap[model.Id] = model
openAIModelsMap = make(map[string]dto.OpenAIModels)
for _, aiModel := range openAIModels {
openAIModelsMap[aiModel.Id] = aiModel
}
channelId2Models = make(map[int][]string)
for i := 1; i <= common.ChannelTypeDummy; i++ {
apiType, success := relayconstant.ChannelType2APIType(i)
if !success || apiType == relayconstant.APITypeAIProxyLibrary {
continue
}
meta := &relaycommon.RelayInfo{ChannelType: i}
adaptor := relay.GetAdaptor(apiType)
adaptor.Init(meta, dto.GeneralOpenAIRequest{})
channelId2Models[i] = adaptor.GetModelList()
}
}

@@ -141,29 +147,47 @@ func ListModels(c *gin.Context) {
return
}
models := model.GetGroupModels(user.Group)
userOpenAiModels := make([]OpenAIModels, 0)
userOpenAiModels := make([]dto.OpenAIModels, 0)
permission := getPermission()
for _, s := range models {
if _, ok := openAIModelsMap[s]; ok {
userOpenAiModels = append(userOpenAiModels, openAIModelsMap[s])
} else {
userOpenAiModels = append(userOpenAiModels, dto.OpenAIModels{
Id: s,
Object: "model",
Created: 1626777600,
OwnedBy: "custom",
Permission: permission,
Root: s,
Parent: nil,
})
}
}
c.JSON(200, gin.H{
"object": "list",
"data": userOpenAiModels,
"success": true,
"data": userOpenAiModels,
})
}

func ChannelListModels(c *gin.Context) {
c.JSON(200, gin.H{
"object": "list",
"data": openAIModels,
"success": true,
"data": openAIModels,
})
}

func DashboardListModels(c *gin.Context) {
c.JSON(200, gin.H{
"success": true,
"data": channelId2Models,
})
}

func RetrieveModel(c *gin.Context) {
modelId := c.Param("model")
if model, ok := openAIModelsMap[modelId]; ok {
c.JSON(200, model)
if aiModel, ok := openAIModelsMap[modelId]; ok {
c.JSON(200, aiModel)
} else {
openAIError := dto.OpenAIError{
Message: fmt.Sprintf("The model '%s' does not exist", modelId),
47 controller/pricing.go (new file)

@@ -0,0 +1,47 @@
package controller

import (
"github.com/gin-gonic/gin"
"one-api/common"
"one-api/model"
)

func GetPricing(c *gin.Context) {
userId := c.GetInt("id")
// if no login, get default group ratio
groupRatio := common.GetGroupRatio("default")
group, err := model.CacheGetUserGroup(userId)
if err == nil {
groupRatio = common.GetGroupRatio(group)
}
pricing := model.GetPricing(group)
c.JSON(200, gin.H{
"success": true,
"data": pricing,
"group_ratio": groupRatio,
})
}

func ResetModelRatio(c *gin.Context) {
defaultStr := common.DefaultModelRatio2JSONString()
err := model.UpdateOption("ModelRatio", defaultStr)
if err != nil {
c.JSON(200, gin.H{
"success": false,
"message": err.Error(),
})
return
}
err = common.UpdateModelRatioByJSONString(defaultStr)
if err != nil {
c.JSON(200, gin.H{
"success": false,
"message": err.Error(),
})
return
}
c.JSON(200, gin.H{
"success": true,
"message": "重置模型倍率成功",
})
}
@@ -43,7 +43,7 @@ func Relay(c *gin.Context) {
group := c.GetString("group")
originalModel := c.GetString("original_model")
openaiErr := relayHandler(c, relayMode)
useChannel := []int{channelId}
c.Set("use_channel", []string{fmt.Sprintf("%d", channelId)})
if openaiErr != nil {
go processChannelError(c, channelId, openaiErr)
} else {
@@ -56,7 +56,9 @@ func Relay(c *gin.Context) {
break
}
channelId = channel.Id
useChannel = append(useChannel, channelId)
useChannel := c.GetStringSlice("use_channel")
useChannel = append(useChannel, fmt.Sprintf("%d", channelId))
c.Set("use_channel", useChannel)
common.LogInfo(c.Request.Context(), fmt.Sprintf("using channel #%d to retry (remain times %d)", channel.Id, i))
middleware.SetupContextForSelectedChannel(c, channel, originalModel)

@@ -67,6 +69,7 @@ func Relay(c *gin.Context) {
go processChannelError(c, channelId, openaiErr)
}
}
useChannel := c.GetStringSlice("use_channel")
if len(useChannel) > 1 {
retryLogStr := fmt.Sprintf("重试:%s", strings.Trim(strings.Join(strings.Fields(fmt.Sprint(useChannel)), "->"), "[]"))
common.LogInfo(c.Request.Context(), retryLogStr)
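The retry-log line above uses a slightly opaque `strings.Fields(fmt.Sprint(...))` trick to turn the channel-ID slice into an arrow-separated string. The standalone sketch below reproduces just that formatting step with invented channel IDs.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// The retry path stores the visited channel IDs as strings in the gin context.
	useChannel := []string{"12", "7", "3"}

	// fmt.Sprint renders the slice as "[12 7 3]"; Fields splits on the spaces,
	// Join glues the IDs with "->", and Trim drops the surrounding brackets.
	joined := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(useChannel)), "->"), "[]")
	fmt.Println(joined) // 12->7->3
}
```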
37 dto/pricing.go (new file)

@@ -0,0 +1,37 @@
package dto

type OpenAIModelPermission struct {
Id string `json:"id"`
Object string `json:"object"`
Created int `json:"created"`
AllowCreateEngine bool `json:"allow_create_engine"`
AllowSampling bool `json:"allow_sampling"`
AllowLogprobs bool `json:"allow_logprobs"`
AllowSearchIndices bool `json:"allow_search_indices"`
AllowView bool `json:"allow_view"`
AllowFineTuning bool `json:"allow_fine_tuning"`
Organization string `json:"organization"`
Group *string `json:"group"`
IsBlocking bool `json:"is_blocking"`
}

type OpenAIModels struct {
Id string `json:"id"`
Object string `json:"object"`
Created int `json:"created"`
OwnedBy string `json:"owned_by"`
Permission []OpenAIModelPermission `json:"permission"`
Root string `json:"root"`
Parent *string `json:"parent"`
}

type ModelPricing struct {
Available bool `json:"available"`
ModelName string `json:"model_name"`
QuotaType int `json:"quota_type"`
ModelRatio float64 `json:"model_ratio"`
ModelPrice float64 `json:"model_price"`
OwnerBy string `json:"owner_by"`
CompletionRatio float64 `json:"completion_ratio"`
EnableGroup []string `json:"enable_group,omitempty"`
}
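A hedged sketch of what one entry of the pricing payload looks like when `dto.ModelPricing` is marshalled (this is what `GetPricing` returns under `data`). The concrete values and the quota-type convention in the comment are assumptions for illustration, not taken from the diff.

```go
package main

import (
	"encoding/json"
	"fmt"

	"one-api/dto" // assumes this sketch is compiled inside the one-api module
)

func main() {
	p := dto.ModelPricing{
		Available:       true,
		ModelName:       "gpt-4o", // example values only
		QuotaType:       0,        // assumed convention: 0 = billed by ratio, 1 = fixed price per call
		ModelRatio:      2.5,
		ModelPrice:      0,
		OwnerBy:         "openai",
		CompletionRatio: 3,
		EnableGroup:     []string{"default"},
	}
	out, _ := json.MarshalIndent(p, "", "  ")
	fmt.Println(string(out))
	// Prints keys such as "available", "model_name", "model_ratio",
	// "completion_ratio" and "enable_group", per the struct tags above.
}
```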
@@ -55,8 +55,9 @@ type OpenAIEmbeddingResponse struct {

type ChatCompletionsStreamResponseChoice struct {
Delta ChatCompletionsStreamResponseChoiceDelta `json:"delta,omitempty"`
Logprobs *any `json:"logprobs"`
FinishReason *string `json:"finish_reason"`
Index int `json:"index,omitempty"`
Index int `json:"index"`
}

type ChatCompletionsStreamResponseChoiceDelta struct {
@@ -95,11 +96,12 @@ type FunctionCall struct {
}

type ChatCompletionsStreamResponse struct {
Id string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
Model string `json:"model"`
Choices []ChatCompletionsStreamResponseChoice `json:"choices"`
Id string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
Model string `json:"model"`
SystemFingerprint *string `json:"system_fingerprint"`
Choices []ChatCompletionsStreamResponseChoice `json:"choices"`
}

type ChatCompletionsStreamResponseSimple struct {
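A small, hedged sketch of the effect of the new `SystemFingerprint *string` field: because the tag has no `omitempty`, an unset pointer serializes as an explicit `null`, which is the shape OpenAI stream chunks use. The literal values below are invented.

```go
package main

import (
	"encoding/json"
	"fmt"

	"one-api/dto" // assumes this sketch is compiled inside the one-api module
)

func main() {
	chunk := dto.ChatCompletionsStreamResponse{
		Id:      "chatcmpl-123", // illustrative values
		Object:  "chat.completion.chunk",
		Created: 1715000000,
		Model:   "gpt-4o",
		// SystemFingerprint deliberately left nil.
	}
	b, _ := json.Marshal(chunk)
	fmt.Println(string(b))
	// Output contains "system_fingerprint":null because the field is a *string
	// without omitempty, so the key is always emitted.
}
```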
6 go.mod

@@ -17,11 +17,11 @@ require (
github.com/go-playground/validator/v10 v10.19.0
github.com/go-redis/redis/v8 v8.11.5
github.com/golang-jwt/jwt v3.2.2+incompatible
github.com/google/uuid v1.3.0
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.0
github.com/jinzhu/copier v0.4.0
github.com/linux-do/tiktoken-go v0.7.0
github.com/pkg/errors v0.9.1
github.com/pkoukk/tiktoken-go v0.1.6
github.com/samber/lo v1.39.0
github.com/shirou/gopsutil v3.21.11+incompatible
golang.org/x/crypto v0.21.0
@@ -42,7 +42,7 @@ require (
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/dlclark/regexp2 v1.10.0 // indirect
github.com/dlclark/regexp2 v1.11.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
12 go.sum

@@ -32,8 +32,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0=
github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
@@ -81,8 +81,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
@@ -124,6 +124,8 @@ github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgx
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/linux-do/tiktoken-go v0.7.0 h1:Kcm/miJ5gp77srtF8GQWnfq7W9kTaXEuHZg/g9IVEu8=
github.com/linux-do/tiktoken-go v0.7.0/go.mod h1:9Vkdtp0ngi4USmrdSx984iuIQ5IMr0hnUdz4jZZTJb8=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
@@ -148,8 +150,6 @@ github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNc
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkoukk/tiktoken-go v0.1.6 h1:JF0TlJzhTbrI30wCvFuiw6FzP2+/bR+FIxUdgEAcUsw=
github.com/pkoukk/tiktoken-go v0.1.6/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
@@ -64,6 +64,17 @@ func authHelper(c *gin.Context, minRole int) {
c.Next()
}

func TryUserAuth() func(c *gin.Context) {
return func(c *gin.Context) {
session := sessions.Default(c)
id := session.Get("id")
if id != nil {
c.Set("id", id)
}
c.Next()
}
}

func UserAuth() func(c *gin.Context) {
return func(c *gin.Context) {
authHelper(c, common.RoleCommonUser)
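A note on the new middleware: TryUserAuth only copies the session user id into the context when a session exists and never aborts the request, while UserAuth still enforces a minimum role through authHelper. A minimal sketch of a handler sitting behind TryUserAuth (handler name and responses are illustrative, not part of this diff; it assumes the usual gin/fmt/net-http imports):

func optionalUserHandler(c *gin.Context) {
	// With TryUserAuth, "id" is only present for logged-in sessions;
	// anonymous visitors still reach the handler.
	if id, exists := c.Get("id"); exists {
		c.JSON(http.StatusOK, gin.H{"success": true, "message": fmt.Sprintf("hello user %v", id)})
		return
	}
	c.JSON(http.StatusOK, gin.H{"success": true, "message": "hello guest"})
}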
@@ -29,6 +29,13 @@ func GetGroupModels(group string) []string {
return models
}

func GetEnabledModels() []string {
var models []string
// Find distinct models
DB.Table("abilities").Where("enabled = ?", true).Distinct("model").Pluck("model", &models)
return models
}

func getPriority(group string, model string, retry int) (int, error) {
groupCol := "`group`"
trueVal := "1"
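GetEnabledModels simply asks the abilities table for the distinct model names of enabled channels, roughly SELECT DISTINCT model FROM abilities WHERE enabled = true. A usage sketch (the printing loop is illustrative only):

// Sketch: list every model that at least one enabled channel can serve.
enabled := model.GetEnabledModels()
sort.Strings(enabled)
for _, name := range enabled {
	fmt.Println(name) // e.g. "gpt-3.5-turbo", "ERNIE-4.0-8K", ...
}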
model/log.go (14)
@@ -24,6 +24,7 @@ type Log struct {
IsStream bool `json:"is_stream" gorm:"default:false"`
ChannelId int `json:"channel" gorm:"index"`
TokenId int `json:"token_id" gorm:"default:0;index"`
Other string `json:"other"`
}

const (
@@ -57,12 +58,13 @@ func RecordLog(userId int, logType int, content string) {
}
}

func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptTokens int, completionTokens int, modelName string, tokenName string, quota int, content string, tokenId int, userQuota int, useTimeSeconds int, isStream bool) {
func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptTokens int, completionTokens int, modelName string, tokenName string, quota int, content string, tokenId int, userQuota int, useTimeSeconds int, isStream bool, other map[string]interface{}) {
common.LogInfo(ctx, fmt.Sprintf("record consume log: userId=%d, 用户调用前余额=%d, channelId=%d, promptTokens=%d, completionTokens=%d, modelName=%s, tokenName=%s, quota=%d, content=%s", userId, userQuota, channelId, promptTokens, completionTokens, modelName, tokenName, quota, content))
if !common.LogConsumeEnabled {
return
}
username, _ := CacheGetUsername(userId)
otherStr := common.MapToJsonStr(other)
log := &Log{
UserId: userId,
Username: username,
@@ -78,6 +80,7 @@ func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptToke
TokenId: tokenId,
UseTime: useTimeSeconds,
IsStream: isStream,
Other: otherStr,
}
err := DB.Create(log).Error
if err != nil {
@@ -139,6 +142,15 @@ func GetUserLogs(userId int, logType int, startTimestamp int64, endTimestamp int
tx = tx.Where("created_at <= ?", endTimestamp)
}
err = tx.Order("id desc").Limit(num).Offset(startIdx).Omit("id").Find(&logs).Error
for i := range logs {
var otherMap map[string]interface{}
otherMap = common.StrToMap(logs[i].Other)
if otherMap != nil {
// delete admin
delete(otherMap, "admin_info")
}
logs[i].Other = common.MapToJsonStr(otherMap)
}
return logs, err
}
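RecordConsumeLog now takes an extra other map that is serialized into the new Other column, and GetUserLogs strips the admin_info key before returning logs to regular users. A sketch of a caller, mirroring the relay code later in this diff (the concrete values are made up):

other := make(map[string]interface{})
other["model_ratio"] = 15.0
other["group_ratio"] = 1.0
// admin_info stays visible to admins only; GetUserLogs deletes it for normal users.
other["admin_info"] = map[string]interface{}{"use_channel": []string{"3"}}
model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens,
	modelName, tokenName, quota, logContent, tokenId, userQuota,
	int(useTimeSeconds), isStream, other)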
@@ -93,12 +93,12 @@ func InitDB() (err error) {
if !common.IsMasterNode {
return nil
}
if common.UsingMySQL {
_, _ = sqlDB.Exec("DROP INDEX idx_channels_key ON channels;") // TODO: delete this line when most users have upgraded
_, _ = sqlDB.Exec("ALTER TABLE midjourneys MODIFY action VARCHAR(40);") // TODO: delete this line when most users have upgraded
_, _ = sqlDB.Exec("ALTER TABLE midjourneys MODIFY progress VARCHAR(30);") // TODO: delete this line when most users have upgraded
_, _ = sqlDB.Exec("ALTER TABLE midjourneys MODIFY status VARCHAR(20);") // TODO: delete this line when most users have upgraded
}
//if common.UsingMySQL {
//	_, _ = sqlDB.Exec("DROP INDEX idx_channels_key ON channels;") // TODO: delete this line when most users have upgraded
//	_, _ = sqlDB.Exec("ALTER TABLE midjourneys MODIFY action VARCHAR(40);") // TODO: delete this line when most users have upgraded
//	_, _ = sqlDB.Exec("ALTER TABLE midjourneys MODIFY progress VARCHAR(30);") // TODO: delete this line when most users have upgraded
//	_, _ = sqlDB.Exec("ALTER TABLE midjourneys MODIFY status VARCHAR(20);") // TODO: delete this line when most users have upgraded
//}
common.SysLog("database migration started")
err = db.AutoMigrate(&Channel{})
if err != nil {
@@ -83,6 +83,7 @@ func InitOptionMap() {
common.OptionMap["ModelRatio"] = common.ModelRatio2JSONString()
common.OptionMap["ModelPrice"] = common.ModelPrice2JSONString()
common.OptionMap["GroupRatio"] = common.GroupRatio2JSONString()
common.OptionMap["CompletionRatio"] = common.CompletionRatio2JSONString()
common.OptionMap["TopUpLink"] = common.TopUpLink
common.OptionMap["ChatLink"] = common.ChatLink
common.OptionMap["ChatLink2"] = common.ChatLink2
@@ -92,6 +93,7 @@ func InitOptionMap() {
common.OptionMap["DataExportDefaultTime"] = common.DataExportDefaultTime
common.OptionMap["DefaultCollapseSidebar"] = strconv.FormatBool(common.DefaultCollapseSidebar)
common.OptionMap["MjNotifyEnabled"] = strconv.FormatBool(constant.MjNotifyEnabled)
common.OptionMap["MjAccountFilterEnabled"] = strconv.FormatBool(constant.MjAccountFilterEnabled)
common.OptionMap["MjModeClearEnabled"] = strconv.FormatBool(constant.MjModeClearEnabled)
common.OptionMap["MjForwardUrlEnabled"] = strconv.FormatBool(constant.MjForwardUrlEnabled)
common.OptionMap["CheckSensitiveEnabled"] = strconv.FormatBool(constant.CheckSensitiveEnabled)
@@ -197,6 +199,8 @@ func updateOptionMap(key string, value string) (err error) {
common.DefaultCollapseSidebar = boolValue
case "MjNotifyEnabled":
constant.MjNotifyEnabled = boolValue
case "MjAccountFilterEnabled":
constant.MjAccountFilterEnabled = boolValue
case "MjModeClearEnabled":
constant.MjModeClearEnabled = boolValue
case "MjForwardUrlEnabled":
@@ -287,6 +291,8 @@ func updateOptionMap(key string, value string) (err error) {
err = common.UpdateModelRatioByJSONString(value)
case "GroupRatio":
err = common.UpdateGroupRatioByJSONString(value)
case "CompletionRatio":
err = common.UpdateCompletionRatioByJSONString(value)
case "ModelPrice":
err = common.UpdateModelPriceByJSONString(value)
case "TopUpLink":
model/pricing.go (63, new file)
@@ -0,0 +1,63 @@
package model

import (
"one-api/common"
"one-api/dto"
"sync"
"time"
)

var (
pricingMap []dto.ModelPricing
lastGetPricingTime time.Time
updatePricingLock sync.Mutex
)

func GetPricing(group string) []dto.ModelPricing {
updatePricingLock.Lock()
defer updatePricingLock.Unlock()

if time.Since(lastGetPricingTime) > time.Minute*1 || len(pricingMap) == 0 {
updatePricing()
}
if group != "" {
userPricingMap := make([]dto.ModelPricing, 0)
models := GetGroupModels(group)
for _, pricing := range pricingMap {
if !common.StringsContains(models, pricing.ModelName) {
pricing.Available = false
}
userPricingMap = append(userPricingMap, pricing)
}
return userPricingMap
}
return pricingMap
}

func updatePricing() {
//modelRatios := common.GetModelRatios()
enabledModels := GetEnabledModels()
allModels := make(map[string]int)
for i, model := range enabledModels {
allModels[model] = i
}

pricingMap = make([]dto.ModelPricing, 0)
for model, _ := range allModels {
pricing := dto.ModelPricing{
Available: true,
ModelName: model,
}
modelPrice, findPrice := common.GetModelPrice(model, false)
if findPrice {
pricing.ModelPrice = modelPrice
pricing.QuotaType = 1
} else {
pricing.ModelRatio = common.GetModelRatio(model)
pricing.CompletionRatio = common.GetCompletionRatio(model)
pricing.QuotaType = 0
}
pricingMap = append(pricingMap, pricing)
}
lastGetPricingTime = time.Now()
}
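The router later in this diff exposes GET /api/pricing through controller.GetPricing behind TryUserAuth. The controller body is not shown in this comparison; a plausible sketch (the JSON shape and group resolution are assumptions):

func GetPricing(c *gin.Context) {
	group := ""
	if _, exists := c.Get("id"); exists {
		// a real implementation would look up the logged-in user's group here
		group = "default"
	}
	c.JSON(http.StatusOK, gin.H{"success": true, "data": model.GetPricing(group)})
}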
@@ -11,7 +11,7 @@ import (

type Token struct {
Id int `json:"id"`
UserId int `json:"user_id"`
UserId int `json:"user_id" gorm:"index"`
Key string `json:"key" gorm:"type:char(48);uniqueIndex"`
Status int `json:"status" gorm:"default:1"`
Name string `json:"name" gorm:"index" `
@@ -45,6 +45,7 @@ func logQuotaDataCache(userId int, username string, modelName string, quota int,
if ok {
quotaData.Count += 1
quotaData.Quota += quota
quotaData.TokenUsed += tokenUsed
} else {
quotaData = &QuotaData{
UserID: userId,
@@ -253,14 +253,17 @@ func (user *User) Edit(updatePassword bool) error {
}
}
newUser := *user
DB.First(&user, user.Id)
err = DB.Model(user).Updates(map[string]interface{}{
updates := map[string]interface{}{
"username": newUser.Username,
"password": newUser.Password,
"display_name": newUser.DisplayName,
"group": newUser.Group,
"quota": newUser.Quota,
}).Error
}
if updatePassword {
updates["password"] = newUser.Password
}
DB.First(&user, user.Id)
err = DB.Model(user).Updates(updates).Error
if err == nil {
if common.RedisEnabled {
_ = common.RedisSet(fmt.Sprintf("user_group:%d", user.Id), user.Group, time.Duration(UserId2GroupCacheSeconds)*time.Second)
@@ -1,8 +1,13 @@
package ai360

var ModelList = []string{
"360gpt-turbo",
"360gpt-turbo-responsibility-8k",
"360gpt-pro",
"360GPT_S2_V9",
"embedding-bert-512-v1",
"embedding_s1_v1",
"semantic_similarity_s1_v1",
}

var ChannelName = "ai360"
@@ -5,6 +5,7 @@ import "one-api/relay/channel/claude"

type AwsClaudeRequest struct {
// AnthropicVersion should be "bedrock-2023-05-31"
AnthropicVersion string `json:"anthropic_version"`
System string `json:"system"`
Messages []claude.ClaudeMessage `json:"messages"`
MaxTokens int `json:"max_tokens,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
@@ -156,6 +156,7 @@ func awsStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, requestMode i
var usage relaymodel.Usage
var id string
var model string
createdTime := common.GetTimestamp()
c.Stream(func(w io.Writer) bool {
event, ok := <-stream.Events()
if !ok {
@@ -188,6 +189,7 @@ func awsStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, requestMode i
if response.Model != "" {
model = response.Model
}
response.Created = createdTime
response.Id = id
response.Model = model
@@ -9,6 +9,7 @@ import (
"one-api/relay/channel"
relaycommon "one-api/relay/common"
"one-api/relay/constant"
"strings"
)

type Adaptor struct {
@@ -33,8 +34,24 @@ func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant"
case "BLOOMZ-7B":
fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/bloomz_7b1"
case "ERNIE-4.0-8K":
fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro"
case "ERNIE-3.5-8K":
fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions"
case "ERNIE-Speed-8K":
fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie_speed"
case "ERNIE-Character-8K":
fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-char-8k"
case "ERNIE-Functions-8K":
fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-func-8k"
case "ERNIE-Lite-8K-0922":
fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant"
case "Yi-34B-Chat":
fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/yi_34b_chat"
case "Embedding-V1":
fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings/embedding-v1"
default:
fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/" + strings.ToLower(info.UpstreamModelName)
}
var accessToken string
var err error
@@ -1,11 +1,19 @@
package baidu

var ModelList = []string{
"ERNIE-Bot-4",
"ERNIE-Bot-8K",
"ERNIE-Bot",
"ERNIE-Speed",
"ERNIE-Bot-turbo",
"ERNIE-3.5-8K",
"ERNIE-4.0-8K",
"ERNIE-Speed-8K",
"ERNIE-Speed-128K",
"ERNIE-Lite-8K",
"ERNIE-Tiny-8K",
"ERNIE-Character-8K",
"ERNIE-Functions-8K",
//"ERNIE-Bot-4",
//"ERNIE-Bot-8K",
//"ERNIE-Bot",
//"ERNIE-Speed",
//"ERNIE-Bot-turbo",
"Embedding-V1",
}
@@ -24,15 +24,16 @@ type ClaudeMessage struct {
}

type ClaudeRequest struct {
Model string `json:"model"`
Prompt string `json:"prompt,omitempty"`
System string `json:"system,omitempty"`
Messages []ClaudeMessage `json:"messages,omitempty"`
MaxTokens uint `json:"max_tokens,omitempty"`
StopSequences []string `json:"stop_sequences,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
TopK int `json:"top_k,omitempty"`
Model string `json:"model"`
Prompt string `json:"prompt,omitempty"`
System string `json:"system,omitempty"`
Messages []ClaudeMessage `json:"messages,omitempty"`
MaxTokens uint `json:"max_tokens,omitempty"`
MaxTokensToSample uint `json:"max_tokens_to_sample,omitempty"`
StopSequences []string `json:"stop_sequences,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
TopK int `json:"top_k,omitempty"`
//ClaudeMetadata `json:"metadata,omitempty"`
Stream bool `json:"stream,omitempty"`
}
@@ -30,15 +30,14 @@ func RequestOpenAI2ClaudeComplete(textRequest dto.GeneralOpenAIRequest) *ClaudeR
claudeRequest := ClaudeRequest{
Model: textRequest.Model,
Prompt: "",
MaxTokens: textRequest.MaxTokens,
StopSequences: nil,
Temperature: textRequest.Temperature,
TopP: textRequest.TopP,
TopK: textRequest.TopK,
Stream: textRequest.Stream,
}
if claudeRequest.MaxTokens == 0 {
claudeRequest.MaxTokens = 4096
if claudeRequest.MaxTokensToSample == 0 {
claudeRequest.MaxTokensToSample = 4096
}
prompt := ""
for _, message := range textRequest.Messages {
@@ -181,9 +180,10 @@ func StreamResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) (*
response.Id = claudeResponse.Message.Id
response.Model = claudeResponse.Message.Model
claudeUsage = &claudeResponse.Message.Usage
} else if claudeResponse.Type == "content_block_start" {
choice.Delta.SetContentString("")
choice.Delta.Role = "assistant"
} else if claudeResponse.Type == "content_block_start" {
return nil, nil
} else if claudeResponse.Type == "content_block_delta" {
choice.Index = claudeResponse.Index
choice.Delta.SetContentString(claudeResponse.Delta.Text)
@@ -370,7 +370,7 @@ func claudeHandler(requestMode int, c *gin.Context, resp *http.Response, promptT
}, nil
}
fullTextResponse := ResponseClaude2OpenAI(requestMode, &claudeResponse)
completionTokens, err, _ := service.CountTokenText(claudeResponse.Completion, model, false)
completionTokens, err := service.CountTokenText(claudeResponse.Completion, model)
if err != nil {
return service.OpenAIErrorWrapper(err, "count_token_text_failed", http.StatusInternalServerError), nil
}
@@ -21,6 +21,7 @@ func (a *Adaptor) Init(info *relaycommon.RelayInfo, request dto.GeneralOpenAIReq
// Map each model name to the Gemini API version it requires
var modelVersionMap = map[string]string{
"gemini-1.5-pro-latest": "v1beta",
"gemini-1.5-flash-latest": "v1beta",
"gemini-ultra": "v1beta",
}
@@ -5,7 +5,7 @@ const (
)

var ModelList = []string{
"gemini-1.0-pro-latest", "gemini-1.0-pro-001", "gemini-1.5-pro-latest", "gemini-ultra",
"gemini-1.0-pro-latest", "gemini-1.0-pro-001", "gemini-1.5-pro-latest", "gemini-1.5-flash-latest", "gemini-ultra",
"gemini-1.0-pro-vision-latest", "gemini-1.0-pro-vision-001",
}

@@ -256,7 +256,7 @@ func geminiChatHandler(c *gin.Context, resp *http.Response, promptTokens int, mo
}, nil
}
fullTextResponse := responseGeminiChat2OpenAI(&geminiResponse)
completionTokens, _, _ := service.CountTokenText(geminiResponse.GetResponseText(), model, false)
completionTokens, _ := service.CountTokenText(geminiResponse.GetResponseText(), model)
usage := dto.Usage{
PromptTokens: promptTokens,
CompletionTokens: completionTokens,
@@ -1,9 +1,9 @@
package lingyiwanwu

// https://platform.lingyiwanwu.com/docs

var ModelList = []string{
"yi-34b-chat-0205",
"yi-34b-chat-200k",
"yi-vl-plus",
}
package lingyiwanwu

// https://platform.lingyiwanwu.com/docs

var ModelList = []string{
"yi-large", "yi-medium", "yi-vision", "yi-medium-200k", "yi-spark", "yi-large-rag", "yi-large-turbo", "yi-large-preview", "yi-large-rag-preview",
}

var ChannelName = "lingyiwanwu"
relay/channel/minimax/constants.go (13, new file)
@@ -0,0 +1,13 @@
package minimax

// https://www.minimaxi.com/document/guides/chat-model/V2?id=65e0736ab2845de20908e2dd

var ModelList = []string{
"abab6.5-chat",
"abab6.5s-chat",
"abab6-chat",
"abab5.5-chat",
"abab5.5s-chat",
}

var ChannelName = "minimax"
relay/channel/minimax/relay-minimax.go (10, new file)
@@ -0,0 +1,10 @@
package minimax

import (
"fmt"
relaycommon "one-api/relay/common"
)

func GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
return fmt.Sprintf("%s/v1/text/chatcompletion_v2", info.BaseUrl), nil
}
@@ -5,3 +5,5 @@ var ModelList = []string{
"moonshot-v1-32k",
"moonshot-v1-128k",
}

var ChannelName = "moonshot"

@@ -1,5 +1,7 @@
package ollama

var ModelList []string
var ModelList = []string{
"llama3-7b",
}

var ChannelName = "ollama"
@@ -11,6 +11,7 @@ import (
"one-api/relay/channel"
"one-api/relay/channel/ai360"
"one-api/relay/channel/lingyiwanwu"
"one-api/relay/channel/minimax"
"one-api/relay/channel/moonshot"
relaycommon "one-api/relay/common"
"one-api/service"
@@ -26,7 +27,8 @@ func (a *Adaptor) Init(info *relaycommon.RelayInfo, request dto.GeneralOpenAIReq
}

func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
if info.ChannelType == common.ChannelTypeAzure {
switch info.ChannelType {
case common.ChannelTypeAzure:
// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
requestURL := strings.Split(info.RequestURLPath, "?")[0]
requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, info.ApiVersion)
@@ -37,8 +39,15 @@ func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {

requestURL = fmt.Sprintf("/openai/deployments/%s/%s", model_, task)
return relaycommon.GetFullRequestURL(info.BaseUrl, requestURL, info.ChannelType), nil
case common.ChannelTypeMiniMax:
return minimax.GetRequestURL(info)
case common.ChannelTypeCustom:
url := info.BaseUrl
url = strings.Replace(url, "{model}", info.UpstreamModelName, -1)
return url, nil
default:
return relaycommon.GetFullRequestURL(info.BaseUrl, info.RequestURLPath, info.ChannelType), nil
}
return relaycommon.GetFullRequestURL(info.BaseUrl, info.RequestURLPath, info.ChannelType), nil
}

func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, info *relaycommon.RelayInfo) error {
@@ -90,11 +99,24 @@ func (a *Adaptor) GetModelList() []string {
return moonshot.ModelList
case common.ChannelTypeLingYiWanWu:
return lingyiwanwu.ModelList
case common.ChannelTypeMiniMax:
return minimax.ModelList
default:
return ModelList
}
}

func (a *Adaptor) GetChannelName() string {
return ChannelName
switch a.ChannelType {
case common.ChannelType360:
return ai360.ChannelName
case common.ChannelTypeMoonshot:
return moonshot.ChannelName
case common.ChannelTypeLingYiWanWu:
return lingyiwanwu.ChannelName
case common.ChannelTypeMiniMax:
return minimax.ChannelName
default:
return ChannelName
}
}
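In the new ChannelTypeCustom branch the channel's base URL acts as a template: every {model} placeholder is replaced with the upstream model name. A small illustration with a made-up gateway URL:

base := "https://gateway.example.com/v1/{model}/chat/completions"
url := strings.Replace(base, "{model}", "gpt-4o", -1)
// url == "https://gateway.example.com/v1/gpt-4o/chat/completions"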
@@ -1,19 +1,20 @@
package openai

var ModelList = []string{
"gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125",
"gpt-3.5-turbo", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-instruct",
"gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-1106-preview", "gpt-4-0125-preview",
"gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613",
"gpt-4-turbo-preview",
"gpt-4", "gpt-4-0613", "gpt-4-1106-preview", "gpt-4-0125-preview",
"gpt-4-32k", "gpt-4-32k-0613",
"gpt-4-turbo-preview", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
"gpt-4-vision-preview",
"gpt-4o", "gpt-4o-2024-05-13",
"text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large",
"text-curie-001", "text-babbage-001", "text-ada-001", "text-davinci-002", "text-davinci-003",
"text-curie-001", "text-babbage-001", "text-ada-001",
"text-moderation-latest", "text-moderation-stable",
"text-davinci-edit-001",
"davinci-002", "babbage-002",
"dall-e-2", "dall-e-3",
"dall-e-3",
"whisper-1",
"tts-1", "tts-1-1106", "tts-1-hd", "tts-1-hd-1106",
}
@@ -50,7 +50,7 @@ func OpenaiStreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*d
if data[:6] != "data: " && data[:6] != "[DONE]" {
continue
}
dataChan <- data
common.SafeSendString(dataChan, data)
data = data[6:]
if !strings.HasPrefix(data, "[DONE]") {
streamItems = append(streamItems, data)
@@ -123,7 +123,7 @@ func OpenaiStreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*d
// wait data out
time.Sleep(2 * time.Second)
}
common.SafeSend(stopChan, true)
common.SafeSendBool(stopChan, true)
}()
service.SetEventStreamHeaders(c)
c.Stream(func(w io.Writer) bool {
@@ -190,7 +190,7 @@ func OpenaiHandler(c *gin.Context, resp *http.Response, promptTokens int, model
if simpleResponse.Usage.TotalTokens == 0 {
completionTokens := 0
for _, choice := range simpleResponse.Choices {
ctkm, _, _ := service.CountTokenText(string(choice.Message.Content), model, false)
ctkm, _ := service.CountTokenText(string(choice.Message.Content), model)
completionTokens += ctkm
}
simpleResponse.Usage = dto.Usage{
@@ -156,7 +156,7 @@ func palmHandler(c *gin.Context, resp *http.Response, promptTokens int, model st
}, nil
}
fullTextResponse := responsePaLM2OpenAI(&palmResponse)
completionTokens, _, _ := service.CountTokenText(palmResponse.Candidates[0].Content, model, false)
completionTokens, _ := service.CountTokenText(palmResponse.Candidates[0].Content, model)
usage := dto.Usage{
PromptTokens: promptTokens,
CompletionTokens: completionTokens,

@@ -1,7 +1,7 @@
package perplexity

var ModelList = []string{
"sonar-small-chat", "sonar-small-online", "sonar-medium-chat", "sonar-medium-online", "mistral-7b-instruct", "mixtral-8x7b-instruct",
"llama-3-sonar-small-32k-chat", "llama-3-sonar-small-32k-online", "llama-3-sonar-large-32k-chat", "llama-3-sonar-large-32k-online", "llama-3-8b-instruct", "llama-3-70b-instruct", "mixtral-8x7b-instruct",
}

var ChannelName = "perplexity"
@@ -38,7 +38,7 @@ func GenRelayInfo(c *gin.Context) *RelayInfo {
tokenUnlimited := c.GetBool("token_unlimited_quota")
startTime := time.Now()

apiType := constant.ChannelType2APIType(channelType)
apiType, _ := constant.ChannelType2APIType(channelType)

info := &RelayInfo{
RelayMode: constant.Path2RelayMode(c.Request.URL.Path),
@@ -15,7 +15,7 @@ const (
APITypeAIProxyLibrary
APITypeTencent
APITypeGemini
APITypeZhipu_v4
APITypeZhipuV4
APITypeOllama
APITypePerplexity
APITypeAws
@@ -24,9 +24,11 @@ const (
APITypeDummy // this one is only for count, do not add any channel after this
)

func ChannelType2APIType(channelType int) int {
apiType := APITypeOpenAI
func ChannelType2APIType(channelType int) (int, bool) {
apiType := -1
switch channelType {
case common.ChannelTypeOpenAI:
apiType = APITypeOpenAI
case common.ChannelTypeAnthropic:
apiType = APITypeAnthropic
case common.ChannelTypeBaidu:
@@ -46,7 +48,7 @@ func ChannelType2APIType(channelType int) int {
case common.ChannelTypeGemini:
apiType = APITypeGemini
case common.ChannelTypeZhipu_v4:
apiType = APITypeZhipu_v4
apiType = APITypeZhipuV4
case common.ChannelTypeOllama:
apiType = APITypeOllama
case common.ChannelTypePerplexity:
@@ -56,5 +58,8 @@ func ChannelType2APIType(channelType int) int {
case common.ChannelTypeCohere:
apiType = APITypeCohere
}
return apiType
if apiType == -1 {
return APITypeOpenAI, false
}
return apiType, true
}
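ChannelType2APIType now also reports whether the channel type had an explicit mapping; unknown types fall back to the OpenAI-compatible API type with ok == false. A sketch of a caller that logs the fallback instead of silently ignoring it:

apiType, ok := constant.ChannelType2APIType(channelType)
if !ok {
	// No dedicated adaptor: the channel is treated as OpenAI-compatible.
	common.SysLog(fmt.Sprintf("channel type %d has no dedicated adaptor, falling back to the OpenAI API type", channelType))
}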
@@ -55,7 +55,13 @@ func AudioHelper(c *gin.Context, relayMode int) *dto.OpenAIErrorWithStatusCode {
promptTokens := 0
preConsumedTokens := common.PreConsumedQuota
if strings.HasPrefix(audioRequest.Model, "tts-1") {
promptTokens, err, _ = service.CountAudioToken(audioRequest.Input, audioRequest.Model, constant.ShouldCheckPromptSensitive())
if constant.ShouldCheckPromptSensitive() {
err = service.CheckSensitiveInput(audioRequest.Input)
if err != nil {
return service.OpenAIErrorWrapper(err, "sensitive_words_detected", http.StatusBadRequest)
}
}
promptTokens, err = service.CountAudioToken(audioRequest.Input, audioRequest.Model)
if err != nil {
return service.OpenAIErrorWrapper(err, "count_audio_token_failed", http.StatusInternalServerError)
}
@@ -178,7 +184,7 @@ func AudioHelper(c *gin.Context, relayMode int) *dto.OpenAIErrorWithStatusCode {
if strings.HasPrefix(audioRequest.Model, "tts-1") {
quota = promptTokens
} else {
quota, err, _ = service.CountAudioToken(audioResponse.Text, audioRequest.Model, false)
quota, err = service.CountAudioToken(audioResponse.Text, audioRequest.Model)
}
quota = int(float64(quota) * ratio)
if ratio != 0 && quota <= 0 {
@@ -196,7 +202,10 @@ func AudioHelper(c *gin.Context, relayMode int) *dto.OpenAIErrorWithStatusCode {
if quota != 0 {
tokenName := c.GetString("token_name")
logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
model.RecordConsumeLog(ctx, userId, channelId, promptTokens, 0, audioRequest.Model, tokenName, quota, logContent, tokenId, userQuota, int(useTimeSeconds), false)
other := make(map[string]interface{})
other["model_ratio"] = modelRatio
other["group_ratio"] = groupRatio
model.RecordConsumeLog(ctx, userId, channelId, promptTokens, 0, audioRequest.Model, tokenName, quota, logContent, tokenId, userQuota, int(useTimeSeconds), false, other)
model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
channelId := c.GetInt("channel_id")
model.UpdateChannelUsedQuota(channelId, quota)
@@ -10,6 +10,7 @@ import (
"io"
"net/http"
"one-api/common"
"one-api/constant"
"one-api/dto"
"one-api/model"
relaycommon "one-api/relay/common"
@@ -47,6 +48,13 @@ func RelayImageHelper(c *gin.Context, relayMode int) *dto.OpenAIErrorWithStatusC
return service.OpenAIErrorWrapper(errors.New("prompt is required"), "required_field_missing", http.StatusBadRequest)
}

if constant.ShouldCheckPromptSensitive() {
err = service.CheckSensitiveInput(imageRequest.Prompt)
if err != nil {
return service.OpenAIErrorWrapper(err, "sensitive_words_detected", http.StatusBadRequest)
}
}

if strings.Contains(imageRequest.Size, "×") {
return service.OpenAIErrorWrapper(errors.New("size an unexpected error occurred in the parameter, please use 'x' instead of the multiplication sign '×'"), "invalid_field_value", http.StatusBadRequest)
}
@@ -106,21 +114,26 @@ func RelayImageHelper(c *gin.Context, relayMode int) *dto.OpenAIErrorWithStatusC
requestBody = c.Request.Body
}

modelRatio := common.GetModelRatio(imageRequest.Model)
modelPrice, success := common.GetModelPrice(imageRequest.Model, true)
if !success {
modelRatio := common.GetModelRatio(imageRequest.Model)
// modelRatio 16 = modelPrice $0.04
// per 1 modelRatio = $0.04 / 16
modelPrice = 0.0025 * modelRatio
}
groupRatio := common.GetGroupRatio(group)
ratio := modelRatio * groupRatio
userQuota, err := model.CacheGetUserQuota(userId)

sizeRatio := 1.0
// Size
if imageRequest.Size == "256x256" {
sizeRatio = 1
sizeRatio = 0.4
} else if imageRequest.Size == "512x512" {
sizeRatio = 1.125
sizeRatio = 0.45
} else if imageRequest.Size == "1024x1024" {
sizeRatio = 1.25
sizeRatio = 1
} else if imageRequest.Size == "1024x1792" || imageRequest.Size == "1792x1024" {
sizeRatio = 2.5
sizeRatio = 2
}

qualityRatio := 1.0
@@ -131,7 +144,7 @@ func RelayImageHelper(c *gin.Context, relayMode int) *dto.OpenAIErrorWithStatusC
}
}

quota := int(ratio*sizeRatio*qualityRatio*1000) * imageRequest.N
quota := int(modelPrice*groupRatio*common.QuotaPerUnit*sizeRatio*qualityRatio) * imageRequest.N

if userQuota-quota < 0 {
return service.OpenAIErrorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
@@ -190,8 +203,11 @@ func RelayImageHelper(c *gin.Context, relayMode int) *dto.OpenAIErrorWithStatusC
if imageRequest.Quality == "hd" {
quality = "hd"
}
logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f, 大小 %s, 品质 %s", modelRatio, groupRatio, imageRequest.Size, quality)
model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageRequest.Model, tokenName, quota, logContent, tokenId, userQuota, int(useTimeSeconds), false)
logContent := fmt.Sprintf("模型价格 %.2f,分组倍率 %.2f, 大小 %s, 品质 %s", modelPrice, groupRatio, imageRequest.Size, quality)
other := make(map[string]interface{})
other["model_price"] = modelPrice
other["group_ratio"] = groupRatio
model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageRequest.Model, tokenName, quota, logContent, tokenId, userQuota, int(useTimeSeconds), false, other)
model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
channelId := c.GetInt("channel_id")
model.UpdateChannelUsedQuota(channelId, quota)
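Image quota is now derived from a per-image price instead of a token ratio: quota = modelPrice * groupRatio * QuotaPerUnit * sizeRatio * qualityRatio, multiplied by N. A worked example under assumed numbers (price $0.04, group ratio 1, QuotaPerUnit 500000, size 1024x1024, standard quality, n = 1; only the formula itself comes from this diff):

modelPrice := 0.04       // assumed per-image price
groupRatio := 1.0        // assumed group ratio
quotaPerUnit := 500000.0 // stands in for common.QuotaPerUnit
sizeRatio := 1.0         // 1024x1024
qualityRatio := 1.0      // standard quality
n := 1
quota := int(modelPrice*groupRatio*quotaPerUnit*sizeRatio*qualityRatio) * n
// quota == 20000 under these assumptions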
@@ -155,10 +155,10 @@ func RelaySwapFace(c *gin.Context) *dto.MidjourneyResponse {
return service.MidjourneyErrorWrapper(constant.MjRequestError, "sour_base64_and_target_base64_is_required")
}
modelName := service.CoverActionToModelName(constant.MjActionSwapFace)
modelPrice := common.GetModelPrice(modelName, true)
modelPrice, success := common.GetModelPrice(modelName, true)
// If no price is configured, fall back to the default price
if modelPrice == -1 {
defaultPrice, ok := common.DefaultModelPrice[modelName]
if !success {
defaultPrice, ok := common.GetDefaultModelRatioMap()[modelName]
if !ok {
modelPrice = 0.1
} else {
@@ -202,7 +202,10 @@ func RelaySwapFace(c *gin.Context) *dto.MidjourneyResponse {
if quota != 0 {
tokenName := c.GetString("token_name")
logContent := fmt.Sprintf("模型固定价格 %.2f,分组倍率 %.2f,操作 %s", modelPrice, groupRatio, constant.MjActionSwapFace)
model.RecordConsumeLog(ctx, userId, channelId, 0, 0, modelName, tokenName, quota, logContent, tokenId, userQuota, 0, false)
other := make(map[string]interface{})
other["model_price"] = modelPrice
other["group_ratio"] = groupRatio
model.RecordConsumeLog(ctx, userId, channelId, 0, 0, modelName, tokenName, quota, logContent, tokenId, userQuota, 0, false, other)
model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
channelId := c.GetInt("channel_id")
model.UpdateChannelUsedQuota(channelId, quota)
@@ -451,10 +454,10 @@ func RelayMidjourneySubmit(c *gin.Context, relayMode int) *dto.MidjourneyRespons
fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)

modelName := service.CoverActionToModelName(midjRequest.Action)
modelPrice := common.GetModelPrice(modelName, true)
modelPrice, success := common.GetModelPrice(modelName, true)
// If no price is configured, fall back to the default price
if modelPrice == -1 {
defaultPrice, ok := common.DefaultModelPrice[modelName]
if !success {
defaultPrice, ok := common.GetDefaultModelRatioMap()[modelName]
if !ok {
modelPrice = 0.1
} else {
@@ -498,7 +501,10 @@ func RelayMidjourneySubmit(c *gin.Context, relayMode int) *dto.MidjourneyRespons
if quota != 0 {
tokenName := c.GetString("token_name")
logContent := fmt.Sprintf("模型固定价格 %.2f,分组倍率 %.2f,操作 %s", modelPrice, groupRatio, midjRequest.Action)
model.RecordConsumeLog(ctx, userId, channelId, 0, 0, modelName, tokenName, quota, logContent, tokenId, userQuota, 0, false)
other := make(map[string]interface{})
other["model_price"] = modelPrice
other["group_ratio"] = groupRatio
model.RecordConsumeLog(ctx, userId, channelId, 0, 0, modelName, tokenName, quota, logContent, tokenId, userQuota, 0, false, other)
model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
channelId := c.GetInt("channel_id")
model.UpdateChannelUsedQuota(channelId, quota)
@@ -91,24 +91,28 @@ func TextHelper(c *gin.Context) *dto.OpenAIErrorWithStatusCode {
}
}
relayInfo.UpstreamModelName = textRequest.Model
modelPrice := common.GetModelPrice(textRequest.Model, false)
modelPrice, success := common.GetModelPrice(textRequest.Model, false)
groupRatio := common.GetGroupRatio(relayInfo.Group)

var preConsumedQuota int
var ratio float64
var modelRatio float64
//err := service.SensitiveWordsCheck(textRequest)
promptTokens, err, sensitiveTrigger := getPromptTokens(textRequest, relayInfo)

// failed to count prompt tokens
if err != nil {
if sensitiveTrigger {
if constant.ShouldCheckPromptSensitive() {
err = checkRequestSensitive(textRequest, relayInfo)
if err != nil {
return service.OpenAIErrorWrapperLocal(err, "sensitive_words_detected", http.StatusBadRequest)
}
}

promptTokens, err := getPromptTokens(textRequest, relayInfo)
// failed to count prompt tokens
if err != nil {
return service.OpenAIErrorWrapper(err, "count_token_messages_failed", http.StatusInternalServerError)
}

if modelPrice == -1 {
if !success {
preConsumedTokens := common.PreConsumedQuota
if textRequest.MaxTokens != 0 {
preConsumedTokens = promptTokens + int(textRequest.MaxTokens)
@@ -128,7 +132,7 @@ func TextHelper(c *gin.Context) *dto.OpenAIErrorWithStatusCode {

adaptor := GetAdaptor(relayInfo.ApiType)
if adaptor == nil {
return service.OpenAIErrorWrapper(fmt.Errorf("invalid api type: %d", relayInfo.ApiType), "invalid_api_type", http.StatusBadRequest)
return service.OpenAIErrorWrapperLocal(fmt.Errorf("invalid api type: %d", relayInfo.ApiType), "invalid_api_type", http.StatusBadRequest)
}
adaptor.Init(relayInfo, *textRequest)
var requestBody io.Reader
@@ -136,7 +140,7 @@ func TextHelper(c *gin.Context) *dto.OpenAIErrorWithStatusCode {
if isModelMapped {
jsonStr, err := json.Marshal(textRequest)
if err != nil {
return service.OpenAIErrorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
return service.OpenAIErrorWrapperLocal(err, "marshal_text_request_failed", http.StatusInternalServerError)
}
requestBody = bytes.NewBuffer(jsonStr)
} else {
@@ -145,11 +149,11 @@ func TextHelper(c *gin.Context) *dto.OpenAIErrorWithStatusCode {
} else {
convertedRequest, err := adaptor.ConvertRequest(c, relayInfo.RelayMode, textRequest)
if err != nil {
return service.OpenAIErrorWrapper(err, "convert_request_failed", http.StatusInternalServerError)
return service.OpenAIErrorWrapperLocal(err, "convert_request_failed", http.StatusInternalServerError)
}
jsonData, err := json.Marshal(convertedRequest)
if err != nil {
return service.OpenAIErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
return service.OpenAIErrorWrapperLocal(err, "json_marshal_failed", http.StatusInternalServerError)
}
requestBody = bytes.NewBuffer(jsonData)
}
@@ -178,30 +182,43 @@ func TextHelper(c *gin.Context) *dto.OpenAIErrorWithStatusCode {
service.ResetStatusCode(openaiErr, statusCodeMappingStr)
return openaiErr
}
postConsumeQuota(c, relayInfo, *textRequest, usage, ratio, preConsumedQuota, userQuota, modelRatio, groupRatio, modelPrice)
postConsumeQuota(c, relayInfo, *textRequest, usage, ratio, preConsumedQuota, userQuota, modelRatio, groupRatio, modelPrice, success)
return nil
}

func getPromptTokens(textRequest *dto.GeneralOpenAIRequest, info *relaycommon.RelayInfo) (int, error, bool) {
func getPromptTokens(textRequest *dto.GeneralOpenAIRequest, info *relaycommon.RelayInfo) (int, error) {
var promptTokens int
var err error
var sensitiveTrigger bool
checkSensitive := constant.ShouldCheckPromptSensitive()
switch info.RelayMode {
case relayconstant.RelayModeChatCompletions:
promptTokens, err, sensitiveTrigger = service.CountTokenChatRequest(*textRequest, textRequest.Model, checkSensitive)
promptTokens, err = service.CountTokenChatRequest(*textRequest, textRequest.Model)
case relayconstant.RelayModeCompletions:
promptTokens, err, sensitiveTrigger = service.CountTokenInput(textRequest.Prompt, textRequest.Model, checkSensitive)
promptTokens, err = service.CountTokenInput(textRequest.Prompt, textRequest.Model)
case relayconstant.RelayModeModerations:
promptTokens, err, sensitiveTrigger = service.CountTokenInput(textRequest.Input, textRequest.Model, checkSensitive)
promptTokens, err = service.CountTokenInput(textRequest.Input, textRequest.Model)
case relayconstant.RelayModeEmbeddings:
promptTokens, err, sensitiveTrigger = service.CountTokenInput(textRequest.Input, textRequest.Model, checkSensitive)
promptTokens, err = service.CountTokenInput(textRequest.Input, textRequest.Model)
default:
err = errors.New("unknown relay mode")
promptTokens = 0
}
info.PromptTokens = promptTokens
return promptTokens, err, sensitiveTrigger
return promptTokens, err
}

func checkRequestSensitive(textRequest *dto.GeneralOpenAIRequest, info *relaycommon.RelayInfo) error {
var err error
switch info.RelayMode {
case relayconstant.RelayModeChatCompletions:
err = service.CheckSensitiveMessages(textRequest.Messages)
case relayconstant.RelayModeCompletions:
err = service.CheckSensitiveInput(textRequest.Prompt)
case relayconstant.RelayModeModerations:
err = service.CheckSensitiveInput(textRequest.Input)
case relayconstant.RelayModeEmbeddings:
err = service.CheckSensitiveInput(textRequest.Input)
}
return err
}

// Pre-consume quota and return the user's remaining quota
@@ -257,19 +274,19 @@ func returnPreConsumedQuota(c *gin.Context, tokenId int, userQuota int, preConsu

func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, textRequest dto.GeneralOpenAIRequest,
usage *dto.Usage, ratio float64, preConsumedQuota int, userQuota int, modelRatio float64, groupRatio float64,
modelPrice float64) {
modelPrice float64, usePrice bool) {

useTimeSeconds := time.Now().Unix() - relayInfo.StartTime.Unix()
promptTokens := usage.PromptTokens
completionTokens := usage.CompletionTokens

tokenName := ctx.GetString("token_name")
completionRatio := common.GetCompletionRatio(textRequest.Model)

quota := 0
if modelPrice == -1 {
completionRatio := common.GetCompletionRatio(textRequest.Model)
quota = promptTokens + int(float64(completionTokens)*completionRatio)
quota = int(float64(quota) * ratio)
if !usePrice {
quota = promptTokens + int(math.Round(float64(completionTokens)*completionRatio))
quota = int(math.Round(float64(quota) * ratio))
if ratio != 0 && quota <= 0 {
quota = 1
}
@@ -279,7 +296,7 @@ func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, textRe
totalTokens := promptTokens + completionTokens
var logContent string
if modelPrice == -1 {
logContent = fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
logContent = fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f,补全倍率 %.2f", modelRatio, groupRatio, completionRatio)
} else {
logContent = fmt.Sprintf("模型价格 %.2f,分组倍率 %.2f", modelPrice, groupRatio)
}
@@ -315,7 +332,15 @@ func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, textRe
logModel = "gpt-4-gizmo-*"
logContent += fmt.Sprintf(",模型 %s", textRequest.Model)
}
model.RecordConsumeLog(ctx, relayInfo.UserId, relayInfo.ChannelId, promptTokens, completionTokens, logModel, tokenName, quota, logContent, relayInfo.TokenId, userQuota, int(useTimeSeconds), relayInfo.IsStream)
other := make(map[string]interface{})
other["model_ratio"] = modelRatio
other["group_ratio"] = groupRatio
other["completion_ratio"] = completionRatio
other["model_price"] = modelPrice
adminInfo := make(map[string]interface{})
adminInfo["use_channel"] = ctx.GetStringSlice("use_channel")
other["admin_info"] = adminInfo
model.RecordConsumeLog(ctx, relayInfo.UserId, relayInfo.ChannelId, promptTokens, completionTokens, logModel, tokenName, quota, logContent, relayInfo.TokenId, userQuota, int(useTimeSeconds), relayInfo.IsStream, other)

//if quota != 0 {
//
@@ -41,7 +41,7 @@ func GetAdaptor(apiType int) channel.Adaptor {
return &xunfei.Adaptor{}
case constant.APITypeZhipu:
return &zhipu.Adaptor{}
case constant.APITypeZhipu_v4:
case constant.APITypeZhipuV4:
return &zhipu_4v.Adaptor{}
case constant.APITypeOllama:
return &ollama.Adaptor{}
@@ -14,11 +14,13 @@ func SetApiRouter(router *gin.Engine) {
apiRouter.Use(middleware.GlobalAPIRateLimit())
{
apiRouter.GET("/status", controller.GetStatus)
apiRouter.GET("/models", middleware.UserAuth(), controller.DashboardListModels)
apiRouter.GET("/status/test", middleware.AdminAuth(), controller.TestStatus)
apiRouter.GET("/notice", controller.GetNotice)
apiRouter.GET("/about", controller.GetAbout)
//apiRouter.GET("/midjourney", controller.GetMidjourney)
apiRouter.GET("/home_page_content", controller.GetHomePageContent)
apiRouter.GET("/pricing", middleware.TryUserAuth(), controller.GetPricing)
apiRouter.GET("/verification", middleware.CriticalRateLimit(), middleware.TurnstileCheck(), controller.SendEmailVerification)
apiRouter.GET("/reset_password", middleware.CriticalRateLimit(), middleware.TurnstileCheck(), controller.SendPasswordResetEmail)
apiRouter.POST("/user/reset", middleware.CriticalRateLimit(), controller.ResetPassword)
@@ -70,6 +72,7 @@ func SetApiRouter(router *gin.Engine) {
{
optionRoute.GET("/", controller.GetOptions)
optionRoute.PUT("/", controller.UpdateOption)
optionRoute.POST("/rest_model_ratio", controller.ResetModelRatio)
}
channelRoute := apiRouter.Group("/channel")
channelRoute.Use(middleware.AdminAuth())
@@ -88,6 +91,8 @@ func SetApiRouter(router *gin.Engine) {
channelRoute.DELETE("/:id", controller.DeleteChannel)
channelRoute.POST("/batch", controller.DeleteChannelBatch)
channelRoute.POST("/fix", controller.FixChannelsAbilities)
channelRoute.GET("/fetch_models/:id", controller.FetchUpstreamModels)

}
tokenRoute := apiRouter.Group("/token")
tokenRoute.Use(middleware.UserAuth())
@@ -63,7 +63,7 @@ func ShouldDisableChannel(err *relaymodel.OpenAIError, statusCode int) bool {
return false
}

func ShouldEnableChannel(err error, openAIErr *relaymodel.OpenAIError) bool {
func ShouldEnableChannel(err error, openAIErr *relaymodel.OpenAIError, status int) bool {
if !common.AutomaticEnableChannelEnabled {
return false
}
@@ -73,5 +73,8 @@ func ShouldEnableChannel(err error, openAIErr *relaymodel.OpenAIError) bool {
if openAIErr != nil {
return false
}
if status != common.ChannelStatusAutoDisabled {
return false
}
return true
}
@@ -165,7 +165,9 @@ func DoMidjourneyHttpRequest(c *gin.Context, timeout time.Duration, fullRequestU
if err != nil {
return MidjourneyErrorWithStatusCodeWrapper(constant.MjErrorUnknown, "read_request_body_failed", http.StatusInternalServerError), nullBytes, err
}
delete(mapResult, "accountFilter")
if !constant.MjAccountFilterEnabled {
delete(mapResult, "accountFilter")
}
if !constant.MjNotifyEnabled {
delete(mapResult, "notifyHook")
}
@@ -174,11 +176,11 @@ func DoMidjourneyHttpRequest(c *gin.Context, timeout time.Duration, fullRequestU
}
if constant.MjModeClearEnabled {
if prompt, ok := mapResult["prompt"].(string); ok {
prompt = strings.Replace(prompt, "--fast", "", -1)
prompt = strings.Replace(prompt, "--relax", "", -1)
prompt = strings.Replace(prompt, "--turbo", "", -1)

mapResult["prompt"] = prompt
prompt = strings.Replace(prompt, "--fast", "", -1)
prompt = strings.Replace(prompt, "--relax", "", -1)
prompt = strings.Replace(prompt, "--turbo", "", -1)

mapResult["prompt"] = prompt
}
}
reqBody, err := json.Marshal(mapResult)
@@ -1,13 +1,60 @@
package service

import (
"bytes"
"errors"
"fmt"
"github.com/anknown/ahocorasick"
"one-api/common"
"one-api/constant"
"one-api/dto"
"strings"
)

func CheckSensitiveMessages(messages []dto.Message) error {
for _, message := range messages {
if len(message.Content) > 0 {
if message.IsStringContent() {
stringContent := message.StringContent()
if ok, words := SensitiveWordContains(stringContent); ok {
return errors.New("sensitive words: " + strings.Join(words, ","))
}
}
} else {
arrayContent := message.ParseContent()
for _, m := range arrayContent {
if m.Type == "image_url" {
// TODO: check image url
} else {
if ok, words := SensitiveWordContains(m.Text); ok {
return errors.New("sensitive words: " + strings.Join(words, ","))
}
}
}
}
}
return nil
}

func CheckSensitiveText(text string) error {
if ok, words := SensitiveWordContains(text); ok {
return errors.New("sensitive words: " + strings.Join(words, ","))
}
return nil
}

func CheckSensitiveInput(input any) error {
switch v := input.(type) {
case string:
return CheckSensitiveText(v)
case []string:
text := ""
for _, s := range v {
text += s
}
return CheckSensitiveText(text)
}
return CheckSensitiveText(fmt.Sprintf("%v", input))
}

// SensitiveWordContains reports whether the text contains sensitive words and returns the matched words
func SensitiveWordContains(text string) (bool, []string) {
if len(constant.SensitiveWords) == 0 {
@@ -15,7 +62,7 @@ func SensitiveWordContains(text string) (bool, []string) {
}
checkText := strings.ToLower(text)
// Build an Aho-Corasick automaton
m := initAc()
m := common.InitAc()
hits := m.MultiPatternSearch([]rune(checkText), false)
if len(hits) > 0 {
words := make([]string, 0)
@@ -33,7 +80,7 @@ func SensitiveWordReplace(text string, returnImmediately bool) (bool, []string,
return false, nil, text
}
checkText := strings.ToLower(text)
m := initAc()
m := common.InitAc()
hits := m.MultiPatternSearch([]rune(checkText), returnImmediately)
if len(hits) > 0 {
words := make([]string, 0)
@@ -47,25 +94,3 @@ func SensitiveWordReplace(text string, returnImmediately bool) (bool, []string,
}
return false, nil, text
}

func initAc() *goahocorasick.Machine {
m := new(goahocorasick.Machine)
dict := readRunes()
if err := m.Build(dict); err != nil {
fmt.Println(err)
return nil
}
return m
}

func readRunes() [][]rune {
var dict [][]rune

for _, word := range constant.SensitiveWords {
word = strings.ToLower(word)
l := bytes.TrimSpace([]byte(word))
dict = append(dict, bytes.Runes(l))
}

return dict
}
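CheckSensitiveInput accepts either a plain string or a slice of strings (the shapes an OpenAI prompt/input field can take) and falls back to fmt.Sprintf for anything else. Usage sketch:

if err := service.CheckSensitiveInput("some user prompt"); err != nil {
	// the relay layer maps this to a "sensitive_words_detected" error
}
if err := service.CheckSensitiveInput([]string{"part one", "part two"}); err != nil {
	// slice inputs are concatenated and checked as one text
}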
@@ -4,7 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/pkoukk/tiktoken-go"
|
||||
"github.com/linux-do/tiktoken-go"
|
||||
"image"
|
||||
"log"
|
||||
"math"
|
||||
@@ -26,14 +26,19 @@ func InitTokenEncoders() {
|
||||
}
|
||||
defaultTokenEncoder = gpt35TokenEncoder
|
||||
gpt4TokenEncoder, err := tiktoken.EncodingForModel("gpt-4")
|
||||
gpt4oTokenEncoder, err := tiktoken.EncodingForModel("gpt-4o")
|
||||
if err != nil {
|
||||
common.FatalLog(fmt.Sprintf("failed to get gpt-4 token encoder: %s", err.Error()))
|
||||
}
|
||||
for model, _ := range common.DefaultModelRatio {
|
||||
for model, _ := range common.GetDefaultModelRatioMap() {
|
||||
if strings.HasPrefix(model, "gpt-3.5") {
|
||||
tokenEncoderMap[model] = gpt35TokenEncoder
|
||||
} else if strings.HasPrefix(model, "gpt-4") {
|
||||
tokenEncoderMap[model] = gpt4TokenEncoder
|
||||
if strings.HasPrefix(model, "gpt-4o") {
|
||||
tokenEncoderMap[model] = gpt4oTokenEncoder
|
||||
} else {
|
||||
tokenEncoderMap[model] = gpt4TokenEncoder
|
||||
}
|
||||
} else {
|
||||
tokenEncoderMap[model] = nil
|
||||
}
|
||||
@@ -62,7 +67,11 @@ func getTokenNum(tokenEncoder *tiktoken.Tiktoken, text string) int {
  return len(tokenEncoder.Encode(text, nil, nil))
}

func getImageToken(imageUrl *dto.MessageImageUrl) (int, error) {
func getImageToken(imageUrl *dto.MessageImageUrl, model string, stream bool) (int, error) {
  // TODO: 非流模式下不计算图片token数量
  if model == "glm-4v" {
    return 1047, nil
  }
  if imageUrl.Detail == "low" {
    return 85, nil
  }
@@ -116,11 +125,11 @@ func getImageToken(imageUrl *dto.MessageImageUrl) (int, error) {
  return tiles*170 + 85, nil
}
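getImageToken now receives the model (so glm-4v can return a fixed 1047) and short-circuits to a flat 85 tokens for detail "low"; the closing tiles*170 + 85 is the usual OpenAI vision estimate of 170 tokens per 512-px tile plus an 85-token base. A small sketch of that arithmetic; the four-tile figure in the example assumes the standard resize rule (shortest side scaled to 768 px before tiling):

package main

import "fmt"

// estimateImageTokens reproduces the arithmetic visible in the hunk above:
// low detail is a flat 85 tokens, otherwise 170 tokens per tile plus a base 85.
func estimateImageTokens(detail string, tiles int) int {
  if detail == "low" {
    return 85
  }
  return tiles*170 + 85
}

func main() {
  // A 1024x1024 high-detail image scales to 768x768 and splits into four 512-px tiles.
  fmt.Println(estimateImageTokens("high", 4)) // 765
  fmt.Println(estimateImageTokens("low", 0))  // 85
}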
func CountTokenChatRequest(request dto.GeneralOpenAIRequest, model string, checkSensitive bool) (int, error, bool) {
func CountTokenChatRequest(request dto.GeneralOpenAIRequest, model string) (int, error) {
  tkm := 0
  msgTokens, err, b := CountTokenMessages(request.Messages, model, checkSensitive)
  msgTokens, err := CountTokenMessages(request.Messages, model, request.Stream)
  if err != nil {
    return 0, err, b
    return 0, err
  }
  tkm += msgTokens
  if request.Tools != nil {
@@ -128,7 +137,7 @@ func CountTokenChatRequest(request dto.GeneralOpenAIRequest, model string, check
    var openaiTools []dto.OpenAITools
    err := json.Unmarshal(toolsData, &openaiTools)
    if err != nil {
      return 0, errors.New(fmt.Sprintf("count_tools_token_fail: %s", err.Error())), false
      return 0, errors.New(fmt.Sprintf("count_tools_token_fail: %s", err.Error()))
    }
    countStr := ""
    for _, tool := range openaiTools {
@@ -140,18 +149,18 @@ func CountTokenChatRequest(request dto.GeneralOpenAIRequest, model string, check
        countStr += fmt.Sprintf("%v", tool.Function.Parameters)
      }
    }
    toolTokens, err, _ := CountTokenInput(countStr, model, false)
    toolTokens, err := CountTokenInput(countStr, model)
    if err != nil {
      return 0, err, false
      return 0, err
    }
    tkm += 8
    tkm += toolTokens
  }

  return tkm, nil, false
  return tkm, nil
}
func CountTokenMessages(messages []dto.Message, model string, checkSensitive bool) (int, error, bool) {
func CountTokenMessages(messages []dto.Message, model string, stream bool) (int, error) {
  //recover when panic
  tokenEncoder := getTokenEncoder(model)
  // Reference:
@@ -175,32 +184,19 @@ func CountTokenMessages(messages []dto.Message, model string, checkSensitive boo
    if len(message.Content) > 0 {
      if message.IsStringContent() {
        stringContent := message.StringContent()
        if checkSensitive {
          contains, words := SensitiveWordContains(stringContent)
          if contains {
            err := fmt.Errorf("message contains sensitive words: [%s]", strings.Join(words, ", "))
            return 0, err, true
          }
        }
        tokenNum += getTokenNum(tokenEncoder, stringContent)
        if message.Name != nil {
          tokenNum += tokensPerName
          tokenNum += getTokenNum(tokenEncoder, *message.Name)
        }
      } else {
        var err error
        arrayContent := message.ParseContent()
        for _, m := range arrayContent {
          if m.Type == "image_url" {
            var imageTokenNum int
            if model == "glm-4v" {
              imageTokenNum = 1047
            } else {
              imageUrl := m.ImageUrl.(dto.MessageImageUrl)
              imageTokenNum, err = getImageToken(&imageUrl)
              if err != nil {
                return 0, err, false
              }
            imageUrl := m.ImageUrl.(dto.MessageImageUrl)
            imageTokenNum, err := getImageToken(&imageUrl, model, stream)
            if err != nil {
              return 0, err
            }
            tokenNum += imageTokenNum
            log.Printf("image token num: %d", imageTokenNum)
@@ -212,33 +208,33 @@ func CountTokenMessages(messages []dto.Message, model string, checkSensitive boo
    }
  }
  tokenNum += 3 // Every reply is primed with <|start|>assistant<|message|>
  return tokenNum, nil, false
  return tokenNum, nil
}

func CountTokenInput(input any, model string, check bool) (int, error, bool) {
func CountTokenInput(input any, model string) (int, error) {
  switch v := input.(type) {
  case string:
    return CountTokenText(v, model, check)
    return CountTokenText(v, model)
  case []string:
    text := ""
    for _, s := range v {
      text += s
    }
    return CountTokenText(text, model, check)
    return CountTokenText(text, model)
  }
  return CountTokenInput(fmt.Sprintf("%v", input), model, check)
  return CountTokenInput(fmt.Sprintf("%v", input), model)
}

func CountTokenStreamChoices(messages []dto.ChatCompletionsStreamResponseChoice, model string) int {
  tokens := 0
  for _, message := range messages {
    tkm, _, _ := CountTokenInput(message.Delta.Content, model, false)
    tkm, _ := CountTokenInput(message.Delta.GetContentString(), model)
    tokens += tkm
    if message.Delta.ToolCalls != nil {
      for _, tool := range message.Delta.ToolCalls {
        tkm, _, _ := CountTokenInput(tool.Function.Name, model, false)
        tkm, _ := CountTokenInput(tool.Function.Name, model)
        tokens += tkm
        tkm, _, _ = CountTokenInput(tool.Function.Arguments, model, false)
        tkm, _ = CountTokenInput(tool.Function.Arguments, model)
        tokens += tkm
      }
    }
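For reference, the accounting in CountTokenMessages above is the familiar ChatML bookkeeping: a fixed per-message overhead, the encoded content (plus the optional name), and a final 3 tokens because every reply is primed with <|start|>assistant<|message|>. A tiny worked example; the 3-token per-message overhead is an assumption taken from the OpenAI cookbook, since the constant itself is not visible in this hunk:

package main

import "fmt"

func main() {
  // Hypothetical encoded lengths for a two-message chat.
  contentTokens := []int{12, 25}
  tokensPerMessage := 3 // assumed cookbook value, not shown in the diff

  total := 0
  for _, n := range contentTokens {
    total += tokensPerMessage + n
  }
  total += 3 // every reply is primed with <|start|>assistant<|message|>

  fmt.Println(total) // 3+12 + 3+25 + 3 = 46
}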
@@ -246,29 +242,17 @@ func CountTokenStreamChoices(messages []dto.ChatCompletionsStreamResponseChoice,
  return tokens
}

func CountAudioToken(text string, model string, check bool) (int, error, bool) {
func CountAudioToken(text string, model string) (int, error) {
  if strings.HasPrefix(model, "tts") {
    contains, words := SensitiveWordContains(text)
    if contains {
      return utf8.RuneCountInString(text), fmt.Errorf("input contains sensitive words: [%s]", strings.Join(words, ",")), true
    }
    return utf8.RuneCountInString(text), nil, false
    return utf8.RuneCountInString(text), nil
  } else {
    return CountTokenText(text, model, check)
    return CountTokenText(text, model)
  }
}

// CountTokenText 统计文本的token数量,仅当文本包含敏感词,返回错误,同时返回token数量
func CountTokenText(text string, model string, check bool) (int, error, bool) {
func CountTokenText(text string, model string) (int, error) {
  var err error
  var trigger bool
  if check {
    contains, words := SensitiveWordContains(text)
    if contains {
      err = fmt.Errorf("input contains sensitive words: [%s]", strings.Join(words, ","))
      trigger = true
    }
  }
  tokenEncoder := getTokenEncoder(model)
  return getTokenNum(tokenEncoder, text), err, trigger
  return getTokenNum(tokenEncoder, text), err
}
@@ -19,7 +19,7 @@ import (
func ResponseText2Usage(responseText string, modeName string, promptTokens int) (*dto.Usage, error) {
  usage := &dto.Usage{}
  usage.PromptTokens = promptTokens
  ctkm, err, _ := CountTokenText(responseText, modeName, false)
  ctkm, err := CountTokenText(responseText, modeName)
  usage.CompletionTokens = ctkm
  usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
  return usage, err
@@ -5,11 +5,12 @@
  "type": "module",
  "dependencies": {
    "@douyinfe/semi-icons": "^2.46.1",
    "@douyinfe/semi-ui": "^2.46.1",
    "@douyinfe/semi-ui": "^2.55.3",
    "@visactor/react-vchart": "~1.8.8",
    "@visactor/vchart": "~1.8.8",
    "@visactor/vchart-semi-theme": "~1.8.8",
    "axios": "^0.27.2",
    "dayjs": "^1.11.11",
    "history": "^5.3.0",
    "marked": "^4.1.1",
    "react": "^18.2.0",
2533  web/pnpm-lock.yaml (generated, new file; diff suppressed because it is too large)
BIN   web/public/ratio.png (new binary file, 140 KiB, not shown)
@@ -22,6 +22,7 @@ import Log from './pages/Log';
import Chat from './pages/Chat';
import { Layout } from '@douyinfe/semi-ui';
import Midjourney from './pages/Midjourney';
import Pricing from './pages/Pricing/index.js';
// import Detail from './pages/Detail';

const Home = lazy(() => import('./pages/Home'));
@@ -219,6 +220,14 @@ function App() {
            </PrivateRoute>
          }
        />
        <Route
          path='/pricing'
          element={
            <Suspense fallback={<Loading></Loading>}>
              <Pricing />
            </Suspense>
          }
        />
        <Route
          path='/about'
          element={
@@ -6,6 +6,7 @@ import {
|
||||
showError,
|
||||
showInfo,
|
||||
showSuccess,
|
||||
showWarning,
|
||||
timestamp2string,
|
||||
} from '../helpers';
|
||||
|
||||
@@ -31,6 +32,7 @@ import {
|
||||
} from '@douyinfe/semi-ui';
|
||||
import EditChannel from '../pages/Channel/EditChannel';
|
||||
import { IconTreeTriangleDown } from '@douyinfe/semi-icons';
|
||||
import { loadChannelModels } from './utils.js';
|
||||
|
||||
function renderTimestamp(timestamp) {
|
||||
return <>{timestamp2string(timestamp)}</>;
|
||||
@@ -308,6 +310,12 @@ const ChannelsTable = () => {
|
||||
|
||||
const setChannelFormat = (channels) => {
|
||||
for (let i = 0; i < channels.length; i++) {
|
||||
// if (channels[i].type === 8) {
|
||||
// showWarning(
|
||||
// '检测到您使用了“自定义渠道”类型,请更换为“OpenAI”渠道类型!',
|
||||
// );
|
||||
// showWarning('下个版本将不再支持“自定义渠道”类型!');
|
||||
// }
|
||||
channels[i].key = '' + channels[i].id;
|
||||
let test_models = [];
|
||||
channels[i].models.split(',').forEach((item, index) => {
|
||||
@@ -354,27 +362,29 @@ const ChannelsTable = () => {
|
||||
};
|
||||
|
||||
const copySelectedChannel = async (id) => {
|
||||
const channelToCopy = channels.find(channel => String(channel.id) === String(id));
|
||||
console.log(channelToCopy)
|
||||
const channelToCopy = channels.find(
|
||||
(channel) => String(channel.id) === String(id),
|
||||
);
|
||||
console.log(channelToCopy);
|
||||
channelToCopy.name += '_复制';
|
||||
channelToCopy.created_time = null;
|
||||
channelToCopy.balance = 0;
|
||||
channelToCopy.used_quota = 0;
|
||||
if (!channelToCopy) {
|
||||
showError("渠道未找到,请刷新页面后重试。");
|
||||
return;
|
||||
showError('渠道未找到,请刷新页面后重试。');
|
||||
return;
|
||||
}
|
||||
try {
|
||||
const newChannel = {...channelToCopy, id: undefined};
|
||||
const response = await API.post('/api/channel/', newChannel);
|
||||
if (response.data.success) {
|
||||
showSuccess("渠道复制成功");
|
||||
await refresh();
|
||||
} else {
|
||||
showError(response.data.message);
|
||||
}
|
||||
const newChannel = { ...channelToCopy, id: undefined };
|
||||
const response = await API.post('/api/channel/', newChannel);
|
||||
if (response.data.success) {
|
||||
showSuccess('渠道复制成功');
|
||||
await refresh();
|
||||
} else {
|
||||
showError(response.data.message);
|
||||
}
|
||||
} catch (error) {
|
||||
showError("渠道复制失败: " + error.message);
|
||||
showError('渠道复制失败: ' + error.message);
|
||||
}
|
||||
};
|
||||
|
||||
@@ -395,6 +405,7 @@ const ChannelsTable = () => {
|
||||
showError(reason);
|
||||
});
|
||||
fetchGroups().then();
|
||||
loadChannelModels().then();
|
||||
}, []);
|
||||
|
||||
const manageChannel = async (id, action, record, value) => {
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import React, { useEffect, useState } from 'react';
|
||||
|
||||
import { getFooterHTML, getSystemName } from '../helpers';
|
||||
import { Layout } from '@douyinfe/semi-ui';
|
||||
import { Layout, Tooltip } from '@douyinfe/semi-ui';
|
||||
|
||||
const Footer = () => {
|
||||
const systemName = getSystemName();
|
||||
@@ -15,6 +15,34 @@ const Footer = () => {
|
||||
}
|
||||
};
|
||||
|
||||
const defaultFooter = (
|
||||
<div className='custom-footer'>
|
||||
<a
|
||||
href='https://github.com/Calcium-Ion/new-api'
|
||||
target='_blank'
|
||||
rel='noreferrer'
|
||||
>
|
||||
New API {import.meta.env.VITE_REACT_APP_VERSION}{' '}
|
||||
</a>
|
||||
由{' '}
|
||||
<a
|
||||
href='https://github.com/Calcium-Ion'
|
||||
target='_blank'
|
||||
rel='noreferrer'
|
||||
>
|
||||
Calcium-Ion
|
||||
</a>{' '}
|
||||
开发,基于{' '}
|
||||
<a
|
||||
href='https://github.com/songquanpeng/one-api'
|
||||
target='_blank'
|
||||
rel='noreferrer'
|
||||
>
|
||||
One API
|
||||
</a>
|
||||
</div>
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
const timer = setInterval(() => {
|
||||
if (remainCheckTimes <= 0) {
|
||||
@@ -31,41 +59,14 @@ const Footer = () => {
|
||||
<Layout>
|
||||
<Layout.Content style={{ textAlign: 'center' }}>
|
||||
{footer ? (
|
||||
<div
|
||||
className='custom-footer'
|
||||
dangerouslySetInnerHTML={{ __html: footer }}
|
||||
></div>
|
||||
<Tooltip content={defaultFooter}>
|
||||
<div
|
||||
className='custom-footer'
|
||||
dangerouslySetInnerHTML={{ __html: footer }}
|
||||
></div>
|
||||
</Tooltip>
|
||||
) : (
|
||||
<div className='custom-footer'>
|
||||
<a
|
||||
href='https://github.com/Calcium-Ion/new-api'
|
||||
target='_blank'
|
||||
rel='noreferrer'
|
||||
>
|
||||
New API {import.meta.env.VITE_REACT_APP_VERSION}{' '}
|
||||
</a>
|
||||
由{' '}
|
||||
<a
|
||||
href='https://github.com/Calcium-Ion'
|
||||
target='_blank'
|
||||
rel='noreferrer'
|
||||
>
|
||||
Calcium-Ion
|
||||
</a>{' '}
|
||||
开发,基于{' '}
|
||||
<a
|
||||
href='https://github.com/songquanpeng/one-api'
|
||||
target='_blank'
|
||||
rel='noreferrer'
|
||||
>
|
||||
One API v0.5.4
|
||||
</a>{' '}
|
||||
,本项目根据{' '}
|
||||
<a href='https://opensource.org/licenses/mit-license.php'>
|
||||
MIT 许可证
|
||||
</a>{' '}
|
||||
授权
|
||||
</div>
|
||||
defaultFooter
|
||||
)}
|
||||
</Layout.Content>
|
||||
</Layout>
|
||||
|
||||
@@ -19,6 +19,7 @@ import TelegramLoginButton from 'react-telegram-login';
|
||||
|
||||
import { IconGithubLogo } from '@douyinfe/semi-icons';
|
||||
import WeChatIcon from './WeChatIcon';
|
||||
import { setUserData } from '../helpers/data.js';
|
||||
|
||||
const LoginForm = () => {
|
||||
const [inputs, setInputs] = useState({
|
||||
@@ -99,7 +100,7 @@ const LoginForm = () => {
|
||||
const { success, message, data } = res.data;
|
||||
if (success) {
|
||||
userDispatch({ type: 'login', payload: data });
|
||||
localStorage.setItem('user', JSON.stringify(data));
|
||||
setUserData(data);
|
||||
showSuccess('登录成功!');
|
||||
if (username === 'root' && password === '123456') {
|
||||
Modal.error({
|
||||
|
||||
@@ -19,9 +19,15 @@ import {
|
||||
Spin,
|
||||
Table,
|
||||
Tag,
|
||||
Tooltip,
|
||||
} from '@douyinfe/semi-ui';
|
||||
import { ITEMS_PER_PAGE } from '../constants';
|
||||
import { renderNumber, renderQuota, stringToColor } from '../helpers/render';
|
||||
import {
|
||||
renderModelPrice,
|
||||
renderNumber,
|
||||
renderQuota,
|
||||
stringToColor,
|
||||
} from '../helpers/render';
|
||||
import Paragraph from '@douyinfe/semi-ui/lib/es/typography/paragraph';
|
||||
|
||||
const { Header } = Layout;
|
||||
@@ -288,20 +294,76 @@ const LogsTable = () => {
|
||||
);
|
||||
},
|
||||
},
|
||||
{
|
||||
title: '重试',
|
||||
dataIndex: 'retry',
|
||||
className: isAdmin() ? 'tableShow' : 'tableHiddle',
|
||||
render: (text, record, index) => {
|
||||
let content = '渠道:' + record.channel;
|
||||
if (record.other !== '') {
|
||||
let other = JSON.parse(record.other);
|
||||
if (other === null) {
|
||||
return <></>
|
||||
}
|
||||
if (other.admin_info !== undefined) {
|
||||
if (
|
||||
other.admin_info.use_channel !== null &&
|
||||
other.admin_info.use_channel !== undefined &&
|
||||
other.admin_info.use_channel !== ''
|
||||
) {
|
||||
// channel id array
|
||||
let useChannel = other.admin_info.use_channel;
|
||||
let useChannelStr = useChannel.join('->');
|
||||
content = `渠道:${useChannelStr}`;
|
||||
}
|
||||
}
|
||||
}
|
||||
return isAdminUser ? <div>{content}</div> : <></>;
|
||||
},
|
||||
},
|
||||
{
|
||||
title: '详情',
|
||||
dataIndex: 'content',
|
||||
render: (text, record, index) => {
|
||||
if (record.other === '') {
|
||||
record.other = '{}'
|
||||
}
|
||||
let other = JSON.parse(record.other);
|
||||
if (other == null) {
|
||||
return (
|
||||
<Paragraph
|
||||
ellipsis={{
|
||||
rows: 2,
|
||||
showTooltip: {
|
||||
type: 'popover',
|
||||
opts: { style: { width: 240 } },
|
||||
},
|
||||
}}
|
||||
style={{ maxWidth: 240 }}
|
||||
>
|
||||
{text}
|
||||
</Paragraph>
|
||||
);
|
||||
}
|
||||
let content = renderModelPrice(
|
||||
record.prompt_tokens,
|
||||
record.completion_tokens,
|
||||
other.model_ratio,
|
||||
other.model_price,
|
||||
other.completion_ratio,
|
||||
other.group_ratio,
|
||||
);
|
||||
return (
|
||||
<Paragraph
|
||||
ellipsis={{
|
||||
rows: 2,
|
||||
showTooltip: { type: 'popover', opts: { style: { width: 240 } } },
|
||||
}}
|
||||
style={{ maxWidth: 240 }}
|
||||
>
|
||||
{text}
|
||||
</Paragraph>
|
||||
<Tooltip content={content}>
|
||||
<Paragraph
|
||||
ellipsis={{
|
||||
rows: 2,
|
||||
}}
|
||||
style={{ maxWidth: 240 }}
|
||||
>
|
||||
{text}
|
||||
</Paragraph>
|
||||
</Tooltip>
|
||||
);
|
||||
},
|
||||
},
|
||||
|
||||
@@ -236,6 +236,31 @@ const renderTimestamp = (timestampInSeconds) => {
|
||||
|
||||
return `${year}-${month}-${day} ${hours}:${minutes}:${seconds}`; // 格式化输出
|
||||
};
|
||||
// 修改renderDuration函数以包含颜色逻辑
|
||||
function renderDuration(submit_time, finishTime) {
|
||||
// 确保startTime和finishTime都是有效的时间戳
|
||||
if (!submit_time || !finishTime) return 'N/A';
|
||||
|
||||
// 将时间戳转换为Date对象
|
||||
const start = new Date(submit_time);
|
||||
const finish = new Date(finishTime);
|
||||
|
||||
// 计算时间差(毫秒)
|
||||
const durationMs = finish - start;
|
||||
|
||||
// 将时间差转换为秒,并保留一位小数
|
||||
const durationSec = (durationMs / 1000).toFixed(1);
|
||||
|
||||
// 设置颜色:大于60秒则为红色,小于等于60秒则为绿色
|
||||
const color = durationSec > 60 ? 'red' : 'green';
|
||||
|
||||
// 返回带有样式的颜色标签
|
||||
return (
|
||||
<Tag color={color} size="large">
|
||||
{durationSec} 秒
|
||||
</Tag>
|
||||
);
|
||||
}
|
||||
|
||||
const LogsTable = () => {
|
||||
const [isModalOpen, setIsModalOpen] = useState(false);
|
||||
@@ -248,6 +273,15 @@ const LogsTable = () => {
|
||||
return <div>{renderTimestamp(text / 1000)}</div>;
|
||||
},
|
||||
},
|
||||
{
|
||||
title: '花费时间',
|
||||
dataIndex: 'finish_time', // 以finish_time作为dataIndex
|
||||
key: 'finish_time',
|
||||
render: (finish, record) => {
|
||||
// 假设record.start_time是存在的,并且finish是完成时间的时间戳
|
||||
return renderDuration(record.submit_time, finish);
|
||||
},
|
||||
},
|
||||
{
|
||||
title: '渠道',
|
||||
dataIndex: 'channel_id',
|
||||
|
||||
360  web/src/components/ModelPricing.js (new file)
@@ -0,0 +1,360 @@
|
||||
import React, { useContext, useEffect, useRef, useMemo, useState } from 'react';
|
||||
import { API, copy, showError, showSuccess } from '../helpers';
|
||||
|
||||
import {
|
||||
Banner,
|
||||
Input,
|
||||
Layout,
|
||||
Modal,
|
||||
Space,
|
||||
Table,
|
||||
Tag,
|
||||
Tooltip,
|
||||
Popover,
|
||||
ImagePreview,
|
||||
Button,
|
||||
} from '@douyinfe/semi-ui';
|
||||
import {
|
||||
IconMore,
|
||||
IconVerify,
|
||||
IconUploadError,
|
||||
IconHelpCircle,
|
||||
} from '@douyinfe/semi-icons';
|
||||
import { UserContext } from '../context/User/index.js';
|
||||
import Text from '@douyinfe/semi-ui/lib/es/typography/text';
|
||||
|
||||
function renderQuotaType(type) {
|
||||
// Ensure all cases are string literals by adding quotes.
|
||||
switch (type) {
|
||||
case 1:
|
||||
return (
|
||||
<Tag color='teal' size='large'>
|
||||
按次计费
|
||||
</Tag>
|
||||
);
|
||||
case 0:
|
||||
return (
|
||||
<Tag color='violet' size='large'>
|
||||
按量计费
|
||||
</Tag>
|
||||
);
|
||||
default:
|
||||
return '未知';
|
||||
}
|
||||
}
|
||||
|
||||
function renderAvailable(available) {
|
||||
return available ? (
|
||||
<Popover
|
||||
content={
|
||||
<div style={{ padding: 8 }}>您的分组可以使用该模型</div>
|
||||
}
|
||||
position='top'
|
||||
key={available}
|
||||
style={{
|
||||
backgroundColor: 'rgba(var(--semi-blue-4),1)',
|
||||
borderColor: 'rgba(var(--semi-blue-4),1)',
|
||||
color: 'var(--semi-color-white)',
|
||||
borderWidth: 1,
|
||||
borderStyle: 'solid',
|
||||
}}
|
||||
>
|
||||
<IconVerify style={{ color: 'green' }} size="large" />
|
||||
</Popover>
|
||||
) : (
|
||||
<Popover
|
||||
content={
|
||||
<div style={{ padding: 8 }}>您的分组无权使用该模型</div>
|
||||
}
|
||||
position='top'
|
||||
key={available}
|
||||
style={{
|
||||
backgroundColor: 'rgba(var(--semi-blue-4),1)',
|
||||
borderColor: 'rgba(var(--semi-blue-4),1)',
|
||||
color: 'var(--semi-color-white)',
|
||||
borderWidth: 1,
|
||||
borderStyle: 'solid',
|
||||
}}
|
||||
>
|
||||
<IconUploadError style={{ color: '#FFA54F' }} size="large" />
|
||||
</Popover>
|
||||
);
|
||||
}
|
||||
|
||||
const ModelPricing = () => {
|
||||
const [filteredValue, setFilteredValue] = useState([]);
|
||||
const compositionRef = useRef({ isComposition: false });
|
||||
const [selectedRowKeys, setSelectedRowKeys] = useState([]);
|
||||
const [modalImageUrl, setModalImageUrl] = useState('');
|
||||
const [isModalOpenurl, setIsModalOpenurl] = useState(false);
|
||||
|
||||
const rowSelection = useMemo(
|
||||
() => ({
|
||||
onChange: (selectedRowKeys, selectedRows) => {
|
||||
setSelectedRowKeys(selectedRowKeys);
|
||||
},
|
||||
}),
|
||||
[]
|
||||
);
|
||||
|
||||
const handleChange = (value) => {
|
||||
if (compositionRef.current.isComposition) {
|
||||
return;
|
||||
}
|
||||
const newFilteredValue = value ? [value] : [];
|
||||
setFilteredValue(newFilteredValue);
|
||||
};
|
||||
const handleCompositionStart = () => {
|
||||
compositionRef.current.isComposition = true;
|
||||
};
|
||||
|
||||
const handleCompositionEnd = (event) => {
|
||||
compositionRef.current.isComposition = false;
|
||||
const value = event.target.value;
|
||||
const newFilteredValue = value ? [value] : [];
|
||||
setFilteredValue(newFilteredValue);
|
||||
};
|
||||
|
||||
const columns = [
|
||||
{
|
||||
title: '可用性',
|
||||
dataIndex: 'available',
|
||||
render: (text, record, index) => {
|
||||
return renderAvailable(text);
|
||||
},
|
||||
sorter: (a, b) => a.available - b.available,
|
||||
},
|
||||
{
|
||||
title: (
|
||||
<Space>
|
||||
<span>模型名称</span>
|
||||
<Input
|
||||
placeholder='模糊搜索'
|
||||
style={{ width: 200 }}
|
||||
onCompositionStart={handleCompositionStart}
|
||||
onCompositionEnd={handleCompositionEnd}
|
||||
onChange={handleChange}
|
||||
showClear
|
||||
/>
|
||||
</Space>
|
||||
),
|
||||
dataIndex: 'model_name', // 以finish_time作为dataIndex
|
||||
render: (text, record, index) => {
|
||||
return (
|
||||
<>
|
||||
<Tag
|
||||
color='green'
|
||||
size='large'
|
||||
onClick={() => {
|
||||
copyText(text);
|
||||
}}
|
||||
>
|
||||
{text}
|
||||
</Tag>
|
||||
</>
|
||||
);
|
||||
},
|
||||
onFilter: (value, record) =>
|
||||
record.model_name.toLowerCase().includes(value.toLowerCase()),
|
||||
filteredValue,
|
||||
},
|
||||
{
|
||||
title: '计费类型',
|
||||
dataIndex: 'quota_type',
|
||||
render: (text, record, index) => {
|
||||
return renderQuotaType(parseInt(text));
|
||||
},
|
||||
sorter: (a, b) => a.quota_type - b.quota_type,
|
||||
},
|
||||
{
|
||||
title: () => (
|
||||
<span style={{'display':'flex','alignItems':'center'}}>
|
||||
倍率
|
||||
<Popover
|
||||
content={
|
||||
<div style={{ padding: 8 }}>倍率是为了方便换算不同价格的模型<br/>点击查看倍率说明</div>
|
||||
}
|
||||
position='top'
|
||||
style={{
|
||||
backgroundColor: 'rgba(var(--semi-blue-4),1)',
|
||||
borderColor: 'rgba(var(--semi-blue-4),1)',
|
||||
color: 'var(--semi-color-white)',
|
||||
borderWidth: 1,
|
||||
borderStyle: 'solid',
|
||||
}}
|
||||
>
|
||||
<IconHelpCircle
|
||||
onClick={() => {
|
||||
setModalImageUrl('/ratio.png');
|
||||
setIsModalOpenurl(true);
|
||||
}}
|
||||
/>
|
||||
</Popover>
|
||||
</span>
|
||||
),
|
||||
dataIndex: 'model_ratio',
|
||||
render: (text, record, index) => {
|
||||
let content = text;
|
||||
let completionRatio = parseFloat(record.completion_ratio.toFixed(3));
|
||||
content = (
|
||||
<>
|
||||
<Text>模型:{record.quota_type === 0 ? text : '无'}</Text>
|
||||
<br />
|
||||
<Text>补全:{record.quota_type === 0 ? completionRatio : '无'}</Text>
|
||||
</>
|
||||
);
|
||||
return <div>{content}</div>;
|
||||
},
|
||||
},
|
||||
{
|
||||
title: '模型价格',
|
||||
dataIndex: 'model_price',
|
||||
render: (text, record, index) => {
|
||||
let content = text;
|
||||
if (record.quota_type === 0) {
|
||||
// 这里的 *2 是因为 1倍率=0.002刀,请勿删除
|
||||
let inputRatioPrice = record.model_ratio * 2 * record.group_ratio;
|
||||
let completionRatioPrice =
|
||||
record.model_ratio *
|
||||
record.completion_ratio * 2 *
|
||||
record.group_ratio;
|
||||
content = (
|
||||
<>
|
||||
<Text>提示 ${inputRatioPrice} / 1M tokens</Text>
|
||||
<br />
|
||||
<Text>补全 ${completionRatioPrice} / 1M tokens</Text>
|
||||
</>
|
||||
);
|
||||
} else {
|
||||
let price = parseFloat(text) * record.group_ratio;
|
||||
content = <>模型价格:${price}</>;
|
||||
}
|
||||
return <div>{content}</div>;
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
const [models, setModels] = useState([]);
|
||||
const [loading, setLoading] = useState(true);
|
||||
const [userState, userDispatch] = useContext(UserContext);
|
||||
const [groupRatio, setGroupRatio] = useState(1);
|
||||
|
||||
const setModelsFormat = (models, groupRatio) => {
|
||||
for (let i = 0; i < models.length; i++) {
|
||||
models[i].key = models[i].model_name;
|
||||
models[i].group_ratio = groupRatio;
|
||||
}
|
||||
// sort by quota_type
|
||||
models.sort((a, b) => {
|
||||
return a.quota_type - b.quota_type;
|
||||
});
|
||||
|
||||
// sort by model_name, start with gpt is max, other use localeCompare
|
||||
models.sort((a, b) => {
|
||||
if (a.model_name.startsWith('gpt') && !b.model_name.startsWith('gpt')) {
|
||||
return -1;
|
||||
} else if (
|
||||
!a.model_name.startsWith('gpt') &&
|
||||
b.model_name.startsWith('gpt')
|
||||
) {
|
||||
return 1;
|
||||
} else {
|
||||
return a.model_name.localeCompare(b.model_name);
|
||||
}
|
||||
});
|
||||
|
||||
setModels(models);
|
||||
};
|
||||
|
||||
const loadPricing = async () => {
|
||||
setLoading(true);
|
||||
|
||||
let url = '';
|
||||
url = `/api/pricing`;
|
||||
const res = await API.get(url);
|
||||
const { success, message, data, group_ratio } = res.data;
|
||||
if (success) {
|
||||
setGroupRatio(group_ratio);
|
||||
setModelsFormat(data, group_ratio);
|
||||
} else {
|
||||
showError(message);
|
||||
}
|
||||
setLoading(false);
|
||||
};
|
||||
|
||||
const refresh = async () => {
|
||||
await loadPricing();
|
||||
};
|
||||
|
||||
const copyText = async (text) => {
|
||||
if (await copy(text)) {
|
||||
showSuccess('已复制:' + text);
|
||||
} else {
|
||||
// setSearchKeyword(text);
|
||||
Modal.error({ title: '无法复制到剪贴板,请手动复制', content: text });
|
||||
}
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
refresh().then();
|
||||
}, []);
|
||||
|
||||
return (
|
||||
<>
|
||||
<Layout>
|
||||
{userState.user ? (
|
||||
<Banner
|
||||
type="success"
|
||||
fullMode={false}
|
||||
closeIcon="null"
|
||||
description={`您的分组为:${userState.user.group},分组倍率为:${groupRatio}`}
|
||||
/>
|
||||
) : (
|
||||
<Banner
|
||||
type='warning'
|
||||
fullMode={false}
|
||||
closeIcon="null"
|
||||
description={`您还未登陆,显示的价格为默认分组倍率: ${groupRatio}`}
|
||||
/>
|
||||
)}
|
||||
<br/>
|
||||
<Banner
|
||||
type="info"
|
||||
fullMode={false}
|
||||
description={<div>按量计费费用 = 分组倍率 × 模型倍率 × (提示token数 + 补全token数 × 补全倍率)/ 500000 (单位:美元)</div>}
|
||||
closeIcon="null"
|
||||
/>
|
||||
<br/>
|
||||
<Button
|
||||
theme='light'
|
||||
type='tertiary'
|
||||
style={{width: 150}}
|
||||
onClick={() => {
|
||||
copyText(selectedRowKeys);
|
||||
}}
|
||||
disabled={selectedRowKeys == ""}
|
||||
>
|
||||
复制选中模型
|
||||
</Button>
|
||||
<Table
|
||||
style={{ marginTop: 5 }}
|
||||
columns={columns}
|
||||
dataSource={models}
|
||||
loading={loading}
|
||||
pagination={{
|
||||
pageSize: models.length,
|
||||
showSizeChanger: false,
|
||||
}}
|
||||
rowSelection={rowSelection}
|
||||
/>
|
||||
<ImagePreview
|
||||
src={modalImageUrl}
|
||||
visible={isModalOpenurl}
|
||||
onVisibleChange={(visible) => setIsModalOpenurl(visible)}
|
||||
/>
|
||||
</Layout>
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
export default ModelPricing;
|
||||
@@ -1,17 +1,17 @@
|
||||
import React, { useEffect, useState } from 'react';
|
||||
import { Divider, Form, Grid, Header } from 'semantic-ui-react';
|
||||
import {
|
||||
API,
|
||||
showError,
|
||||
showSuccess,
|
||||
timestamp2string,
|
||||
verifyJSON,
|
||||
} from '../helpers';
|
||||
import { Card, Spin } from '@douyinfe/semi-ui';
|
||||
import SettingsGeneral from '../pages/Setting/Operation/SettingsGeneral.js';
|
||||
import SettingsDrawing from '../pages/Setting/Operation/SettingsDrawing.js';
|
||||
import SettingsSensitiveWords from '../pages/Setting/Operation/SettingsSensitiveWords.js';
|
||||
import SettingsLog from '../pages/Setting/Operation/SettingsLog.js';
|
||||
import SettingsDataDashboard from '../pages/Setting/Operation/SettingsDataDashboard.js';
|
||||
import SettingsMonitoring from '../pages/Setting/Operation/SettingsMonitoring.js';
|
||||
import SettingsCreditLimit from '../pages/Setting/Operation/SettingsCreditLimit.js';
|
||||
import SettingsMagnification from '../pages/Setting/Operation/SettingsMagnification.js';
|
||||
|
||||
import { useTheme } from '../context/Theme';
|
||||
import { API, showError, showSuccess } from '../helpers';
|
||||
|
||||
const OperationSetting = () => {
|
||||
let now = new Date();
|
||||
let [inputs, setInputs] = useState({
|
||||
QuotaForNewUser: 0,
|
||||
QuotaForInviter: 0,
|
||||
@@ -20,44 +20,38 @@ const OperationSetting = () => {
|
||||
PreConsumedQuota: 0,
|
||||
StreamCacheQueueLength: 0,
|
||||
ModelRatio: '',
|
||||
CompletionRatio: '',
|
||||
ModelPrice: '',
|
||||
GroupRatio: '',
|
||||
TopUpLink: '',
|
||||
ChatLink: '',
|
||||
ChatLink2: '', // 添加的新状态变量
|
||||
QuotaPerUnit: 0,
|
||||
AutomaticDisableChannelEnabled: '',
|
||||
AutomaticEnableChannelEnabled: '',
|
||||
AutomaticDisableChannelEnabled: false,
|
||||
AutomaticEnableChannelEnabled: false,
|
||||
ChannelDisableThreshold: 0,
|
||||
LogConsumeEnabled: '',
|
||||
DisplayInCurrencyEnabled: '',
|
||||
DisplayTokenStatEnabled: '',
|
||||
CheckSensitiveEnabled: '',
|
||||
CheckSensitiveOnPromptEnabled: '',
|
||||
LogConsumeEnabled: false,
|
||||
DisplayInCurrencyEnabled: false,
|
||||
DisplayTokenStatEnabled: false,
|
||||
CheckSensitiveEnabled: false,
|
||||
CheckSensitiveOnPromptEnabled: false,
|
||||
CheckSensitiveOnCompletionEnabled: '',
|
||||
StopOnSensitiveEnabled: '',
|
||||
SensitiveWords: '',
|
||||
MjNotifyEnabled: '',
|
||||
MjModeClearEnabled: '',
|
||||
MjForwardUrlEnabled: '',
|
||||
DrawingEnabled: '',
|
||||
DataExportEnabled: '',
|
||||
MjNotifyEnabled: false,
|
||||
MjAccountFilterEnabled: false,
|
||||
MjModeClearEnabled: false,
|
||||
MjForwardUrlEnabled: false,
|
||||
DrawingEnabled: false,
|
||||
DataExportEnabled: false,
|
||||
DataExportDefaultTime: 'hour',
|
||||
DataExportInterval: 5,
|
||||
DefaultCollapseSidebar: '', // 默认折叠侧边栏
|
||||
DefaultCollapseSidebar: false, // 默认折叠侧边栏
|
||||
RetryTimes: 0,
|
||||
});
|
||||
const [originInputs, setOriginInputs] = useState({});
|
||||
|
||||
let [loading, setLoading] = useState(false);
|
||||
let [historyTimestamp, setHistoryTimestamp] = useState(
|
||||
timestamp2string(now.getTime() / 1000 - 30 * 24 * 3600),
|
||||
); // a month ago
|
||||
// 精确时间选项(小时,天,周)
|
||||
const timeOptions = [
|
||||
{ key: 'hour', text: '小时', value: 'hour' },
|
||||
{ key: 'day', text: '天', value: 'day' },
|
||||
{ key: 'week', text: '周', value: 'week' },
|
||||
];
|
||||
|
||||
const getOptions = async () => {
|
||||
const res = await API.get('/api/option/');
|
||||
const { success, message, data } = res.data;
|
||||
@@ -67,546 +61,79 @@ const OperationSetting = () => {
|
||||
if (
|
||||
item.key === 'ModelRatio' ||
|
||||
item.key === 'GroupRatio' ||
|
||||
item.key === 'CompletionRatio' ||
|
||||
item.key === 'ModelPrice'
|
||||
) {
|
||||
item.value = JSON.stringify(JSON.parse(item.value), null, 2);
|
||||
}
|
||||
newInputs[item.key] = item.value;
|
||||
if (
|
||||
item.key.endsWith('Enabled') ||
|
||||
['DefaultCollapseSidebar'].includes(item.key)
|
||||
) {
|
||||
newInputs[item.key] = item.value === 'true' ? true : false;
|
||||
} else {
|
||||
newInputs[item.key] = item.value;
|
||||
}
|
||||
});
|
||||
|
||||
setInputs(newInputs);
|
||||
setOriginInputs(newInputs);
|
||||
} else {
|
||||
showError(message);
|
||||
}
|
||||
};
|
||||
|
||||
const theme = useTheme();
|
||||
const isDark = theme === 'dark';
|
||||
async function onRefresh() {
|
||||
try {
|
||||
setLoading(true);
|
||||
await getOptions();
|
||||
showSuccess('刷新成功');
|
||||
} catch (error) {
|
||||
showError('刷新失败');
|
||||
} finally {
|
||||
setLoading(false);
|
||||
}
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
getOptions().then();
|
||||
onRefresh();
|
||||
}, []);
|
||||
|
||||
const updateOption = async (key, value) => {
|
||||
setLoading(true);
|
||||
if (key.endsWith('Enabled')) {
|
||||
value = inputs[key] === 'true' ? 'false' : 'true';
|
||||
}
|
||||
if (key === 'DefaultCollapseSidebar') {
|
||||
value = inputs[key] === 'true' ? 'false' : 'true';
|
||||
}
|
||||
console.log(key, value);
|
||||
const res = await API.put('/api/option/', {
|
||||
key,
|
||||
value,
|
||||
});
|
||||
const { success, message } = res.data;
|
||||
if (success) {
|
||||
setInputs((inputs) => ({ ...inputs, [key]: value }));
|
||||
} else {
|
||||
showError(message);
|
||||
}
|
||||
setLoading(false);
|
||||
};
|
||||
|
||||
const handleInputChange = async (e, { name, value }) => {
|
||||
if (
|
||||
name.endsWith('Enabled') ||
|
||||
name === 'DataExportInterval' ||
|
||||
name === 'DataExportDefaultTime' ||
|
||||
name === 'DefaultCollapseSidebar'
|
||||
) {
|
||||
if (name === 'DataExportDefaultTime') {
|
||||
localStorage.setItem('data_export_default_time', value);
|
||||
} else if (name === 'MjNotifyEnabled') {
|
||||
localStorage.setItem('mj_notify_enabled', value);
|
||||
}
|
||||
await updateOption(name, value);
|
||||
} else {
|
||||
setInputs((inputs) => ({ ...inputs, [name]: value }));
|
||||
}
|
||||
};
|
||||
|
||||
const submitConfig = async (group) => {
|
||||
switch (group) {
|
||||
case 'monitor':
|
||||
if (
|
||||
originInputs['ChannelDisableThreshold'] !==
|
||||
inputs.ChannelDisableThreshold
|
||||
) {
|
||||
await updateOption(
|
||||
'ChannelDisableThreshold',
|
||||
inputs.ChannelDisableThreshold,
|
||||
);
|
||||
}
|
||||
if (
|
||||
originInputs['QuotaRemindThreshold'] !== inputs.QuotaRemindThreshold
|
||||
) {
|
||||
await updateOption(
|
||||
'QuotaRemindThreshold',
|
||||
inputs.QuotaRemindThreshold,
|
||||
);
|
||||
}
|
||||
break;
|
||||
case 'ratio':
|
||||
if (originInputs['ModelRatio'] !== inputs.ModelRatio) {
|
||||
if (!verifyJSON(inputs.ModelRatio)) {
|
||||
showError('模型倍率不是合法的 JSON 字符串');
|
||||
return;
|
||||
}
|
||||
await updateOption('ModelRatio', inputs.ModelRatio);
|
||||
}
|
||||
if (originInputs['GroupRatio'] !== inputs.GroupRatio) {
|
||||
if (!verifyJSON(inputs.GroupRatio)) {
|
||||
showError('分组倍率不是合法的 JSON 字符串');
|
||||
return;
|
||||
}
|
||||
await updateOption('GroupRatio', inputs.GroupRatio);
|
||||
}
|
||||
if (originInputs['ModelPrice'] !== inputs.ModelPrice) {
|
||||
if (!verifyJSON(inputs.ModelPrice)) {
|
||||
showError('模型固定价格不是合法的 JSON 字符串');
|
||||
return;
|
||||
}
|
||||
await updateOption('ModelPrice', inputs.ModelPrice);
|
||||
}
|
||||
break;
|
||||
case 'words':
|
||||
if (originInputs['SensitiveWords'] !== inputs.SensitiveWords) {
|
||||
await updateOption('SensitiveWords', inputs.SensitiveWords);
|
||||
}
|
||||
break;
|
||||
case 'quota':
|
||||
if (originInputs['QuotaForNewUser'] !== inputs.QuotaForNewUser) {
|
||||
await updateOption('QuotaForNewUser', inputs.QuotaForNewUser);
|
||||
}
|
||||
if (originInputs['QuotaForInvitee'] !== inputs.QuotaForInvitee) {
|
||||
await updateOption('QuotaForInvitee', inputs.QuotaForInvitee);
|
||||
}
|
||||
if (originInputs['QuotaForInviter'] !== inputs.QuotaForInviter) {
|
||||
await updateOption('QuotaForInviter', inputs.QuotaForInviter);
|
||||
}
|
||||
if (originInputs['PreConsumedQuota'] !== inputs.PreConsumedQuota) {
|
||||
await updateOption('PreConsumedQuota', inputs.PreConsumedQuota);
|
||||
}
|
||||
break;
|
||||
case 'general':
|
||||
if (originInputs['TopUpLink'] !== inputs.TopUpLink) {
|
||||
await updateOption('TopUpLink', inputs.TopUpLink);
|
||||
}
|
||||
if (originInputs['ChatLink'] !== inputs.ChatLink) {
|
||||
await updateOption('ChatLink', inputs.ChatLink);
|
||||
}
|
||||
if (originInputs['ChatLink2'] !== inputs.ChatLink2) {
|
||||
await updateOption('ChatLink2', inputs.ChatLink2);
|
||||
}
|
||||
if (originInputs['QuotaPerUnit'] !== inputs.QuotaPerUnit) {
|
||||
await updateOption('QuotaPerUnit', inputs.QuotaPerUnit);
|
||||
}
|
||||
if (originInputs['RetryTimes'] !== inputs.RetryTimes) {
|
||||
await updateOption('RetryTimes', inputs.RetryTimes);
|
||||
}
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
const deleteHistoryLogs = async () => {
|
||||
console.log(inputs);
|
||||
const res = await API.delete(
|
||||
`/api/log/?target_timestamp=${Date.parse(historyTimestamp) / 1000}`,
|
||||
);
|
||||
const { success, message, data } = res.data;
|
||||
if (success) {
|
||||
showSuccess(`${data} 条日志已清理!`);
|
||||
return;
|
||||
}
|
||||
showError('日志清理失败:' + message);
|
||||
};
|
||||
return (
|
||||
<Grid columns={1}>
|
||||
<Grid.Column>
|
||||
<Form loading={loading} inverted={isDark}>
|
||||
<Header as='h3' inverted={isDark}>
|
||||
通用设置
|
||||
</Header>
|
||||
<Form.Group widths={4}>
|
||||
<Form.Input
|
||||
label='充值链接'
|
||||
name='TopUpLink'
|
||||
onChange={handleInputChange}
|
||||
autoComplete='new-password'
|
||||
value={inputs.TopUpLink}
|
||||
type='link'
|
||||
placeholder='例如发卡网站的购买链接'
|
||||
/>
|
||||
<Form.Input
|
||||
label='默认聊天页面链接'
|
||||
name='ChatLink'
|
||||
onChange={handleInputChange}
|
||||
autoComplete='new-password'
|
||||
value={inputs.ChatLink}
|
||||
type='link'
|
||||
placeholder='例如 ChatGPT Next Web 的部署地址'
|
||||
/>
|
||||
<Form.Input
|
||||
label='聊天页面2链接'
|
||||
name='ChatLink2'
|
||||
onChange={handleInputChange}
|
||||
autoComplete='new-password'
|
||||
value={inputs.ChatLink2}
|
||||
type='link'
|
||||
placeholder='例如 ChatGPT Web & Midjourney 的部署地址'
|
||||
/>
|
||||
<Form.Input
|
||||
label='单位美元额度'
|
||||
name='QuotaPerUnit'
|
||||
onChange={handleInputChange}
|
||||
autoComplete='new-password'
|
||||
value={inputs.QuotaPerUnit}
|
||||
type='number'
|
||||
step='0.01'
|
||||
placeholder='一单位货币能兑换的额度'
|
||||
/>
|
||||
<Form.Input
|
||||
label='失败重试次数'
|
||||
name='RetryTimes'
|
||||
type={'number'}
|
||||
step='1'
|
||||
min='0'
|
||||
onChange={handleInputChange}
|
||||
autoComplete='new-password'
|
||||
value={inputs.RetryTimes}
|
||||
placeholder='失败重试次数'
|
||||
/>
|
||||
</Form.Group>
|
||||
<Form.Group inline>
|
||||
<Form.Checkbox
|
||||
checked={inputs.DisplayInCurrencyEnabled === 'true'}
|
||||
label='以货币形式显示额度'
|
||||
name='DisplayInCurrencyEnabled'
|
||||
onChange={handleInputChange}
|
||||
/>
|
||||
<Form.Checkbox
|
||||
checked={inputs.DisplayTokenStatEnabled === 'true'}
|
||||
label='Billing 相关 API 显示令牌额度而非用户额度'
|
||||
name='DisplayTokenStatEnabled'
|
||||
onChange={handleInputChange}
|
||||
/>
|
||||
<Form.Checkbox
|
||||
checked={inputs.DefaultCollapseSidebar === 'true'}
|
||||
label='默认折叠侧边栏'
|
||||
name='DefaultCollapseSidebar'
|
||||
onChange={handleInputChange}
|
||||
/>
|
||||
</Form.Group>
|
||||
<Form.Button
|
||||
onClick={() => {
|
||||
submitConfig('general').then();
|
||||
}}
|
||||
>
|
||||
保存通用设置
|
||||
</Form.Button>
|
||||
<Divider />
|
||||
<Header as='h3' inverted={isDark}>
|
||||
绘图设置
|
||||
</Header>
|
||||
<Form.Group inline>
|
||||
<Form.Checkbox
|
||||
checked={inputs.DrawingEnabled === 'true'}
|
||||
label='启用绘图功能'
|
||||
name='DrawingEnabled'
|
||||
onChange={handleInputChange}
|
||||
/>
|
||||
<Form.Checkbox
|
||||
checked={inputs.MjNotifyEnabled === 'true'}
|
||||
label='允许回调(会泄露服务器ip地址)'
|
||||
name='MjNotifyEnabled'
|
||||
onChange={handleInputChange}
|
||||
/>
|
||||
<Form.Checkbox
|
||||
checked={inputs.MjForwardUrlEnabled === 'true'}
|
||||
label='开启之后将上游地址替换为服务器地址'
|
||||
name='MjForwardUrlEnabled'
|
||||
onChange={handleInputChange}
|
||||
/>
|
||||
<Form.Checkbox
|
||||
checked={inputs.MjModeClearEnabled === 'true'}
|
||||
label='开启之后会清除用户提示词中的--fast、--relax以及--turbo参数'
|
||||
name='MjModeClearEnabled'
|
||||
onChange={handleInputChange}
|
||||
/>
|
||||
</Form.Group>
|
||||
<Divider />
|
||||
<Header as='h3' inverted={isDark}>
|
||||
屏蔽词过滤设置
|
||||
</Header>
|
||||
<Form.Group inline>
|
||||
<Form.Checkbox
|
||||
checked={inputs.CheckSensitiveEnabled === 'true'}
|
||||
label='启用屏蔽词过滤功能'
|
||||
name='CheckSensitiveEnabled'
|
||||
onChange={handleInputChange}
|
||||
/>
|
||||
</Form.Group>
|
||||
<Form.Group inline>
|
||||
<Form.Checkbox
|
||||
checked={inputs.CheckSensitiveOnPromptEnabled === 'true'}
|
||||
label='启用prompt检查'
|
||||
name='CheckSensitiveOnPromptEnabled'
|
||||
onChange={handleInputChange}
|
||||
/>
|
||||
{/*<Form.Checkbox*/}
|
||||
{/* checked={inputs.CheckSensitiveOnCompletionEnabled === 'true'}*/}
|
||||
{/* label='启用生成内容检查'*/}
|
||||
{/* name='CheckSensitiveOnCompletionEnabled'*/}
|
||||
{/* onChange={handleInputChange}*/}
|
||||
{/*/>*/}
|
||||
</Form.Group>
|
||||
{/*<Form.Group inline>*/}
|
||||
{/* <Form.Checkbox*/}
|
||||
{/* checked={inputs.StopOnSensitiveEnabled === 'true'}*/}
|
||||
{/* label='在检测到屏蔽词时,立刻停止生成,否则替换屏蔽词'*/}
|
||||
{/* name='StopOnSensitiveEnabled'*/}
|
||||
{/* onChange={handleInputChange}*/}
|
||||
{/* />*/}
|
||||
{/*</Form.Group>*/}
|
||||
{/*<Form.Group>*/}
|
||||
{/* <Form.Input*/}
|
||||
{/* label="流模式下缓存队列,默认不缓存,设置越大检测越准确,但是回复会有卡顿感"*/}
|
||||
{/* name="StreamCacheTextLength"*/}
|
||||
{/* onChange={handleInputChange}*/}
|
||||
{/* value={inputs.StreamCacheQueueLength}*/}
|
||||
{/* type="number"*/}
|
||||
{/* min="0"*/}
|
||||
{/* placeholder="例如:10"*/}
|
||||
{/* />*/}
|
||||
{/*</Form.Group>*/}
|
||||
<Form.Group widths='equal'>
|
||||
<Form.TextArea
|
||||
label='屏蔽词列表,一行一个屏蔽词,不需要符号分割'
|
||||
name='SensitiveWords'
|
||||
onChange={handleInputChange}
|
||||
style={{ minHeight: 250, fontFamily: 'JetBrains Mono, Consolas' }}
|
||||
value={inputs.SensitiveWords}
|
||||
placeholder='一行一个屏蔽词'
|
||||
/>
|
||||
</Form.Group>
|
||||
<Form.Button
|
||||
onClick={() => {
|
||||
submitConfig('words').then();
|
||||
}}
|
||||
>
|
||||
保存屏蔽词设置
|
||||
</Form.Button>
|
||||
<Divider />
|
||||
<Header as='h3' inverted={isDark}>
|
||||
日志设置
|
||||
</Header>
|
||||
<Form.Group inline>
|
||||
<Form.Checkbox
|
||||
checked={inputs.LogConsumeEnabled === 'true'}
|
||||
label='启用额度消费日志记录'
|
||||
name='LogConsumeEnabled'
|
||||
onChange={handleInputChange}
|
||||
/>
|
||||
</Form.Group>
|
||||
<Form.Group widths={4}>
|
||||
<Form.Input
|
||||
label='目标时间'
|
||||
value={historyTimestamp}
|
||||
type='datetime-local'
|
||||
name='history_timestamp'
|
||||
onChange={(e, { name, value }) => {
|
||||
setHistoryTimestamp(value);
|
||||
}}
|
||||
/>
|
||||
</Form.Group>
|
||||
<Form.Button
|
||||
onClick={() => {
|
||||
deleteHistoryLogs().then();
|
||||
}}
|
||||
>
|
||||
清理历史日志
|
||||
</Form.Button>
|
||||
<Divider />
|
||||
<Header as='h3' inverted={isDark}>
|
||||
数据看板
|
||||
</Header>
|
||||
<Form.Checkbox
|
||||
checked={inputs.DataExportEnabled === 'true'}
|
||||
label='启用数据看板(实验性)'
|
||||
name='DataExportEnabled'
|
||||
onChange={handleInputChange}
|
||||
/>
|
||||
<Form.Group>
|
||||
<Form.Input
|
||||
label='数据看板更新间隔(分钟,设置过短会影响数据库性能)'
|
||||
name='DataExportInterval'
|
||||
type={'number'}
|
||||
step='1'
|
||||
min='1'
|
||||
onChange={handleInputChange}
|
||||
autoComplete='new-password'
|
||||
value={inputs.DataExportInterval}
|
||||
placeholder='数据看板更新间隔(分钟,设置过短会影响数据库性能)'
|
||||
/>
|
||||
<Form.Select
|
||||
label='数据看板默认时间粒度(仅修改展示粒度,统计精确到小时)'
|
||||
options={timeOptions}
|
||||
name='DataExportDefaultTime'
|
||||
onChange={handleInputChange}
|
||||
autoComplete='new-password'
|
||||
value={inputs.DataExportDefaultTime}
|
||||
placeholder='数据看板默认时间粒度'
|
||||
/>
|
||||
</Form.Group>
|
||||
<Divider />
|
||||
<Header as='h3' inverted={isDark}>
|
||||
监控设置
|
||||
</Header>
|
||||
<Form.Group widths={3}>
|
||||
<Form.Input
|
||||
label='最长响应时间'
|
||||
name='ChannelDisableThreshold'
|
||||
onChange={handleInputChange}
|
||||
autoComplete='new-password'
|
||||
value={inputs.ChannelDisableThreshold}
|
||||
type='number'
|
||||
min='0'
|
||||
placeholder='单位秒,当运行通道全部测试时,超过此时间将自动禁用通道'
|
||||
/>
|
||||
<Form.Input
|
||||
label='额度提醒阈值'
|
||||
name='QuotaRemindThreshold'
|
||||
onChange={handleInputChange}
|
||||
autoComplete='new-password'
|
||||
value={inputs.QuotaRemindThreshold}
|
||||
type='number'
|
||||
min='0'
|
||||
placeholder='低于此额度时将发送邮件提醒用户'
|
||||
/>
|
||||
</Form.Group>
|
||||
<Form.Group inline>
|
||||
<Form.Checkbox
|
||||
checked={inputs.AutomaticDisableChannelEnabled === 'true'}
|
||||
label='失败时自动禁用通道'
|
||||
name='AutomaticDisableChannelEnabled'
|
||||
onChange={handleInputChange}
|
||||
/>
|
||||
<Form.Checkbox
|
||||
checked={inputs.AutomaticEnableChannelEnabled === 'true'}
|
||||
label='成功时自动启用通道'
|
||||
name='AutomaticEnableChannelEnabled'
|
||||
onChange={handleInputChange}
|
||||
/>
|
||||
</Form.Group>
|
||||
<Form.Button
|
||||
onClick={() => {
|
||||
submitConfig('monitor').then();
|
||||
}}
|
||||
>
|
||||
保存监控设置
|
||||
</Form.Button>
|
||||
<Divider />
|
||||
<Header as='h3' inverted={isDark}>
|
||||
额度设置
|
||||
</Header>
|
||||
<Form.Group widths={4}>
|
||||
<Form.Input
|
||||
label='新用户初始额度'
|
||||
name='QuotaForNewUser'
|
||||
onChange={handleInputChange}
|
||||
autoComplete='new-password'
|
||||
value={inputs.QuotaForNewUser}
|
||||
type='number'
|
||||
min='0'
|
||||
placeholder='例如:100'
|
||||
/>
|
||||
<Form.Input
|
||||
label='请求预扣费额度'
|
||||
name='PreConsumedQuota'
|
||||
onChange={handleInputChange}
|
||||
autoComplete='new-password'
|
||||
value={inputs.PreConsumedQuota}
|
||||
type='number'
|
||||
min='0'
|
||||
placeholder='请求结束后多退少补'
|
||||
/>
|
||||
<Form.Input
|
||||
label='邀请新用户奖励额度'
|
||||
name='QuotaForInviter'
|
||||
onChange={handleInputChange}
|
||||
autoComplete='new-password'
|
||||
value={inputs.QuotaForInviter}
|
||||
type='number'
|
||||
min='0'
|
||||
placeholder='例如:2000'
|
||||
/>
|
||||
<Form.Input
|
||||
label='新用户使用邀请码奖励额度'
|
||||
name='QuotaForInvitee'
|
||||
onChange={handleInputChange}
|
||||
autoComplete='new-password'
|
||||
value={inputs.QuotaForInvitee}
|
||||
type='number'
|
||||
min='0'
|
||||
placeholder='例如:1000'
|
||||
/>
|
||||
</Form.Group>
|
||||
<Form.Button
|
||||
onClick={() => {
|
||||
submitConfig('quota').then();
|
||||
}}
|
||||
>
|
||||
保存额度设置
|
||||
</Form.Button>
|
||||
<Divider />
|
||||
<Header as='h3' inverted={isDark}>
|
||||
倍率设置
|
||||
</Header>
|
||||
<Form.Group widths='equal'>
|
||||
<Form.TextArea
|
||||
label='模型固定价格(一次调用消耗多少刀,优先级大于模型倍率)'
|
||||
name='ModelPrice'
|
||||
onChange={handleInputChange}
|
||||
style={{ minHeight: 250, fontFamily: 'JetBrains Mono, Consolas' }}
|
||||
autoComplete='new-password'
|
||||
value={inputs.ModelPrice}
|
||||
placeholder='为一个 JSON 文本,键为模型名称,值为一次调用消耗多少刀,比如 "gpt-4-gizmo-*": 0.1,一次消耗0.1刀'
|
||||
/>
|
||||
</Form.Group>
|
||||
<Form.Group widths='equal'>
|
||||
<Form.TextArea
|
||||
label='模型倍率'
|
||||
name='ModelRatio'
|
||||
onChange={handleInputChange}
|
||||
style={{ minHeight: 250, fontFamily: 'JetBrains Mono, Consolas' }}
|
||||
autoComplete='new-password'
|
||||
value={inputs.ModelRatio}
|
||||
placeholder='为一个 JSON 文本,键为模型名称,值为倍率'
|
||||
/>
|
||||
</Form.Group>
|
||||
<Form.Group widths='equal'>
|
||||
<Form.TextArea
|
||||
label='分组倍率'
|
||||
name='GroupRatio'
|
||||
onChange={handleInputChange}
|
||||
style={{ minHeight: 250, fontFamily: 'JetBrains Mono, Consolas' }}
|
||||
autoComplete='new-password'
|
||||
value={inputs.GroupRatio}
|
||||
placeholder='为一个 JSON 文本,键为分组名称,值为倍率'
|
||||
/>
|
||||
</Form.Group>
|
||||
<Form.Button
|
||||
onClick={() => {
|
||||
submitConfig('ratio').then();
|
||||
}}
|
||||
>
|
||||
保存倍率设置
|
||||
</Form.Button>
|
||||
</Form>
|
||||
</Grid.Column>
|
||||
</Grid>
|
||||
<>
|
||||
<Spin spinning={loading} size='large'>
|
||||
{/* 通用设置 */}
|
||||
<Card style={{ marginTop: '10px' }}>
|
||||
<SettingsGeneral options={inputs} refresh={onRefresh} />
|
||||
</Card>
|
||||
{/* 绘图设置 */}
|
||||
<Card style={{ marginTop: '10px' }}>
|
||||
<SettingsDrawing options={inputs} refresh={onRefresh} />
|
||||
</Card>
|
||||
{/* 屏蔽词过滤设置 */}
|
||||
<Card style={{ marginTop: '10px' }}>
|
||||
<SettingsSensitiveWords options={inputs} refresh={onRefresh} />
|
||||
</Card>
|
||||
{/* 日志设置 */}
|
||||
<Card style={{ marginTop: '10px' }}>
|
||||
<SettingsLog options={inputs} refresh={onRefresh} />
|
||||
</Card>
|
||||
{/* 数据看板 */}
|
||||
<Card style={{ marginTop: '10px' }}>
|
||||
<SettingsDataDashboard options={inputs} refresh={onRefresh} />
|
||||
</Card>
|
||||
{/* 监控设置 */}
|
||||
<Card style={{ marginTop: '10px' }}>
|
||||
<SettingsMonitoring options={inputs} refresh={onRefresh} />
|
||||
</Card>
|
||||
{/* 额度设置 */}
|
||||
<Card style={{ marginTop: '10px' }}>
|
||||
<SettingsCreditLimit options={inputs} refresh={onRefresh} />
|
||||
</Card>
|
||||
{/* 倍率设置 */}
|
||||
<Card style={{ marginTop: '10px' }}>
|
||||
<SettingsMagnification options={inputs} refresh={onRefresh} />
|
||||
</Card>
|
||||
</Spin>
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
|
||||
@@ -23,10 +23,12 @@ import {
|
||||
IconImage,
|
||||
IconKey,
|
||||
IconLayers,
|
||||
IconPriceTag,
|
||||
IconSetting,
|
||||
IconUser,
|
||||
} from '@douyinfe/semi-icons';
|
||||
import { Layout, Nav } from '@douyinfe/semi-ui';
|
||||
import { setStatusData } from '../helpers/data.js';
|
||||
|
||||
// HeaderBar Buttons
|
||||
|
||||
@@ -55,6 +57,7 @@ const SiderBar = () => {
|
||||
about: '/about',
|
||||
chat: '/chat',
|
||||
detail: '/detail',
|
||||
pricing: '/pricing',
|
||||
};
|
||||
|
||||
const headerButtons = useMemo(
|
||||
@@ -100,6 +103,12 @@ const SiderBar = () => {
|
||||
to: '/topup',
|
||||
icon: <IconCreditCard />,
|
||||
},
|
||||
{
|
||||
text: '模型价格',
|
||||
itemKey: 'pricing',
|
||||
to: '/pricing',
|
||||
icon: <IconPriceTag />,
|
||||
},
|
||||
{
|
||||
text: '用户管理',
|
||||
itemKey: 'user',
|
||||
@@ -161,34 +170,8 @@ const SiderBar = () => {
|
||||
}
|
||||
const { success, data } = res.data;
|
||||
if (success) {
|
||||
localStorage.setItem('status', JSON.stringify(data));
|
||||
statusDispatch({ type: 'set', payload: data });
|
||||
localStorage.setItem('system_name', data.system_name);
|
||||
localStorage.setItem('logo', data.logo);
|
||||
localStorage.setItem('footer_html', data.footer_html);
|
||||
localStorage.setItem('quota_per_unit', data.quota_per_unit);
|
||||
localStorage.setItem('display_in_currency', data.display_in_currency);
|
||||
localStorage.setItem('enable_drawing', data.enable_drawing);
|
||||
localStorage.setItem('enable_data_export', data.enable_data_export);
|
||||
localStorage.setItem(
|
||||
'data_export_default_time',
|
||||
data.data_export_default_time,
|
||||
);
|
||||
localStorage.setItem(
|
||||
'default_collapse_sidebar',
|
||||
data.default_collapse_sidebar,
|
||||
);
|
||||
localStorage.setItem('mj_notify_enabled', data.mj_notify_enabled);
|
||||
if (data.chat_link) {
|
||||
localStorage.setItem('chat_link', data.chat_link);
|
||||
} else {
|
||||
localStorage.removeItem('chat_link');
|
||||
}
|
||||
if (data.chat_link2) {
|
||||
localStorage.setItem('chat_link2', data.chat_link2);
|
||||
} else {
|
||||
localStorage.removeItem('chat_link2');
|
||||
}
|
||||
setStatusData(data);
|
||||
} else {
|
||||
showError('无法正常连接至服务器!');
|
||||
}
|
||||
|
||||
@@ -18,3 +18,32 @@ export async function onGitHubOAuthClicked(github_client_id) {
|
||||
`https://github.com/login/oauth/authorize?client_id=${github_client_id}&state=${state}&scope=user:email`,
|
||||
);
|
||||
}
|
||||
|
||||
let channelModels = undefined;
|
||||
export async function loadChannelModels() {
|
||||
const res = await API.get('/api/models');
|
||||
const { success, data } = res.data;
|
||||
if (!success) {
|
||||
return;
|
||||
}
|
||||
channelModels = data;
|
||||
localStorage.setItem('channel_models', JSON.stringify(data));
|
||||
}
|
||||
|
||||
export function getChannelModels(type) {
|
||||
if (channelModels !== undefined && type in channelModels) {
|
||||
if (!channelModels[type]) {
|
||||
return [];
|
||||
}
|
||||
return channelModels[type];
|
||||
}
|
||||
let models = localStorage.getItem('channel_models');
|
||||
if (!models) {
|
||||
return [];
|
||||
}
|
||||
channelModels = JSON.parse(models);
|
||||
if (type in channelModels) {
|
||||
return channelModels[type];
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
@@ -36,13 +36,6 @@ export const CHANNEL_OPTIONS = [
|
||||
color: 'teal',
|
||||
label: 'Azure OpenAI',
|
||||
},
|
||||
{
|
||||
key: 11,
|
||||
text: 'Google PaLM2',
|
||||
value: 11,
|
||||
color: 'orange',
|
||||
label: 'Google PaLM2',
|
||||
},
|
||||
{
|
||||
key: 24,
|
||||
text: 'Google Gemini',
|
||||
@@ -86,16 +79,24 @@ export const CHANNEL_OPTIONS = [
|
||||
label: '智谱 ChatGLM',
|
||||
},
|
||||
{
|
||||
key: 16,
|
||||
key: 26,
|
||||
text: '智谱 GLM-4V',
|
||||
value: 26,
|
||||
color: 'purple',
|
||||
label: '智谱 GLM-4V',
|
||||
},
|
||||
{ key: 16, text: 'Moonshot', value: 25, color: 'green', label: 'Moonshot' },
|
||||
{
|
||||
key: 11,
|
||||
text: 'Google PaLM2',
|
||||
value: 11,
|
||||
color: 'orange',
|
||||
label: 'Google PaLM2',
|
||||
},
|
||||
{ key: 25, text: 'Moonshot', value: 25, color: 'green', label: 'Moonshot' },
|
||||
{ key: 19, text: '360 智脑', value: 19, color: 'blue', label: '360 智脑' },
|
||||
{ key: 23, text: '腾讯混元', value: 23, color: 'teal', label: '腾讯混元' },
|
||||
{ key: 31, text: '零一万物', value: 31, color: 'green', label: '零一万物' },
|
||||
{ key: 35, text: 'MiniMax', value: 35, color: 'green', label: 'MiniMax' },
|
||||
{ key: 8, text: '自定义渠道', value: 8, color: 'pink', label: '自定义渠道' },
|
||||
{
|
||||
key: 22,
|
||||
|
||||
33  web/src/helpers/data.js (new file)
@@ -0,0 +1,33 @@
export function setStatusData(data) {
  localStorage.setItem('status', JSON.stringify(data));
  localStorage.setItem('system_name', data.system_name);
  localStorage.setItem('logo', data.logo);
  localStorage.setItem('footer_html', data.footer_html);
  localStorage.setItem('quota_per_unit', data.quota_per_unit);
  localStorage.setItem('display_in_currency', data.display_in_currency);
  localStorage.setItem('enable_drawing', data.enable_drawing);
  localStorage.setItem('enable_data_export', data.enable_data_export);
  localStorage.setItem(
    'data_export_default_time',
    data.data_export_default_time,
  );
  localStorage.setItem(
    'default_collapse_sidebar',
    data.default_collapse_sidebar,
  );
  localStorage.setItem('mj_notify_enabled', data.mj_notify_enabled);
  if (data.chat_link) {
    localStorage.setItem('chat_link', data.chat_link);
  } else {
    localStorage.removeItem('chat_link');
  }
  if (data.chat_link2) {
    localStorage.setItem('chat_link2', data.chat_link2);
  } else {
    localStorage.removeItem('chat_link2');
  }
}

export function setUserData(data) {
  localStorage.setItem('user', JSON.stringify(data));
}
@@ -1,4 +1,3 @@
import { Label } from 'semantic-ui-react';
import { Tag } from '@douyinfe/semi-ui';

export function renderText(text, limit) {
@@ -135,6 +134,44 @@ export function renderQuota(quota, digits = 2) {
  return renderNumber(quota);
}

export function renderModelPrice(
  inputTokens,
  completionTokens,
  modelRatio,
  modelPrice = -1,
  completionRatio,
  groupRatio,
) {
  // 1 ratio = $0.002 / 1K tokens
  if (modelPrice !== -1) {
    return '模型价格:$' + modelPrice * groupRatio;
  } else {
    if (completionRatio === undefined) {
      completionRatio = 0;
    }
    // 这里的 *2 是因为 1倍率=0.002刀,请勿删除
    let inputRatioPrice = modelRatio * 2.0 * groupRatio;
    let completionRatioPrice = modelRatio * 2.0 * completionRatio * groupRatio;
    let price =
      (inputTokens / 1000000) * inputRatioPrice +
      (completionTokens / 1000000) * completionRatioPrice;
    return (
      <>
        <article>
          <p>提示 ${inputRatioPrice} / 1M tokens</p>
          <p>补全 ${completionRatioPrice} / 1M tokens</p>
          <p></p>
          <p>
            提示 {inputTokens} tokens / 1M tokens * ${inputRatioPrice} + 补全{' '}
            {completionTokens} tokens / 1M tokens * ${completionRatioPrice} = $
            {price.toFixed(6)}
          </p>
        </article>
      </>
    );
  }
}

export function renderQuotaWithPrompt(quota, digits) {
  let displayInCurrency = localStorage.getItem('display_in_currency');
  displayInCurrency = displayInCurrency === 'true';
@@ -212,6 +212,16 @@ export const verifyJSON = (str) => {
|
||||
return true;
|
||||
};
|
||||
|
||||
export function verifyJSONPromise(value) {
|
||||
try {
|
||||
JSON.parse(value);
|
||||
return Promise.resolve();
|
||||
} catch (e) {
|
||||
return Promise.reject('不是合法的 JSON 字符串');
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
export function shouldShowPrompt(id) {
|
||||
let prompt = localStorage.getItem(`prompt-${id}`);
|
||||
return !prompt;
|
||||
@@ -220,3 +230,28 @@ export function shouldShowPrompt(id) {
|
||||
export function setPromptShown(id) {
|
||||
localStorage.setItem(`prompt-${id}`, 'true');
|
||||
}
|
||||
|
||||
/**
|
||||
* 比较两个对象的属性,找出有变化的属性,并返回包含变化属性信息的数组
|
||||
* @param {Object} oldObject - 旧对象
|
||||
* @param {Object} newObject - 新对象
|
||||
* @return {Array} 包含变化属性信息的数组,每个元素是一个对象,包含 key, oldValue 和 newValue
|
||||
*/
|
||||
export function compareObjects(oldObject, newObject) {
|
||||
const changedProperties = [];
|
||||
|
||||
// 比较两个对象的属性
|
||||
for (const key in oldObject) {
|
||||
if (oldObject.hasOwnProperty(key) && newObject.hasOwnProperty(key)) {
|
||||
if (oldObject[key] !== newObject[key]) {
|
||||
changedProperties.push({
|
||||
key: key,
|
||||
oldValue: oldObject[key],
|
||||
newValue: newObject[key],
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return changedProperties;
|
||||
}
|
||||
|
||||
@@ -39,13 +39,16 @@ const About = () => {
        </Layout.Header>
        <Layout.Content>
          <p>可在设置页面设置关于内容,支持 HTML & Markdown</p>
          new-api项目仓库地址:
          New-API项目仓库地址:
          <a href='https://github.com/Calcium-Ion/new-api'>
            https://github.com/Calcium-Ion/new-api
          </a>
          <p>
            NewAPI © 2023 CalciumIon | 基于 One API v0.5.4 © 2023
            JustSong。本项目根据MIT许可证授权。
            JustSong。
          </p>
          <p>
            本项目根据MIT许可证授权,需在遵守Apache-2.0协议的前提下使用。
          </p>
        </Layout.Content>
      </Layout>

@@ -15,6 +15,7 @@ import {
|
||||
Space,
|
||||
Spin,
|
||||
Button,
|
||||
Tooltip,
|
||||
Input,
|
||||
Typography,
|
||||
Select,
|
||||
@@ -23,6 +24,8 @@ import {
|
||||
Banner,
|
||||
} from '@douyinfe/semi-ui';
|
||||
import { Divider } from 'semantic-ui-react';
|
||||
import { getChannelModels, loadChannelModels } from '../../components/utils.js';
|
||||
import axios from 'axios';
|
||||
|
||||
const MODEL_MAPPING_EXAMPLE = {
|
||||
'gpt-3.5-turbo-0301': 'gpt-3.5-turbo',
|
||||
@@ -34,6 +37,8 @@ const STATUS_CODE_MAPPING_EXAMPLE = {
|
||||
400: '500',
|
||||
};
|
||||
|
||||
const fetchButtonTips = "1. 新建渠道时,请求通过当前浏览器发出;2. 编辑已有渠道,请求通过后端服务器发出"
|
||||
|
||||
function type2secretPrompt(type) {
|
||||
// inputs.type === 15 ? '按照如下格式输入:APIKey|SecretKey' : (inputs.type === 18 ? '按照如下格式输入:APPID|APISecret|APIKey' : '请输入渠道对应的鉴权密钥')
|
||||
switch (type) {
|
||||
@@ -87,97 +92,9 @@ const EditChannel = (props) => {
|
||||
const [customModel, setCustomModel] = useState('');
|
||||
const handleInputChange = (name, value) => {
|
||||
setInputs((inputs) => ({ ...inputs, [name]: value }));
|
||||
if (name === 'type' && inputs.models.length === 0) {
|
||||
if (name === 'type') {
|
||||
let localModels = [];
|
||||
switch (value) {
|
||||
case 33:
|
||||
case 14:
|
||||
localModels = [
|
||||
'claude-instant-1.2',
|
||||
'claude-2',
|
||||
'claude-2.0',
|
||||
'claude-2.1',
|
||||
'claude-3-opus-20240229',
|
||||
'claude-3-sonnet-20240229',
|
||||
'claude-3-haiku-20240307',
|
||||
];
|
||||
break;
|
||||
case 11:
|
||||
localModels = ['PaLM-2'];
|
||||
break;
|
||||
case 15:
|
||||
localModels = [
|
||||
'ERNIE-Bot',
|
||||
'ERNIE-Bot-turbo',
|
||||
'ERNIE-Bot-4',
|
||||
'Embedding-V1',
|
||||
];
|
||||
break;
|
||||
case 17:
|
||||
localModels = [
|
||||
'qwen-turbo',
|
||||
'qwen-plus',
|
||||
'qwen-max',
|
||||
'qwen-max-longcontext',
|
||||
'text-embedding-v1',
|
||||
];
|
||||
break;
|
||||
case 16:
|
||||
localModels = ['chatglm_pro', 'chatglm_std', 'chatglm_lite'];
|
||||
break;
|
||||
case 18:
|
||||
localModels = [
|
||||
'SparkDesk',
|
||||
'SparkDesk-v1.1',
|
||||
'SparkDesk-v2.1',
|
||||
'SparkDesk-v3.1',
|
||||
'SparkDesk-v3.5',
|
||||
];
|
||||
break;
|
||||
case 19:
|
||||
localModels = [
|
||||
'360GPT_S2_V9',
|
||||
'embedding-bert-512-v1',
|
||||
'embedding_s1_v1',
|
||||
'semantic_similarity_s1_v1',
|
||||
];
|
||||
break;
|
||||
case 23:
|
||||
localModels = ['hunyuan'];
|
||||
break;
|
||||
case 24:
|
||||
localModels = [
|
||||
'gemini-1.0-pro-001',
|
||||
'gemini-1.0-pro-vision-001',
|
||||
'gemini-1.5-pro',
|
||||
'gemini-1.5-pro-latest',
|
||||
'gemini-pro',
|
||||
'gemini-pro-vision',
|
||||
];
|
||||
break;
|
||||
case 34:
|
||||
localModels = [
|
||||
'command-r',
|
||||
'command-r-plus',
|
||||
'command-light',
|
||||
'command-light-nightly',
|
||||
'command',
|
||||
'command-nightly',
|
||||
];
|
||||
break;
|
||||
case 25:
|
||||
localModels = [
|
||||
'moonshot-v1-8k',
|
||||
'moonshot-v1-32k',
|
||||
'moonshot-v1-128k',
|
||||
];
|
||||
break;
|
||||
case 26:
|
||||
localModels = ['glm-4', 'glm-4v', 'glm-3-turbo'];
|
||||
break;
|
||||
case 31:
|
||||
localModels = ['yi-34b-chat-0205', 'yi-34b-chat-200k', 'yi-vl-plus'];
|
||||
break;
|
||||
case 2:
|
||||
localModels = [
|
||||
'mj_imagine',
|
||||
@@ -186,6 +103,7 @@ const EditChannel = (props) => {
|
||||
'mj_blend',
|
||||
'mj_upscale',
|
||||
'mj_describe',
|
||||
'mj_uploads',
|
||||
];
|
||||
break;
|
||||
case 5:
|
||||
@@ -205,10 +123,17 @@ const EditChannel = (props) => {
|
||||
'mj_high_variation',
|
||||
'mj_low_variation',
|
||||
'mj_pan',
|
||||
'mj_uploads',
|
||||
];
|
||||
break;
|
||||
default:
|
||||
localModels = getChannelModels(value);
|
||||
break;
|
||||
}
|
||||
setInputs((inputs) => ({ ...inputs, models: localModels }));
|
||||
if (inputs.models.length === 0) {
|
||||
setInputs((inputs) => ({ ...inputs, models: localModels }));
|
||||
}
|
||||
setBasicModels(localModels);
|
||||
}
|
||||
//setAutoBan
|
||||
};
|
||||
@@ -244,6 +169,7 @@ const EditChannel = (props) => {
|
||||
} else {
|
||||
setAutoBan(true);
|
||||
}
|
||||
setBasicModels(getChannelModels(data.type));
|
||||
// console.log(data);
|
||||
} else {
|
||||
showError(message);
|
||||
@@ -251,12 +177,60 @@ const EditChannel = (props) => {
|
||||
setLoading(false);
|
||||
};
|
||||
|
||||
|
||||
const fetchUpstreamModelList = async (name) => {
|
||||
if (inputs["type"] !== 1) {
|
||||
showError("仅支持 OpenAI 接口格式")
|
||||
return;
|
||||
}
|
||||
setLoading(true)
|
||||
const models = inputs["models"] || []
|
||||
let err = false;
|
||||
if (isEdit) {
|
||||
const res = await API.get("/api/channel/fetch_models/" + channelId)
|
||||
if (res.data && res.data?.success) {
|
||||
models.push(...res.data.data)
|
||||
} else {
|
||||
err = true
|
||||
}
|
||||
} else {
|
||||
if (!inputs?.["key"]) {
|
||||
showError("请填写密钥")
|
||||
err = true
|
||||
} else {
|
||||
try {
|
||||
const host = new URL((inputs["base_url"] || "https://api.openai.com"))
|
||||
|
||||
const url = `https://${host.hostname}/v1/models`;
|
||||
const key = inputs["key"];
|
||||
const res = await axios.get(url, {
|
||||
headers: {
|
||||
'Authorization': `Bearer ${key}`
|
||||
}
|
||||
})
|
||||
if (res.data && res.data?.success) {
|
||||
models.push(...res.data.data.map((model) => model.id))
|
||||
} else {
|
||||
err = true
|
||||
}
|
||||
}
|
||||
catch (error) {
|
||||
err = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!err) {
|
||||
handleInputChange(name, Array.from(new Set(models)));
|
||||
showSuccess("获取模型列表成功");
|
||||
} else {
|
||||
showError('获取模型列表失败');
|
||||
}
|
||||
setLoading(false);
|
||||
}
|
||||
|
||||
const fetchModels = async () => {
|
||||
try {
|
||||
let res = await API.get(`/api/channel/models`);
|
||||
if (res === undefined) {
|
||||
return;
|
||||
}
|
||||
let localModelOptions = res.data.data.map((model) => ({
|
||||
label: model.id,
|
||||
value: model.id,
|
||||
@@ -312,6 +286,9 @@ const EditChannel = (props) => {
|
||||
loadChannel().then(() => {});
|
||||
} else {
|
||||
setInputs(originInputs);
|
||||
let localModels = getChannelModels(inputs.type);
|
||||
setBasicModels(localModels);
|
||||
setInputs((inputs) => ({ ...inputs, models: localModels }));
|
||||
}
|
||||
}, [props.editingChannel.id]);
|
||||
|
||||
@@ -373,24 +350,40 @@ const EditChannel = (props) => {
|
||||
}
|
||||
};
|
||||
|
||||
const addCustomModel = () => {
|
||||
const addCustomModels = () => {
|
||||
if (customModel.trim() === '') return;
|
||||
if (inputs.models.includes(customModel)) return showError('该模型已存在!');
|
||||
// 使用逗号分隔字符串,然后去除每个模型名称前后的空格
|
||||
const modelArray = customModel.split(',').map((model) => model.trim());
|
||||
|
||||
let localModels = [...inputs.models];
|
||||
localModels.push(customModel);
|
||||
let localModelOptions = [];
|
||||
localModelOptions.push({
|
||||
key: customModel,
|
||||
text: customModel,
|
||||
value: customModel,
|
||||
});
|
||||
setModelOptions((modelOptions) => {
|
||||
return [...modelOptions, ...localModelOptions];
|
||||
let localModelOptions = [...modelOptions];
|
||||
let hasError = false;
|
||||
|
||||
modelArray.forEach((model) => {
|
||||
// 检查模型是否已存在,且模型名称非空
|
||||
if (model && !localModels.includes(model)) {
|
||||
localModels.push(model); // 添加到模型列表
|
||||
localModelOptions.push({
|
||||
// 添加到下拉选项
|
||||
key: model,
|
||||
text: model,
|
||||
value: model,
|
||||
});
|
||||
} else if (model) {
|
||||
showError('某些模型已存在!');
|
||||
hasError = true;
|
||||
}
|
||||
});
|
||||
|
||||
if (hasError) return; // 如果有错误则终止操作
|
||||
|
||||
// 更新状态值
|
||||
setModelOptions(localModelOptions);
|
||||
setCustomModel('');
|
||||
handleInputChange('models', localModels);
|
||||
};
|
||||
|
||||
|
||||
return (
|
||||
<>
|
||||
<SideSheet
|
||||
@@ -493,11 +486,25 @@ const EditChannel = (props) => {
|
||||
{inputs.type === 8 && (
|
||||
<>
|
||||
<div style={{ marginTop: 10 }}>
|
||||
<Typography.Text strong>Base URL:</Typography.Text>
|
||||
<Banner
|
||||
type={'warning'}
|
||||
description={
|
||||
<>
|
||||
如果你对接的是上游One API或者New API等转发项目,请使用OpenAI类型,不要使用此类型,除非你知道你在做什么。
|
||||
</>
|
||||
}
|
||||
></Banner>
|
||||
</div>
|
||||
<div style={{ marginTop: 10 }}>
|
||||
<Typography.Text strong>
|
||||
完整的 Base URL,支持变量{'{model}'}:
|
||||
</Typography.Text>
|
||||
</div>
|
||||
<Input
|
||||
name='base_url'
|
||||
placeholder={'请输入自定义渠道的 Base URL'}
|
||||
placeholder={
|
||||
'请输入完整的URL,例如:https://api.openai.com/v1/chat/completions'
|
||||
}
|
||||
onChange={(value) => {
|
||||
handleInputChange('base_url', value);
|
||||
}}
|
||||
@@ -596,7 +603,7 @@ const EditChannel = (props) => {
|
||||
handleInputChange('models', basicModels);
|
||||
}}
|
||||
>
|
||||
填入基础模型
|
||||
填入相关模型
|
||||
</Button>
|
||||
<Button
|
||||
type='secondary'
|
||||
@@ -606,6 +613,16 @@ const EditChannel = (props) => {
|
||||
>
|
||||
填入所有模型
|
||||
</Button>
|
||||
<Tooltip content={fetchButtonTips}>
|
||||
<Button
|
||||
type='tertiary'
|
||||
onClick={() => {
|
||||
fetchUpstreamModelList('models');
|
||||
}}
|
||||
>
|
||||
获取模型列表
|
||||
</Button>
|
||||
</Tooltip>
|
||||
<Button
|
||||
type='warning'
|
||||
onClick={() => {
|
||||
@@ -617,7 +634,7 @@ const EditChannel = (props) => {
|
||||
</Space>
|
||||
<Input
|
||||
addonAfter={
|
||||
<Button type='primary' onClick={addCustomModel}>
|
||||
<Button type='primary' onClick={addCustomModels}>
|
||||
填入
|
||||
</Button>
|
||||
}
|
||||
|
||||
@@ -86,11 +86,21 @@ const Home = () => {
            <p>
              源码:
              <a
                href='https://github.com/songquanpeng/one-api'
                href='https://github.com/Calcium-Ion/new-api'
                target='_blank'
                rel='noreferrer'
              >
                https://github.com/songquanpeng/one-api
                https://github.com/Calcium-Ion/new-api
              </a>
            </p>
            <p>
              协议:
              <a
                href='https://www.apache.org/licenses/LICENSE-2.0'
                target='_blank'
                rel='noreferrer'
              >
                Apache-2.0 License
              </a>
            </p>
            <p>启动时间:{getStartTimeString()}</p>

10
web/src/pages/Pricing/index.js
Normal file
@@ -0,0 +1,10 @@
import React from 'react';
import ModelPricing from '../../components/ModelPricing.js';

const Pricing = () => (
  <>
    <ModelPricing />
  </>
);

export default Pricing;
156
web/src/pages/Setting/Operation/SettingsCreditLimit.js
Normal file
@@ -0,0 +1,156 @@
|
||||
import React, { useEffect, useState, useRef } from 'react';
|
||||
import { Button, Col, Form, Row, Spin } from '@douyinfe/semi-ui';
|
||||
import {
|
||||
compareObjects,
|
||||
API,
|
||||
showError,
|
||||
showSuccess,
|
||||
showWarning,
|
||||
} from '../../../helpers';
|
||||
|
||||
export default function SettingsCreditLimit(props) {
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [inputs, setInputs] = useState({
|
||||
QuotaForNewUser: '',
|
||||
PreConsumedQuota: '',
|
||||
QuotaForInviter: '',
|
||||
QuotaForInvitee: '',
|
||||
});
|
||||
const refForm = useRef();
|
||||
const [inputsRow, setInputsRow] = useState(inputs);
|
||||
|
||||
function onSubmit() {
|
||||
const updateArray = compareObjects(inputs, inputsRow);
|
||||
if (!updateArray.length) return showWarning('你似乎并没有修改什么');
|
||||
const requestQueue = updateArray.map((item) => {
|
||||
let value = '';
|
||||
if (typeof inputs[item.key] === 'boolean') {
|
||||
value = String(inputs[item.key]);
|
||||
} else {
|
||||
value = inputs[item.key];
|
||||
}
|
||||
return API.put('/api/option/', {
|
||||
key: item.key,
|
||||
value,
|
||||
});
|
||||
});
|
||||
setLoading(true);
|
||||
Promise.all(requestQueue)
|
||||
.then((res) => {
|
||||
if (requestQueue.length === 1) {
|
||||
if (res.includes(undefined)) return;
|
||||
} else if (requestQueue.length > 1) {
|
||||
if (res.includes(undefined)) return showError('部分保存失败,请重试');
|
||||
}
|
||||
showSuccess('保存成功');
|
||||
props.refresh();
|
||||
})
|
||||
.catch(() => {
|
||||
showError('保存失败,请重试');
|
||||
})
|
||||
.finally(() => {
|
||||
setLoading(false);
|
||||
});
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
const currentInputs = {};
|
||||
for (let key in props.options) {
|
||||
if (Object.keys(inputs).includes(key)) {
|
||||
currentInputs[key] = props.options[key];
|
||||
}
|
||||
}
|
||||
setInputs(currentInputs);
|
||||
setInputsRow(structuredClone(currentInputs));
|
||||
refForm.current.setValues(currentInputs);
|
||||
}, [props.options]);
|
||||
return (
|
||||
<>
|
||||
<Spin spinning={loading}>
|
||||
<Form
|
||||
values={inputs}
|
||||
getFormApi={(formAPI) => (refForm.current = formAPI)}
|
||||
style={{ marginBottom: 15 }}
|
||||
>
|
||||
<Form.Section text={'额度设置'}>
|
||||
<Row gutter={16}>
|
||||
<Col span={6}>
|
||||
<Form.InputNumber
|
||||
label={'新用户初始额度'}
|
||||
field={'QuotaForNewUser'}
|
||||
step={1}
|
||||
min={0}
|
||||
suffix={'Token'}
|
||||
placeholder={''}
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
QuotaForNewUser: String(value),
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
<Col span={6}>
|
||||
<Form.InputNumber
|
||||
label={'请求预扣费额度'}
|
||||
field={'PreConsumedQuota'}
|
||||
step={1}
|
||||
min={0}
|
||||
suffix={'Token'}
|
||||
extraText={'请求结束后多退少补'}
|
||||
placeholder={''}
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
PreConsumedQuota: String(value),
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
<Col span={6}>
|
||||
<Form.InputNumber
|
||||
label={'邀请新用户奖励额度'}
|
||||
field={'QuotaForInviter'}
|
||||
step={1}
|
||||
min={0}
|
||||
suffix={'Token'}
|
||||
extraText={''}
|
||||
placeholder={'例如:2000'}
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
QuotaForInviter: String(value),
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
<Col span={6}>
|
||||
<Form.InputNumber
|
||||
label={'新用户使用邀请码奖励额度'}
|
||||
field={'QuotaForInvitee'}
|
||||
step={1}
|
||||
min={0}
|
||||
suffix={'Token'}
|
||||
extraText={''}
|
||||
placeholder={'例如:1000'}
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
QuotaForInvitee: String(value),
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
</Row>
|
||||
|
||||
<Row>
|
||||
<Button size='large' onClick={onSubmit}>
|
||||
保存额度设置
|
||||
</Button>
|
||||
</Row>
|
||||
</Form.Section>
|
||||
</Form>
|
||||
</Spin>
|
||||
</>
|
||||
);
|
||||
}
|
||||
147
web/src/pages/Setting/Operation/SettingsDataDashboard.js
Normal file
@@ -0,0 +1,147 @@
|
||||
import React, { useEffect, useState, useRef } from 'react';
|
||||
import { Button, Col, Form, Row, Spin, Tag } from '@douyinfe/semi-ui';
|
||||
import {
|
||||
compareObjects,
|
||||
API,
|
||||
showError,
|
||||
showSuccess,
|
||||
showWarning,
|
||||
} from '../../../helpers';
|
||||
|
||||
export default function DataDashboard(props) {
|
||||
const optionsDataExportDefaultTime = [
|
||||
{ key: 'hour', label: '小时', value: 'hour' },
|
||||
{ key: 'day', label: '天', value: 'day' },
|
||||
{ key: 'week', label: '周', value: 'week' },
|
||||
];
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [inputs, setInputs] = useState({
|
||||
DataExportEnabled: false,
|
||||
DataExportInterval: '',
|
||||
DataExportDefaultTime: '',
|
||||
});
|
||||
const refForm = useRef();
|
||||
const [inputsRow, setInputsRow] = useState(inputs);
|
||||
|
||||
function onSubmit() {
|
||||
const updateArray = compareObjects(inputs, inputsRow);
|
||||
if (!updateArray.length) return showWarning('你似乎并没有修改什么');
|
||||
const requestQueue = updateArray.map((item) => {
|
||||
let value = '';
|
||||
if (typeof inputs[item.key] === 'boolean') {
|
||||
value = String(inputs[item.key]);
|
||||
} else {
|
||||
value = inputs[item.key];
|
||||
}
|
||||
return API.put('/api/option/', {
|
||||
key: item.key,
|
||||
value,
|
||||
});
|
||||
});
|
||||
setLoading(true);
|
||||
Promise.all(requestQueue)
|
||||
.then((res) => {
|
||||
if (requestQueue.length === 1) {
|
||||
if (res.includes(undefined)) return;
|
||||
} else if (requestQueue.length > 1) {
|
||||
if (res.includes(undefined)) return showError('部分保存失败,请重试');
|
||||
}
|
||||
showSuccess('保存成功');
|
||||
props.refresh();
|
||||
})
|
||||
.catch(() => {
|
||||
showError('保存失败,请重试');
|
||||
})
|
||||
.finally(() => {
|
||||
setLoading(false);
|
||||
});
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
const currentInputs = {};
|
||||
for (let key in props.options) {
|
||||
if (Object.keys(inputs).includes(key)) {
|
||||
currentInputs[key] = props.options[key];
|
||||
}
|
||||
}
|
||||
setInputs(currentInputs);
|
||||
setInputsRow(structuredClone(currentInputs));
|
||||
refForm.current.setValues(currentInputs);
|
||||
localStorage.setItem(
|
||||
'data_export_default_time',
|
||||
String(inputs.DataExportDefaultTime),
|
||||
);
|
||||
}, [props.options]);
|
||||
|
||||
return (
|
||||
<>
|
||||
<Spin spinning={loading}>
|
||||
<Form
|
||||
values={inputs}
|
||||
getFormApi={(formAPI) => (refForm.current = formAPI)}
|
||||
style={{ marginBottom: 15 }}
|
||||
>
|
||||
<Form.Section text={'数据看板设置'}>
|
||||
<Row gutter={16}>
|
||||
<Col span={8}>
|
||||
<Form.Switch
|
||||
field={'DataExportEnabled'}
|
||||
label={'启用数据看板(实验性)'}
|
||||
size='large'
|
||||
checkedText='|'
|
||||
uncheckedText='〇'
|
||||
onChange={(value) => {
|
||||
setInputs({
|
||||
...inputs,
|
||||
DataExportEnabled: value,
|
||||
});
|
||||
}}
|
||||
/>
|
||||
</Col>
|
||||
</Row>
|
||||
<Row>
|
||||
<Col span={8}>
|
||||
<Form.InputNumber
|
||||
label={'数据看板更新间隔 '}
|
||||
step={1}
|
||||
min={1}
|
||||
suffix={'分钟'}
|
||||
extraText={'设置过短会影响数据库性能'}
|
||||
placeholder={'数据看板更新间隔'}
|
||||
field={'DataExportInterval'}
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
DataExportInterval: String(value),
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
<Col span={8}>
|
||||
<Form.Select
|
||||
label='数据看板默认时间粒度'
|
||||
optionList={optionsDataExportDefaultTime}
|
||||
field={'DataExportDefaultTime'}
|
||||
extraText={'仅修改展示粒度,统计精确到小时'}
|
||||
placeholder={'数据看板默认时间粒度'}
|
||||
style={{ width: 180 }}
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
DataExportDefaultTime: String(value),
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
</Row>
|
||||
<Row>
|
||||
<Button size='large' onClick={onSubmit}>
|
||||
保存数据看板设置
|
||||
</Button>
|
||||
</Row>
|
||||
</Form.Section>
|
||||
</Form>
|
||||
</Spin>
|
||||
</>
|
||||
);
|
||||
}
|
||||
170
web/src/pages/Setting/Operation/SettingsDrawing.js
Normal file
@@ -0,0 +1,170 @@
|
||||
import React, { useEffect, useState, useRef } from 'react';
|
||||
import { Button, Col, Form, Row, Spin, Tag } from '@douyinfe/semi-ui';
|
||||
import {
|
||||
compareObjects,
|
||||
API,
|
||||
showError,
|
||||
showSuccess,
|
||||
showWarning,
|
||||
} from '../../../helpers';
|
||||
|
||||
export default function SettingsDrawing(props) {
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [inputs, setInputs] = useState({
|
||||
DrawingEnabled: false,
|
||||
MjNotifyEnabled: false,
|
||||
MjAccountFilterEnabled: false,
|
||||
MjForwardUrlEnabled: false,
|
||||
MjModeClearEnabled: false,
|
||||
});
|
||||
const refForm = useRef();
|
||||
const [inputsRow, setInputsRow] = useState(inputs);
|
||||
|
||||
function onSubmit() {
|
||||
const updateArray = compareObjects(inputs, inputsRow);
|
||||
if (!updateArray.length) return showWarning('你似乎并没有修改什么');
|
||||
const requestQueue = updateArray.map((item) => {
|
||||
let value = '';
|
||||
if (typeof inputs[item.key] === 'boolean') {
|
||||
value = String(inputs[item.key]);
|
||||
} else {
|
||||
value = inputs[item.key];
|
||||
}
|
||||
return API.put('/api/option/', {
|
||||
key: item.key,
|
||||
value,
|
||||
});
|
||||
});
|
||||
setLoading(true);
|
||||
Promise.all(requestQueue)
|
||||
.then((res) => {
|
||||
if (requestQueue.length === 1) {
|
||||
if (res.includes(undefined)) return;
|
||||
} else if (requestQueue.length > 1) {
|
||||
if (res.includes(undefined)) return showError('部分保存失败,请重试');
|
||||
}
|
||||
showSuccess('保存成功');
|
||||
props.refresh();
|
||||
})
|
||||
.catch(() => {
|
||||
showError('保存失败,请重试');
|
||||
})
|
||||
.finally(() => {
|
||||
setLoading(false);
|
||||
});
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
const currentInputs = {};
|
||||
for (let key in props.options) {
|
||||
if (Object.keys(inputs).includes(key)) {
|
||||
currentInputs[key] = props.options[key];
|
||||
}
|
||||
}
|
||||
setInputs(currentInputs);
|
||||
setInputsRow(structuredClone(currentInputs));
|
||||
refForm.current.setValues(currentInputs);
|
||||
localStorage.setItem('mj_notify_enabled', String(inputs.MjNotifyEnabled));
|
||||
}, [props.options]);
|
||||
return (
|
||||
<>
|
||||
<Spin spinning={loading}>
|
||||
<Form
|
||||
values={inputs}
|
||||
getFormApi={(formAPI) => (refForm.current = formAPI)}
|
||||
style={{ marginBottom: 15 }}
|
||||
>
|
||||
<Form.Section text={'绘图设置'}>
|
||||
<Row gutter={16}>
|
||||
<Col span={8}>
|
||||
<Form.Switch
|
||||
field={'DrawingEnabled'}
|
||||
label={'启用绘图功能'}
|
||||
size='large'
|
||||
checkedText='|'
|
||||
uncheckedText='〇'
|
||||
onChange={(value) => {
|
||||
setInputs({
|
||||
...inputs,
|
||||
DrawingEnabled: value,
|
||||
});
|
||||
}}
|
||||
/>
|
||||
</Col>
|
||||
<Col span={8}>
|
||||
<Form.Switch
|
||||
field={'MjNotifyEnabled'}
|
||||
label={'允许回调(会泄露服务器 IP 地址)'}
|
||||
size='large'
|
||||
checkedText='|'
|
||||
uncheckedText='〇'
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
MjNotifyEnabled: value,
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
<Col span={8}>
|
||||
<Form.Switch
|
||||
field={'MjAccountFilterEnabled'}
|
||||
label={'允许 AccountFilter 参数'}
|
||||
size='large'
|
||||
checkedText='|'
|
||||
uncheckedText='〇'
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
MjAccountFilterEnabled: value,
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
<Col span={8}>
|
||||
<Form.Switch
|
||||
field={'MjForwardUrlEnabled'}
|
||||
label={'开启之后将上游地址替换为服务器地址'}
|
||||
size='large'
|
||||
checkedText='|'
|
||||
uncheckedText='〇'
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
MjForwardUrlEnabled: value,
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
<Col span={8}>
|
||||
<Form.Switch
|
||||
field={'MjModeClearEnabled'}
|
||||
label={
|
||||
<>
|
||||
开启之后会清除用户提示词中的 <Tag>--fast</Tag> 、
|
||||
<Tag>--relax</Tag> 以及 <Tag>--turbo</Tag> 参数
|
||||
</>
|
||||
}
|
||||
size='large'
|
||||
checkedText='|'
|
||||
uncheckedText='〇'
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
MjModeClearEnabled: value,
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
</Row>
|
||||
<Row>
|
||||
<Button size='large' onClick={onSubmit}>
|
||||
保存绘图设置
|
||||
</Button>
|
||||
</Row>
|
||||
</Form.Section>
|
||||
</Form>
|
||||
</Spin>
|
||||
</>
|
||||
);
|
||||
}
|
||||
192
web/src/pages/Setting/Operation/SettingsGeneral.js
Normal file
@@ -0,0 +1,192 @@
|
||||
import React, { useEffect, useState, useRef } from 'react';
|
||||
import { Button, Col, Form, Row, Spin } from '@douyinfe/semi-ui';
|
||||
import {
|
||||
compareObjects,
|
||||
API,
|
||||
showError,
|
||||
showSuccess,
|
||||
showWarning,
|
||||
} from '../../../helpers';
|
||||
|
||||
export default function GeneralSettings(props) {
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [inputs, setInputs] = useState({
|
||||
TopUpLink: '',
|
||||
ChatLink: '',
|
||||
ChatLink2: '',
|
||||
QuotaPerUnit: '',
|
||||
RetryTimes: '',
|
||||
DisplayInCurrencyEnabled: false,
|
||||
DisplayTokenStatEnabled: false,
|
||||
DefaultCollapseSidebar: false,
|
||||
});
|
||||
const refForm = useRef();
|
||||
const [inputsRow, setInputsRow] = useState(inputs);
|
||||
function onChange(value, e) {
|
||||
const name = e.target.id;
|
||||
setInputs((inputs) => ({ ...inputs, [name]: value }));
|
||||
}
|
||||
function onSubmit() {
|
||||
const updateArray = compareObjects(inputs, inputsRow);
|
||||
if (!updateArray.length) return showWarning('你似乎并没有修改什么');
|
||||
const requestQueue = updateArray.map((item) => {
|
||||
let value = '';
|
||||
if (typeof inputs[item.key] === 'boolean') {
|
||||
value = String(inputs[item.key]);
|
||||
} else {
|
||||
value = inputs[item.key];
|
||||
}
|
||||
return API.put('/api/option/', {
|
||||
key: item.key,
|
||||
value,
|
||||
});
|
||||
});
|
||||
setLoading(true);
|
||||
Promise.all(requestQueue)
|
||||
.then((res) => {
|
||||
if (requestQueue.length === 1) {
|
||||
if (res.includes(undefined)) return;
|
||||
} else if (requestQueue.length > 1) {
|
||||
if (res.includes(undefined)) return showError('部分保存失败,请重试');
|
||||
}
|
||||
showSuccess('保存成功');
|
||||
props.refresh();
|
||||
})
|
||||
.catch(() => {
|
||||
showError('保存失败,请重试');
|
||||
})
|
||||
.finally(() => {
|
||||
setLoading(false);
|
||||
});
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
const currentInputs = {};
|
||||
for (let key in props.options) {
|
||||
if (Object.keys(inputs).includes(key)) {
|
||||
currentInputs[key] = props.options[key];
|
||||
}
|
||||
}
|
||||
setInputs(currentInputs);
|
||||
setInputsRow(structuredClone(currentInputs));
|
||||
refForm.current.setValues(currentInputs);
|
||||
}, [props.options]);
|
||||
return (
|
||||
<>
|
||||
<Spin spinning={loading}>
|
||||
<Form
|
||||
values={inputs}
|
||||
getFormApi={(formAPI) => (refForm.current = formAPI)}
|
||||
style={{ marginBottom: 15 }}
|
||||
>
|
||||
<Form.Section text={'通用设置'}>
|
||||
<Row gutter={16}>
|
||||
<Col span={8}>
|
||||
<Form.Input
|
||||
field={'TopUpLink'}
|
||||
label={'充值链接'}
|
||||
initValue={''}
|
||||
placeholder={'例如发卡网站的购买链接'}
|
||||
onChange={onChange}
|
||||
showClear
|
||||
/>
|
||||
</Col>
|
||||
<Col span={8}>
|
||||
<Form.Input
|
||||
field={'ChatLink'}
|
||||
label={'默认聊天页面链接'}
|
||||
initValue={''}
|
||||
placeholder='例如 ChatGPT Next Web 的部署地址'
|
||||
onChange={onChange}
|
||||
showClear
|
||||
/>
|
||||
</Col>
|
||||
<Col span={8}>
|
||||
<Form.Input
|
||||
field={'ChatLink2'}
|
||||
label={'聊天页面 2 链接'}
|
||||
initValue={''}
|
||||
placeholder='例如 ChatGPT Next Web 的部署地址'
|
||||
onChange={onChange}
|
||||
showClear
|
||||
/>
|
||||
</Col>
|
||||
<Col span={8}>
|
||||
<Form.Input
|
||||
field={'QuotaPerUnit'}
|
||||
label={'单位美元额度'}
|
||||
initValue={''}
|
||||
placeholder='一单位货币能兑换的额度'
|
||||
onChange={onChange}
|
||||
showClear
|
||||
/>
|
||||
</Col>
|
||||
<Col span={8}>
|
||||
<Form.Input
|
||||
field={'RetryTimes'}
|
||||
label={'失败重试次数'}
|
||||
initValue={''}
|
||||
placeholder='失败重试次数'
|
||||
onChange={onChange}
|
||||
showClear
|
||||
/>
|
||||
</Col>
|
||||
</Row>
|
||||
<Row gutter={16}>
|
||||
<Col span={8}>
|
||||
<Form.Switch
|
||||
field={'DisplayInCurrencyEnabled'}
|
||||
label={'以货币形式显示额度'}
|
||||
size='large'
|
||||
checkedText='|'
|
||||
uncheckedText='〇'
|
||||
onChange={(value) => {
|
||||
setInputs({
|
||||
...inputs,
|
||||
DisplayInCurrencyEnabled: value,
|
||||
});
|
||||
}}
|
||||
/>
|
||||
</Col>
|
||||
<Col span={8}>
|
||||
<Form.Switch
|
||||
field={'DisplayTokenStatEnabled'}
|
||||
label={'Billing 相关 API 显示令牌额度而非用户额度'}
|
||||
size='large'
|
||||
checkedText='|'
|
||||
uncheckedText='〇'
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
DisplayTokenStatEnabled: value,
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
<Col span={8}>
|
||||
<Form.Switch
|
||||
field={'DefaultCollapseSidebar'}
|
||||
label={'默认折叠侧边栏'}
|
||||
size='large'
|
||||
checkedText='|'
|
||||
uncheckedText='〇'
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
DefaultCollapseSidebar: value,
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
</Row>
|
||||
<Row>
|
||||
<Button size='large' onClick={onSubmit}>
|
||||
保存通用设置
|
||||
</Button>
|
||||
</Row>
|
||||
</Form.Section>
|
||||
</Form>
|
||||
</Spin>
|
||||
</>
|
||||
);
|
||||
}
|
||||
147
web/src/pages/Setting/Operation/SettingsLog.js
Normal file
@@ -0,0 +1,147 @@
|
||||
import React, { useEffect, useState, useRef } from 'react';
|
||||
import { Button, Col, Form, Row, Spin, DatePicker } from '@douyinfe/semi-ui';
|
||||
import dayjs from 'dayjs';
|
||||
import {
|
||||
compareObjects,
|
||||
API,
|
||||
showError,
|
||||
showSuccess,
|
||||
showWarning,
|
||||
} from '../../../helpers';
|
||||
|
||||
export default function SettingsLog(props) {
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [loadingCleanHistoryLog, setLoadingCleanHistoryLog] = useState(false);
|
||||
const [inputs, setInputs] = useState({
|
||||
LogConsumeEnabled: false,
|
||||
historyTimestamp: dayjs().subtract(1, 'month').toDate(),
|
||||
});
|
||||
const refForm = useRef();
|
||||
const [inputsRow, setInputsRow] = useState(inputs);
|
||||
|
||||
function onSubmit() {
|
||||
const updateArray = compareObjects(inputs, inputsRow).filter(
|
||||
(item) => item.key !== 'historyTimestamp',
|
||||
);
|
||||
|
||||
if (!updateArray.length) return showWarning('你似乎并没有修改什么');
|
||||
const requestQueue = updateArray.map((item) => {
|
||||
let value = '';
|
||||
if (typeof inputs[item.key] === 'boolean') {
|
||||
value = String(inputs[item.key]);
|
||||
} else {
|
||||
value = inputs[item.key];
|
||||
}
|
||||
return API.put('/api/option/', {
|
||||
key: item.key,
|
||||
value,
|
||||
});
|
||||
});
|
||||
setLoading(true);
|
||||
Promise.all(requestQueue)
|
||||
.then((res) => {
|
||||
if (requestQueue.length === 1) {
|
||||
if (res.includes(undefined)) return;
|
||||
} else if (requestQueue.length > 1) {
|
||||
if (res.includes(undefined)) return showError('部分保存失败,请重试');
|
||||
}
|
||||
showSuccess('保存成功');
|
||||
props.refresh();
|
||||
})
|
||||
.catch(() => {
|
||||
showError('保存失败,请重试');
|
||||
})
|
||||
.finally(() => {
|
||||
setLoading(false);
|
||||
});
|
||||
}
|
||||
async function onCleanHistoryLog() {
|
||||
try {
|
||||
setLoadingCleanHistoryLog(true);
|
||||
if (!inputs.historyTimestamp) throw new Error('请选择日志记录时间');
|
||||
const res = await API.delete(
|
||||
`/api/log/?target_timestamp=${Date.parse(inputs.historyTimestamp) / 1000}`,
|
||||
);
|
||||
const { success, message, data } = res.data;
|
||||
if (success) {
|
||||
showSuccess(`${data} 条日志已清理!`);
|
||||
return;
|
||||
} else {
|
||||
throw new Error('日志清理失败:' + message);
|
||||
}
|
||||
} catch (error) {
|
||||
showError(error.message);
|
||||
} finally {
|
||||
setLoadingCleanHistoryLog(false);
|
||||
}
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
const currentInputs = {};
|
||||
for (let key in props.options) {
|
||||
if (Object.keys(inputs).includes(key)) {
|
||||
currentInputs[key] = props.options[key];
|
||||
}
|
||||
}
|
||||
currentInputs['historyTimestamp'] = inputs.historyTimestamp;
|
||||
setInputs(Object.assign(inputs, currentInputs));
|
||||
setInputsRow(structuredClone(currentInputs));
|
||||
refForm.current.setValues(currentInputs);
|
||||
}, [props.options]);
|
||||
return (
|
||||
<>
|
||||
<Spin spinning={loading}>
|
||||
<Form
|
||||
values={inputs}
|
||||
getFormApi={(formAPI) => (refForm.current = formAPI)}
|
||||
style={{ marginBottom: 15 }}
|
||||
>
|
||||
<Form.Section text={'日志设置'}>
|
||||
<Row gutter={16}>
|
||||
<Col span={8}>
|
||||
<Form.Switch
|
||||
field={'LogConsumeEnabled'}
|
||||
label={'启用额度消费日志记录'}
|
||||
size='large'
|
||||
checkedText='|'
|
||||
uncheckedText='〇'
|
||||
onChange={(value) => {
|
||||
setInputs({
|
||||
...inputs,
|
||||
LogConsumeEnabled: value,
|
||||
});
|
||||
}}
|
||||
/>
|
||||
</Col>
|
||||
<Col span={8}>
|
||||
<Spin spinning={loadingCleanHistoryLog}>
|
||||
<Form.DatePicker
|
||||
label='日志记录时间'
|
||||
field={'historyTimestamp'}
|
||||
type='dateTime'
|
||||
inputReadOnly={true}
|
||||
onChange={(value) => {
|
||||
setInputs({
|
||||
...inputs,
|
||||
historyTimestamp: value,
|
||||
});
|
||||
}}
|
||||
/>
|
||||
<Button size='default' onClick={onCleanHistoryLog}>
|
||||
清除历史日志
|
||||
</Button>
|
||||
</Spin>
|
||||
</Col>
|
||||
</Row>
|
||||
|
||||
<Row>
|
||||
<Button size='large' onClick={onSubmit}>
|
||||
保存日志设置
|
||||
</Button>
|
||||
</Row>
|
||||
</Form.Section>
|
||||
</Form>
|
||||
</Spin>
|
||||
</>
|
||||
);
|
||||
}
|
||||
238
web/src/pages/Setting/Operation/SettingsMagnification.js
Normal file
@@ -0,0 +1,238 @@
|
||||
import React, { useEffect, useState, useRef } from 'react';
|
||||
import { Button, Col, Form, Popconfirm, Row, Space, Spin } from '@douyinfe/semi-ui';
|
||||
import {
|
||||
compareObjects,
|
||||
API,
|
||||
showError,
|
||||
showSuccess,
|
||||
showWarning,
|
||||
verifyJSON,
|
||||
verifyJSONPromise
|
||||
} from '../../../helpers';
|
||||
|
||||
export default function SettingsMagnification(props) {
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [inputs, setInputs] = useState({
|
||||
ModelPrice: '',
|
||||
ModelRatio: '',
|
||||
CompletionRatio: '',
|
||||
GroupRatio: ''
|
||||
});
|
||||
const refForm = useRef();
|
||||
const [inputsRow, setInputsRow] = useState(inputs);
|
||||
|
||||
async function onSubmit() {
|
||||
try {
|
||||
console.log('Starting validation...');
|
||||
await refForm.current.validate().then(() => {
|
||||
console.log('Validation passed');
|
||||
const updateArray = compareObjects(inputs, inputsRow);
|
||||
if (!updateArray.length) return showWarning('你似乎并没有修改什么');
|
||||
const requestQueue = updateArray.map((item) => {
|
||||
let value = '';
|
||||
if (typeof inputs[item.key] === 'boolean') {
|
||||
value = String(inputs[item.key]);
|
||||
} else {
|
||||
value = inputs[item.key];
|
||||
}
|
||||
return API.put('/api/option/', {
|
||||
key: item.key,
|
||||
value
|
||||
});
|
||||
});
|
||||
setLoading(true);
|
||||
Promise.all(requestQueue)
|
||||
.then((res) => {
|
||||
if (requestQueue.length === 1) {
|
||||
if (res.includes(undefined)) return;
|
||||
} else if (requestQueue.length > 1) {
|
||||
if (res.includes(undefined))
|
||||
return showError('部分保存失败,请重试');
|
||||
}
|
||||
showSuccess('保存成功');
|
||||
props.refresh();
|
||||
})
|
||||
.catch(() => {
|
||||
showError('保存失败,请重试');
|
||||
})
|
||||
.finally(() => {
|
||||
setLoading(false);
|
||||
});
|
||||
}).catch((error) => {
|
||||
console.error('Validation failed:', error);
|
||||
showError('请检查输入');
|
||||
});
|
||||
} catch (error) {
|
||||
showError('请检查输入');
|
||||
console.error(error);
|
||||
}
|
||||
}
|
||||
|
||||
async function resetModelRatio() {
|
||||
try {
|
||||
let res = await API.post(`/api/option/rest_model_ratio`);
|
||||
// return {success, message}
|
||||
if (res.data.success) {
|
||||
showSuccess(res.data.message);
|
||||
props.refresh();
|
||||
} else {
|
||||
showError(res.data.message);
|
||||
}
|
||||
} catch (error) {
|
||||
showError(error);
|
||||
}
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
const currentInputs = {};
|
||||
for (let key in props.options) {
|
||||
if (Object.keys(inputs).includes(key)) {
|
||||
currentInputs[key] = props.options[key];
|
||||
}
|
||||
}
|
||||
setInputs(currentInputs);
|
||||
setInputsRow(structuredClone(currentInputs));
|
||||
refForm.current.setValues(currentInputs);
|
||||
}, [props.options]);
|
||||
|
||||
return (
|
||||
<Spin spinning={loading}>
|
||||
<Form
|
||||
values={inputs}
|
||||
getFormApi={(formAPI) => (refForm.current = formAPI)}
|
||||
style={{ marginBottom: 15 }}
|
||||
>
|
||||
<Form.Section text={'倍率设置'}>
|
||||
<Row gutter={16}>
|
||||
<Col span={16}>
|
||||
<Form.TextArea
|
||||
label={'模型固定价格'}
|
||||
extraText={'一次调用消耗多少刀,优先级大于模型倍率'}
|
||||
placeholder={
|
||||
'为一个 JSON 文本,键为模型名称,值为一次调用消耗多少刀,比如 "gpt-4-gizmo-*": 0.1,一次消耗0.1刀'
|
||||
}
|
||||
field={'ModelPrice'}
|
||||
autosize={{ minRows: 6, maxRows: 12 }}
|
||||
trigger='blur'
|
||||
stopValidateWithError
|
||||
rules={[
|
||||
{
|
||||
validator: (rule, value) => {
|
||||
return verifyJSON(value);
|
||||
},
|
||||
message: '不是合法的 JSON 字符串'
|
||||
}
|
||||
]}
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
ModelPrice: value
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
</Row>
|
||||
<Row gutter={16}>
|
||||
<Col span={16}>
|
||||
<Form.TextArea
|
||||
label={'模型倍率'}
|
||||
extraText={''}
|
||||
placeholder={'为一个 JSON 文本,键为模型名称,值为倍率'}
|
||||
field={'ModelRatio'}
|
||||
autosize={{ minRows: 6, maxRows: 12 }}
|
||||
trigger='blur'
|
||||
stopValidateWithError
|
||||
rules={[
|
||||
{
|
||||
validator: (rule, value) => {
|
||||
return verifyJSON(value);
|
||||
},
|
||||
message: '不是合法的 JSON 字符串'
|
||||
}
|
||||
]}
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
ModelRatio: value
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
</Row>
|
||||
<Row gutter={16}>
|
||||
<Col span={16}>
|
||||
<Form.TextArea
|
||||
label={'模型补全倍率(仅对自定义模型有效)'}
|
||||
extraText={'仅对自定义模型有效'}
|
||||
placeholder={'为一个 JSON 文本,键为模型名称,值为倍率'}
|
||||
field={'CompletionRatio'}
|
||||
autosize={{ minRows: 6, maxRows: 12 }}
|
||||
trigger='blur'
|
||||
stopValidateWithError
|
||||
rules={[
|
||||
{
|
||||
validator: (rule, value) => {
|
||||
return verifyJSON(value);
|
||||
},
|
||||
message: '不是合法的 JSON 字符串'
|
||||
}
|
||||
]}
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
CompletionRatio: value
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
</Row>
|
||||
<Row gutter={16}>
|
||||
<Col span={16}>
|
||||
<Form.TextArea
|
||||
label={'分组倍率'}
|
||||
extraText={''}
|
||||
placeholder={'为一个 JSON 文本,键为分组名称,值为倍率'}
|
||||
field={'GroupRatio'}
|
||||
autosize={{ minRows: 6, maxRows: 12 }}
|
||||
trigger='blur'
|
||||
stopValidateWithError
|
||||
rules={[
|
||||
{
|
||||
validator: (rule, value) => {
|
||||
return verifyJSON(value);
|
||||
},
|
||||
message: '不是合法的 JSON 字符串'
|
||||
}
|
||||
]}
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
GroupRatio: value
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
</Row>
|
||||
</Form.Section>
|
||||
</Form>
|
||||
<Space>
|
||||
<Button onClick={onSubmit}>
|
||||
保存倍率设置
|
||||
</Button>
|
||||
<Popconfirm
|
||||
title='确定重置模型倍率吗?'
|
||||
content='此修改将不可逆'
|
||||
okType={'danger'}
|
||||
position={'top'}
|
||||
onConfirm={() => {
|
||||
resetModelRatio();
|
||||
}}
|
||||
>
|
||||
<Button type={'danger'}>
|
||||
重置模型倍率
|
||||
</Button>
|
||||
</Popconfirm>
|
||||
</Space>
|
||||
</Spin>
|
||||
);
|
||||
}
|
||||
154
web/src/pages/Setting/Operation/SettingsMonitoring.js
Normal file
@@ -0,0 +1,154 @@
|
||||
import React, { useEffect, useState, useRef } from 'react';
|
||||
import { Button, Col, Form, Row, Spin } from '@douyinfe/semi-ui';
|
||||
import {
|
||||
compareObjects,
|
||||
API,
|
||||
showError,
|
||||
showSuccess,
|
||||
showWarning,
|
||||
} from '../../../helpers';
|
||||
|
||||
export default function SettingsMonitoring(props) {
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [inputs, setInputs] = useState({
|
||||
ChannelDisableThreshold: '',
|
||||
QuotaRemindThreshold: '',
|
||||
AutomaticDisableChannelEnabled: false,
|
||||
AutomaticEnableChannelEnabled: false,
|
||||
});
|
||||
const refForm = useRef();
|
||||
const [inputsRow, setInputsRow] = useState(inputs);
|
||||
|
||||
function onSubmit() {
|
||||
const updateArray = compareObjects(inputs, inputsRow);
|
||||
if (!updateArray.length) return showWarning('你似乎并没有修改什么');
|
||||
const requestQueue = updateArray.map((item) => {
|
||||
let value = '';
|
||||
if (typeof inputs[item.key] === 'boolean') {
|
||||
value = String(inputs[item.key]);
|
||||
} else {
|
||||
value = inputs[item.key];
|
||||
}
|
||||
return API.put('/api/option/', {
|
||||
key: item.key,
|
||||
value,
|
||||
});
|
||||
});
|
||||
setLoading(true);
|
||||
Promise.all(requestQueue)
|
||||
.then((res) => {
|
||||
if (requestQueue.length === 1) {
|
||||
if (res.includes(undefined)) return;
|
||||
} else if (requestQueue.length > 1) {
|
||||
if (res.includes(undefined)) return showError('部分保存失败,请重试');
|
||||
}
|
||||
showSuccess('保存成功');
|
||||
props.refresh();
|
||||
})
|
||||
.catch(() => {
|
||||
showError('保存失败,请重试');
|
||||
})
|
||||
.finally(() => {
|
||||
setLoading(false);
|
||||
});
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
const currentInputs = {};
|
||||
for (let key in props.options) {
|
||||
if (Object.keys(inputs).includes(key)) {
|
||||
currentInputs[key] = props.options[key];
|
||||
}
|
||||
}
|
||||
setInputs(currentInputs);
|
||||
setInputsRow(structuredClone(currentInputs));
|
||||
refForm.current.setValues(currentInputs);
|
||||
}, [props.options]);
|
||||
return (
|
||||
<>
|
||||
<Spin spinning={loading}>
|
||||
<Form
|
||||
values={inputs}
|
||||
getFormApi={(formAPI) => (refForm.current = formAPI)}
|
||||
style={{ marginBottom: 15 }}
|
||||
>
|
||||
<Form.Section text={'监控设置'}>
|
||||
<Row gutter={16}>
|
||||
<Col span={8}>
|
||||
<Form.InputNumber
|
||||
label={'最长响应时间'}
|
||||
step={1}
|
||||
min={0}
|
||||
suffix={'秒'}
|
||||
extraText={'当运行通道全部测试时,超过此时间将自动禁用通道'}
|
||||
placeholder={''}
|
||||
field={'ChannelDisableThreshold'}
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
ChannelDisableThreshold: String(value),
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
<Col span={8}>
|
||||
<Form.InputNumber
|
||||
label={'额度提醒阈值'}
|
||||
step={1}
|
||||
min={0}
|
||||
suffix={'Token'}
|
||||
extraText={'低于此额度时将发送邮件提醒用户'}
|
||||
placeholder={''}
|
||||
field={'QuotaRemindThreshold'}
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
QuotaRemindThreshold: String(value),
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
</Row>
|
||||
<Row gutter={16}>
|
||||
<Col span={8}>
|
||||
<Form.Switch
|
||||
field={'AutomaticDisableChannelEnabled'}
|
||||
label={'失败时自动禁用通道'}
|
||||
size='large'
|
||||
checkedText='|'
|
||||
uncheckedText='〇'
|
||||
onChange={(value) => {
|
||||
setInputs({
|
||||
...inputs,
|
||||
AutomaticDisableChannelEnabled: value,
|
||||
});
|
||||
}}
|
||||
/>
|
||||
</Col>
|
||||
<Col span={8}>
|
||||
<Form.Switch
|
||||
field={'AutomaticEnableChannelEnabled'}
|
||||
label={'成功时自动启用通道'}
|
||||
size='large'
|
||||
checkedText='|'
|
||||
uncheckedText='〇'
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
AutomaticEnableChannelEnabled: value,
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
</Row>
|
||||
<Row>
|
||||
<Button size='large' onClick={onSubmit}>
|
||||
保存监控设置
|
||||
</Button>
|
||||
</Row>
|
||||
</Form.Section>
|
||||
</Form>
|
||||
</Spin>
|
||||
</>
|
||||
);
|
||||
}
|
||||
135
web/src/pages/Setting/Operation/SettingsSensitiveWords.js
Normal file
@@ -0,0 +1,135 @@
|
||||
import React, { useEffect, useState, useRef } from 'react';
|
||||
import { Button, Col, Form, Row, Spin, Tag } from '@douyinfe/semi-ui';
|
||||
import {
|
||||
compareObjects,
|
||||
API,
|
||||
showError,
|
||||
showSuccess,
|
||||
showWarning,
|
||||
} from '../../../helpers';
|
||||
|
||||
export default function SettingsSensitiveWords(props) {
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [inputs, setInputs] = useState({
|
||||
CheckSensitiveEnabled: false,
|
||||
CheckSensitiveOnPromptEnabled: false,
|
||||
SensitiveWords: '',
|
||||
});
|
||||
const refForm = useRef();
|
||||
const [inputsRow, setInputsRow] = useState(inputs);
|
||||
|
||||
function onSubmit() {
|
||||
const updateArray = compareObjects(inputs, inputsRow);
|
||||
if (!updateArray.length) return showWarning('你似乎并没有修改什么');
|
||||
const requestQueue = updateArray.map((item) => {
|
||||
let value = '';
|
||||
if (typeof inputs[item.key] === 'boolean') {
|
||||
value = String(inputs[item.key]);
|
||||
} else {
|
||||
value = inputs[item.key];
|
||||
}
|
||||
return API.put('/api/option/', {
|
||||
key: item.key,
|
||||
value,
|
||||
});
|
||||
});
|
||||
setLoading(true);
|
||||
Promise.all(requestQueue)
|
||||
.then((res) => {
|
||||
if (requestQueue.length === 1) {
|
||||
if (res.includes(undefined)) return;
|
||||
} else if (requestQueue.length > 1) {
|
||||
if (res.includes(undefined)) return showError('部分保存失败,请重试');
|
||||
}
|
||||
showSuccess('保存成功');
|
||||
props.refresh();
|
||||
})
|
||||
.catch(() => {
|
||||
showError('保存失败,请重试');
|
||||
})
|
||||
.finally(() => {
|
||||
setLoading(false);
|
||||
});
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
const currentInputs = {};
|
||||
for (let key in props.options) {
|
||||
if (Object.keys(inputs).includes(key)) {
|
||||
currentInputs[key] = props.options[key];
|
||||
}
|
||||
}
|
||||
setInputs(currentInputs);
|
||||
setInputsRow(structuredClone(currentInputs));
|
||||
refForm.current.setValues(currentInputs);
|
||||
}, [props.options]);
|
||||
return (
|
||||
<>
|
||||
<Spin spinning={loading}>
|
||||
<Form
|
||||
values={inputs}
|
||||
getFormApi={(formAPI) => (refForm.current = formAPI)}
|
||||
style={{ marginBottom: 15 }}
|
||||
>
|
||||
<Form.Section text={'屏蔽词过滤设置'}>
|
||||
<Row gutter={16}>
|
||||
<Col span={8}>
|
||||
<Form.Switch
|
||||
field={'CheckSensitiveEnabled'}
|
||||
label={'启用屏蔽词过滤功能'}
|
||||
size='large'
|
||||
checkedText='|'
|
||||
uncheckedText='〇'
|
||||
onChange={(value) => {
|
||||
setInputs({
|
||||
...inputs,
|
||||
CheckSensitiveEnabled: value,
|
||||
});
|
||||
}}
|
||||
/>
|
||||
</Col>
|
||||
<Col span={8}>
|
||||
<Form.Switch
|
||||
field={'CheckSensitiveOnPromptEnabled'}
|
||||
label={'启用 Prompt 检查'}
|
||||
size='large'
|
||||
checkedText='|'
|
||||
uncheckedText='〇'
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
CheckSensitiveOnPromptEnabled: value,
|
||||
})
|
||||
}
|
||||
/>
|
||||
</Col>
|
||||
</Row>
|
||||
<Row>
|
||||
<Col span={16}>
|
||||
<Form.TextArea
|
||||
label={'屏蔽词列表'}
|
||||
extraText={'一行一个屏蔽词,不需要符号分割'}
|
||||
placeholder={'一行一个屏蔽词,不需要符号分割'}
|
||||
field={'SensitiveWords'}
|
||||
onChange={(value) =>
|
||||
setInputs({
|
||||
...inputs,
|
||||
SensitiveWords: value,
|
||||
})
|
||||
}
|
||||
style={{ fontFamily: 'JetBrains Mono, Consolas' }}
|
||||
autosize={{ minRows: 6, maxRows: 12 }}
|
||||
/>
|
||||
</Col>
|
||||
</Row>
|
||||
<Row>
|
||||
<Button size='large' onClick={onSubmit}>
|
||||
保存屏蔽词过滤设置
|
||||
</Button>
|
||||
</Row>
|
||||
</Form.Section>
|
||||
</Form>
|
||||
</Spin>
|
||||
</>
|
||||
);
|
||||
}
|
||||
@@ -1,12 +1,13 @@
|
||||
import React, { useEffect, useState } from 'react';
|
||||
import { useNavigate } from 'react-router-dom';
|
||||
import { API, isMobile, showError, showSuccess } from '../../helpers';
|
||||
import { renderQuotaWithPrompt } from '../../helpers/render';
|
||||
import { renderQuota, renderQuotaWithPrompt } from '../../helpers/render';
|
||||
import Title from '@douyinfe/semi-ui/lib/es/typography/title';
|
||||
import {
|
||||
Button,
|
||||
Divider,
|
||||
Input,
|
||||
Modal,
|
||||
Select,
|
||||
SideSheet,
|
||||
Space,
|
||||
@@ -17,6 +18,8 @@ import {
|
||||
const EditUser = (props) => {
|
||||
const userId = props.editingUser.id;
|
||||
const [loading, setLoading] = useState(true);
|
||||
const [addQuotaModalOpen, setIsModalOpen] = useState(false);
|
||||
const [addQuotaLocal, setAddQuotaLocal] = useState('');
|
||||
const [inputs, setInputs] = useState({
|
||||
username: '',
|
||||
display_name: '',
|
||||
@@ -107,6 +110,16 @@ const EditUser = (props) => {
|
||||
setLoading(false);
|
||||
};
|
||||
|
||||
const addLocalQuota = () => {
|
||||
let newQuota = parseInt(quota) + parseInt(addQuotaLocal);
|
||||
setInputs((inputs) => ({ ...inputs, quota: newQuota }));
|
||||
};
|
||||
|
||||
const openAddQuotaModal = () => {
|
||||
setAddQuotaLocal('0');
|
||||
setIsModalOpen(true);
|
||||
};
|
||||
|
||||
return (
|
||||
<>
|
||||
<SideSheet
|
||||
@@ -192,14 +205,17 @@ const EditUser = (props) => {
|
||||
<div style={{ marginTop: 20 }}>
|
||||
<Typography.Text>{`剩余额度${renderQuotaWithPrompt(quota)}`}</Typography.Text>
|
||||
</div>
|
||||
<Input
|
||||
name='quota'
|
||||
placeholder={'请输入新的剩余额度'}
|
||||
onChange={(value) => handleInputChange('quota', value)}
|
||||
value={quota}
|
||||
type={'number'}
|
||||
autoComplete='new-password'
|
||||
/>
|
||||
<Space>
|
||||
<Input
|
||||
name='quota'
|
||||
placeholder={'请输入新的剩余额度'}
|
||||
onChange={(value) => handleInputChange('quota', value)}
|
||||
value={quota}
|
||||
type={'number'}
|
||||
autoComplete='new-password'
|
||||
/>
|
||||
<Button onClick={openAddQuotaModal}>添加额度</Button>
|
||||
</Space>
|
||||
</>
|
||||
)}
|
||||
<Divider style={{ marginTop: 20 }}>以下信息不可修改</Divider>
|
||||
@@ -245,6 +261,30 @@ const EditUser = (props) => {
|
||||
/>
|
||||
</Spin>
|
||||
</SideSheet>
|
||||
<Modal
|
||||
centered={true}
|
||||
visible={addQuotaModalOpen}
|
||||
onOk={() => {
|
||||
addLocalQuota();
|
||||
setIsModalOpen(false);
|
||||
}}
|
||||
onCancel={() => setIsModalOpen(false)}
|
||||
closable={null}
|
||||
>
|
||||
<div style={{ marginTop: 20 }}>
|
||||
<Typography.Text>{`新额度${renderQuota(quota)} + ${renderQuota(addQuotaLocal)} = ${renderQuota(quota + parseInt(addQuotaLocal))}`}</Typography.Text>
|
||||
</div>
|
||||
<Input
|
||||
name='addQuotaLocal'
|
||||
placeholder={'需要添加的额度(支持负数)'}
|
||||
onChange={(value) => {
|
||||
setAddQuotaLocal(value);
|
||||
}}
|
||||
value={addQuotaLocal}
|
||||
type={'number'}
|
||||
autoComplete='new-password'
|
||||
/>
|
||||
</Modal>
|
||||
</>
|
||||
);
|
||||
};
|
||||