Compare commits

...

16 Commits

Author SHA1 Message Date
jinjianming
e9bf0b926a Merge cc367dd95b into cba82404ae 2024-09-21 22:57:49 +08:00
千寻简
cba82404ae feat: add lobechat open link options (#1741)
Co-authored-by: Star <iii9777@163.com>
2024-09-21 22:49:31 +08:00
forrestlinfeng
c9ac670ba1 feat: update stepfun models (#1740)
Co-authored-by: chenlinfeng <chenlinfeng@step.ai>
2024-09-21 22:48:46 +08:00
leavegee
15f815c23c fix: fix ali embedding model always use v1 (#1747)
* fix: ali embedding model: v2 and v3

* chore: use ctxkey.RequestModel to eliminate hardcoding

---------

Co-authored-by: xuejia <gexuejia@djbx.com>
Co-authored-by: JustSong <songquanpeng@foxmail.com>
2024-09-21 22:40:06 +08:00
majian
89b63ca96f feat: ResponseFormat support json_schema (#1759)
* feat: responseFormat support json_schema

* chore: rename struct name

---------

Co-authored-by: JustSong <songquanpeng@foxmail.com>
2024-09-21 22:35:24 +08:00
Ghostz
8cc54489b9 feat: update disabled channel (#1780)
* Update disabled channel

* Update manage.go

* Update manage.go

* chore: add missing space

---------

Co-authored-by: JustSong <songquanpeng@foxmail.com>
Co-authored-by: JustSong <39998050+songquanpeng@users.noreply.github.com>
2024-09-21 22:31:53 +08:00
guogeer
58bf60805e fix: postgres use COALESCE replace null (#1793)
Co-authored-by: jinqi.guo <jinqi.guo@ubtrobot.com>
2024-09-21 22:13:31 +08:00
AJ's Life Journey
6714cf96d6 fix: Groq organization not auto-disabled when blocked (#1822) 2024-09-21 22:12:09 +08:00
longkeyy
f9774698e9 feat: synchronize with the official release of the groq model (#1677)
update groq: add gemma2-9b-it and the llama3.1 family; fix prices from k/token to M/token
2024-08-06 23:51:08 +08:00
TAKO
2af6f6a166 feat: add Cloudflare New Free Model Llama 3.1 8b (#1703) 2024-08-06 23:49:48 +08:00
MotorBottle
04bb3ef392 feat: add Max Tokens and Context Window Setting Options for Ollama Channel (#1694)
* Update main.go with max_tokens param

* Update model.go with max_tokens param

* Update model.go

* Update main.go

* Update main.go

* Adds num_ctx param for Ollama Channel

* Added num_ctx param for ollama adapter

* Added num_ctx param for ollama adapter

* Improved data process logic
2024-08-06 23:44:37 +08:00
longkeyy
b4bfa418a8 feat: update gemini model and price (#1705) 2024-08-06 23:43:33 +08:00
SLKun
e7e99e558a feat: update Ollama embedding API to latest version with multi-text embedding support (#1715) 2024-08-06 23:43:20 +08:00
Shenghang Tsai
402fcf7f79 feat: add SiliconFlow (#1717)
* Add SiliconFlow

* Update README.md

* Update README.md

* Update channel.constants.js

* Update ChannelConstants.js

* Update channel.constants.js

* Update ChannelConstants.js

* Update compatible.go

* Update README.md
2024-08-06 23:42:25 +08:00
Junyan Qin
36039e329e docs: update introduction for QChatGPT (#1707) 2024-08-06 23:33:43 +08:00
jinjianmingming
cc367dd95b berry theme: add chat button 2024-05-29 15:37:59 +08:00
28 changed files with 325 additions and 74 deletions

View File

@@ -89,6 +89,7 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用
+ [x] [DeepL](https://www.deepl.com/)
+ [x] [together.ai](https://www.together.ai/)
+ [x] [novita.ai](https://www.novita.ai/)
+ [x] [硅基流动 SiliconCloud](https://siliconflow.cn/siliconcloud)
2. 支持配置镜像以及众多[第三方代理服务](https://iamazing.cn/page/openai-api-third-party-services)。
3. 支持通过**负载均衡**的方式访问多个渠道。
4. 支持 **stream 模式**,可以通过流式传输实现打字机效果。
@@ -251,9 +252,9 @@ docker run --name chatgpt-web -d -p 3002:3002 -e OPENAI_API_BASE_URL=https://ope
#### QChatGPT - QQ机器人
项目主页https://github.com/RockChinQ/QChatGPT
根据文档完成部署后,在`config.py`设置配置项`openai_config`的`reverse_proxy`为 One API 后端地址,设置`api_key`为 One API 生成的key并在配置项`completion_api_params`的`model`参数设置为 One API 支持的模型名称。
根据[文档](https://qchatgpt.rockchin.top)完成部署后,在 `data/provider.json`设置`requester.openai-chat-completions.base-url`为 One API 实例地址,并填写 API Key 到 `keys.openai` 组中,设置 `model` 为要使用的模型名称。
可安装 [Switcher 插件](https://github.com/RockChinQ/Switcher)在运行时切换所使用的模型。
运行期间可以通过`!model`命令查看、切换可用模型。
### 部署到第三方平台
<details>

View File

@@ -3,6 +3,7 @@ package model
import (
"context"
"fmt"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/helper"
@@ -152,7 +153,11 @@ func SearchUserLogs(userId int, keyword string) (logs []*Log, err error) {
}
func SumUsedQuota(logType int, startTimestamp int64, endTimestamp int64, modelName string, username string, tokenName string, channel int) (quota int64) {
tx := LOG_DB.Table("logs").Select("ifnull(sum(quota),0)")
ifnull := "ifnull"
if common.UsingPostgreSQL {
ifnull = "COALESCE"
}
tx := LOG_DB.Table("logs").Select(fmt.Sprintf("%s(sum(quota),0)", ifnull))
if username != "" {
tx = tx.Where("username = ?", username)
}
@@ -176,7 +181,11 @@ func SumUsedQuota(logType int, startTimestamp int64, endTimestamp int64, modelNa
}
func SumUsedToken(logType int, startTimestamp int64, endTimestamp int64, modelName string, username string, tokenName string) (token int) {
tx := LOG_DB.Table("logs").Select("ifnull(sum(prompt_tokens),0) + ifnull(sum(completion_tokens),0)")
ifnull := "ifnull"
if common.UsingPostgreSQL {
ifnull = "COALESCE"
}
tx := LOG_DB.Table("logs").Select(fmt.Sprintf("%s(sum(prompt_tokens),0) + %s(sum(completion_tokens),0)", ifnull, ifnull))
if username != "" {
tx = tx.Where("username = ?", username)
}
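PostgreSQL has no ifnull() function, which is why the aggregate select clause is now built per dialect and falls back to the SQL-standard COALESCE. A minimal standalone sketch of the switch shown above (the real code reads common.UsingPostgreSQL and hands the string to GORM's Select):

package main

import "fmt"

// Sketch of the dialect-dependent aggregate used in SumUsedQuota above.
func quotaSelect(usingPostgreSQL bool) string {
    fn := "ifnull"
    if usingPostgreSQL {
        fn = "COALESCE" // Postgres rejects ifnull(); COALESCE is the portable form
    }
    return fmt.Sprintf("%s(sum(quota),0)", fn)
}

func main() {
    fmt.Println(quotaSelect(false)) // ifnull(sum(quota),0)   -> MySQL / SQLite
    fmt.Println(quotaSelect(true))  // COALESCE(sum(quota),0) -> PostgreSQL
}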

View File

@@ -1,10 +1,11 @@
package monitor
import (
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/relay/model"
"net/http"
"strings"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/relay/model"
)
func ShouldDisableChannel(err *model.Error, statusCode int) bool {
@@ -18,31 +19,23 @@ func ShouldDisableChannel(err *model.Error, statusCode int) bool {
return true
}
switch err.Type {
case "insufficient_quota":
return true
// https://docs.anthropic.com/claude/reference/errors
case "authentication_error":
return true
case "permission_error":
return true
case "forbidden":
case "insufficient_quota", "authentication_error", "permission_error", "forbidden":
return true
}
if err.Code == "invalid_api_key" || err.Code == "account_deactivated" {
return true
}
if strings.HasPrefix(err.Message, "Your credit balance is too low") { // anthropic
return true
} else if strings.HasPrefix(err.Message, "This organization has been disabled.") {
return true
}
//if strings.Contains(err.Message, "quota") {
// return true
//}
if strings.Contains(err.Message, "credit") {
return true
}
if strings.Contains(err.Message, "balance") {
lowerMessage := strings.ToLower(err.Message)
if strings.Contains(lowerMessage, "your access was terminated") ||
strings.Contains(lowerMessage, "violation of our policies") ||
strings.Contains(lowerMessage, "your credit balance is too low") ||
strings.Contains(lowerMessage, "organization has been disabled") ||
strings.Contains(lowerMessage, "credit") ||
strings.Contains(lowerMessage, "balance") ||
strings.Contains(lowerMessage, "permission denied") ||
strings.Contains(lowerMessage, "organization has been restricted") || // groq
strings.Contains(lowerMessage, "已欠费") {
return true
}
return false
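The rewrite collapses the separate prefix checks into a single case-insensitive substring scan over the error message. A standalone sketch of that pattern, with an abbreviated hint list (the real function checks more phrases, as the diff above shows):

package main

import (
    "fmt"
    "strings"
)

// Abbreviated list of phrases that should auto-disable a channel.
var disableHints = []string{
    "your access was terminated",
    "organization has been restricted", // groq
    "credit",
    "balance",
}

func shouldDisable(message string) bool {
    lower := strings.ToLower(message)
    for _, hint := range disableHints {
        if strings.Contains(lower, hint) {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(shouldDisable("This Organization Has Been Restricted.")) // true
    fmt.Println(shouldDisable("Request timed out"))                      // false
}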

View File

@@ -3,6 +3,7 @@ package ali
import (
"bufio"
"encoding/json"
"github.com/songquanpeng/one-api/common/ctxkey"
"github.com/songquanpeng/one-api/common/render"
"io"
"net/http"
@@ -59,7 +60,7 @@ func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
func ConvertEmbeddingRequest(request model.GeneralOpenAIRequest) *EmbeddingRequest {
return &EmbeddingRequest{
Model: "text-embedding-v1",
Model: request.Model,
Input: struct {
Texts []string `json:"texts"`
}{
@@ -102,8 +103,9 @@ func EmbeddingHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStat
StatusCode: resp.StatusCode,
}, nil
}
requestModel := c.GetString(ctxkey.RequestModel)
fullTextResponse := embeddingResponseAli2OpenAI(&aliResponse)
fullTextResponse.Model = requestModel
jsonResponse, err := json.Marshal(fullTextResponse)
if err != nil {
return openai.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
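With the hardcoded "text-embedding-v1" gone, the Ali embedding request simply carries whatever model the caller asked for, and the handler stamps the response with that same name read back from the Gin context via ctxkey.RequestModel. A simplified sketch of the pass-through, using local stand-in types rather than the adaptor's real structs:

package main

import "fmt"

// Local stand-ins for the adaptor's types (illustrative only).
type openAIRequest struct {
    Model string
    Input []string
}

type aliEmbeddingRequest struct {
    Model string
    Texts []string
}

func convertEmbeddingRequest(req openAIRequest) aliEmbeddingRequest {
    return aliEmbeddingRequest{
        Model: req.Model, // was hardcoded to "text-embedding-v1" before the fix
        Texts: req.Input,
    }
}

func main() {
    out := convertEmbeddingRequest(openAIRequest{Model: "text-embedding-v3", Input: []string{"你好"}})
    fmt.Println(out.Model) // text-embedding-v3
}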

View File

@@ -1,6 +1,7 @@
package cloudflare
var ModelList = []string{
"@cf/meta/llama-3.1-8b-instruct",
"@cf/meta/llama-2-7b-chat-fp16",
"@cf/meta/llama-2-7b-chat-int8",
"@cf/mistral/mistral-7b-instruct-v0.1",

View File

@@ -3,6 +3,5 @@ package gemini
// https://ai.google.dev/models/gemini
var ModelList = []string{
"gemini-pro", "gemini-1.0-pro-001", "gemini-1.5-pro",
"gemini-pro-vision", "gemini-1.0-pro-vision-001", "embedding-001", "text-embedding-004",
"gemini-pro", "gemini-1.0-pro", "gemini-1.5-flash", "gemini-1.5-pro", "text-embedding-004", "aqa",
}

View File

@@ -4,9 +4,14 @@ package groq
var ModelList = []string{
"gemma-7b-it",
"llama2-7b-2048",
"llama2-70b-4096",
"mixtral-8x7b-32768",
"llama3-8b-8192",
"llama3-70b-8192",
"gemma2-9b-it",
"llama-3.1-405b-reasoning",
"llama-3.1-70b-versatile",
"llama-3.1-8b-instant",
"llama3-groq-70b-8192-tool-use-preview",
"llama3-groq-8b-8192-tool-use-preview",
"whisper-large-v3",
}

View File

@@ -24,7 +24,7 @@ func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error) {
// https://github.com/ollama/ollama/blob/main/docs/api.md
fullRequestURL := fmt.Sprintf("%s/api/chat", meta.BaseURL)
if meta.Mode == relaymode.Embeddings {
fullRequestURL = fmt.Sprintf("%s/api/embeddings", meta.BaseURL)
fullRequestURL = fmt.Sprintf("%s/api/embed", meta.BaseURL)
}
return fullRequestURL, nil
}
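Embedding requests are now routed to Ollama's newer /api/embed endpoint, while chat requests keep using /api/chat. A trivial sketch of the URL selection; the localhost address is just Ollama's usual default:

package main

import "fmt"

func ollamaURL(baseURL string, isEmbedding bool) string {
    if isEmbedding {
        return fmt.Sprintf("%s/api/embed", baseURL) // newer endpoint, multi-text capable
    }
    return fmt.Sprintf("%s/api/chat", baseURL)
}

func main() {
    fmt.Println(ollamaURL("http://localhost:11434", true))  // http://localhost:11434/api/embed
    fmt.Println(ollamaURL("http://localhost:11434", false)) // http://localhost:11434/api/chat
}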

View File

@@ -31,6 +31,8 @@ func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
TopP: request.TopP,
FrequencyPenalty: request.FrequencyPenalty,
PresencePenalty: request.PresencePenalty,
NumPredict: request.MaxTokens,
NumCtx: request.NumCtx,
},
Stream: request.Stream,
}
@@ -118,8 +120,10 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
common.SetEventStreamHeaders(c)
for scanner.Scan() {
data := strings.TrimPrefix(scanner.Text(), "}")
data = data + "}"
data := scanner.Text()
if strings.HasPrefix(data, "}") {
data = strings.TrimPrefix(data, "}") + "}"
}
var ollamaResponse ChatResponse
err := json.Unmarshal([]byte(data), &ollamaResponse)
@@ -157,8 +161,15 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusC
func ConvertEmbeddingRequest(request model.GeneralOpenAIRequest) *EmbeddingRequest {
return &EmbeddingRequest{
Model: request.Model,
Prompt: strings.Join(request.ParseInput(), " "),
Model: request.Model,
Input: request.ParseInput(),
Options: &Options{
Seed: int(request.Seed),
Temperature: request.Temperature,
TopP: request.TopP,
FrequencyPenalty: request.FrequencyPenalty,
PresencePenalty: request.PresencePenalty,
},
}
}
@@ -201,15 +212,17 @@ func embeddingResponseOllama2OpenAI(response *EmbeddingResponse) *openai.Embeddi
openAIEmbeddingResponse := openai.EmbeddingResponse{
Object: "list",
Data: make([]openai.EmbeddingResponseItem, 0, 1),
Model: "text-embedding-v1",
Model: response.Model,
Usage: model.Usage{TotalTokens: 0},
}
openAIEmbeddingResponse.Data = append(openAIEmbeddingResponse.Data, openai.EmbeddingResponseItem{
Object: `embedding`,
Index: 0,
Embedding: response.Embedding,
})
for i, embedding := range response.Embeddings {
openAIEmbeddingResponse.Data = append(openAIEmbeddingResponse.Data, openai.EmbeddingResponseItem{
Object: `embedding`,
Index: i,
Embedding: embedding,
})
}
return &openAIEmbeddingResponse
}
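With the two new fields, an OpenAI-style max_tokens and the Ollama-specific num_ctx both survive the conversion into Ollama's options object: num_predict caps the number of generated tokens and num_ctx sets the context window. A cut-down sketch of the mapping with stand-in types (not the repo's exact code):

package main

import (
    "encoding/json"
    "fmt"
)

// Cut-down stand-ins for the request types involved (illustrative only).
type generalRequest struct {
    Model     string
    MaxTokens int
    NumCtx    int
}

type ollamaOptions struct {
    NumPredict int `json:"num_predict,omitempty"` // response length cap
    NumCtx     int `json:"num_ctx,omitempty"`     // context window size
}

func convert(req generalRequest) ollamaOptions {
    return ollamaOptions{NumPredict: req.MaxTokens, NumCtx: req.NumCtx}
}

func main() {
    opts := convert(generalRequest{Model: "llama3", MaxTokens: 256, NumCtx: 8192})
    b, _ := json.Marshal(opts)
    fmt.Println(string(b)) // {"num_predict":256,"num_ctx":8192}
}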

View File

@@ -7,6 +7,8 @@ type Options struct {
TopP float64 `json:"top_p,omitempty"`
FrequencyPenalty float64 `json:"frequency_penalty,omitempty"`
PresencePenalty float64 `json:"presence_penalty,omitempty"`
NumPredict int `json:"num_predict,omitempty"`
NumCtx int `json:"num_ctx,omitempty"`
}
type Message struct {
@@ -37,11 +39,15 @@ type ChatResponse struct {
}
type EmbeddingRequest struct {
Model string `json:"model"`
Prompt string `json:"prompt"`
Model string `json:"model"`
Input []string `json:"input"`
// Truncate bool `json:"truncate,omitempty"`
Options *Options `json:"options,omitempty"`
// KeepAlive string `json:"keep_alive,omitempty"`
}
type EmbeddingResponse struct {
Error string `json:"error,omitempty"`
Embedding []float64 `json:"embedding,omitempty"`
Error string `json:"error,omitempty"`
Model string `json:"model"`
Embeddings [][]float64 `json:"embeddings"`
}
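The embedding types now mirror Ollama's /api/embed contract: a list of input texts goes in, and one vector per text comes back. A self-contained sketch of the wire shapes these structs (de)serialize; the model name is a placeholder:

package main

import (
    "encoding/json"
    "fmt"
)

type embedRequest struct {
    Model string   `json:"model"`
    Input []string `json:"input"`
}

type embedResponse struct {
    Model      string      `json:"model"`
    Embeddings [][]float64 `json:"embeddings"` // one vector per input text
}

func main() {
    req, _ := json.Marshal(embedRequest{Model: "some-embedding-model", Input: []string{"hello", "world"}})
    fmt.Println(string(req)) // {"model":"some-embedding-model","input":["hello","world"]}

    var resp embedResponse
    _ = json.Unmarshal([]byte(`{"model":"some-embedding-model","embeddings":[[0.1,0.2],[0.3,0.4]]}`), &resp)
    fmt.Println(len(resp.Embeddings)) // 2, one per input
}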

View File

@@ -13,6 +13,7 @@ import (
"github.com/songquanpeng/one-api/relay/adaptor/novita"
"github.com/songquanpeng/one-api/relay/adaptor/stepfun"
"github.com/songquanpeng/one-api/relay/adaptor/togetherai"
"github.com/songquanpeng/one-api/relay/adaptor/siliconflow"
"github.com/songquanpeng/one-api/relay/channeltype"
)
@@ -30,6 +31,7 @@ var CompatibleChannels = []int{
channeltype.DeepSeek,
channeltype.TogetherAI,
channeltype.Novita,
channeltype.SiliconFlow,
}
func GetCompatibleChannelMeta(channelType int) (string, []string) {
@@ -60,6 +62,8 @@ func GetCompatibleChannelMeta(channelType int) (string, []string) {
return "doubao", doubao.ModelList
case channeltype.Novita:
return "novita", novita.ModelList
case channeltype.SiliconFlow:
return "siliconflow", siliconflow.ModelList
default:
return "openai", ModelList
}

View File

@@ -0,0 +1,36 @@
package siliconflow
// https://docs.siliconflow.cn/docs/getting-started
var ModelList = []string{
"deepseek-ai/deepseek-llm-67b-chat",
"Qwen/Qwen1.5-14B-Chat",
"Qwen/Qwen1.5-7B-Chat",
"Qwen/Qwen1.5-110B-Chat",
"Qwen/Qwen1.5-32B-Chat",
"01-ai/Yi-1.5-6B-Chat",
"01-ai/Yi-1.5-9B-Chat-16K",
"01-ai/Yi-1.5-34B-Chat-16K",
"THUDM/chatglm3-6b",
"deepseek-ai/DeepSeek-V2-Chat",
"THUDM/glm-4-9b-chat",
"Qwen/Qwen2-72B-Instruct",
"Qwen/Qwen2-7B-Instruct",
"Qwen/Qwen2-57B-A14B-Instruct",
"deepseek-ai/DeepSeek-Coder-V2-Instruct",
"Qwen/Qwen2-1.5B-Instruct",
"internlm/internlm2_5-7b-chat",
"BAAI/bge-large-en-v1.5",
"BAAI/bge-large-zh-v1.5",
"Pro/Qwen/Qwen2-7B-Instruct",
"Pro/Qwen/Qwen2-1.5B-Instruct",
"Pro/Qwen/Qwen1.5-7B-Chat",
"Pro/THUDM/glm-4-9b-chat",
"Pro/THUDM/chatglm3-6b",
"Pro/01-ai/Yi-1.5-9B-Chat-16K",
"Pro/01-ai/Yi-1.5-6B-Chat",
"Pro/google/gemma-2-9b-it",
"Pro/internlm/internlm2_5-7b-chat",
"Pro/meta-llama/Meta-Llama-3-8B-Instruct",
"Pro/mistralai/Mistral-7B-Instruct-v0.2",
}
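Because SiliconFlow is registered as an OpenAI-compatible channel, these models are reached through One API's normal chat completions endpoint once a channel with a SiliconFlow key and base URL https://api.siliconflow.cn is configured. A hedged sketch of such a call; the local address and token below are placeholders:

package main

import (
    "bytes"
    "fmt"
    "io"
    "net/http"
)

func main() {
    body := []byte(`{"model":"Qwen/Qwen2-7B-Instruct","messages":[{"role":"user","content":"你好"}]}`)
    req, _ := http.NewRequest(http.MethodPost, "http://localhost:3000/v1/chat/completions", bytes.NewReader(body))
    req.Header.Set("Authorization", "Bearer sk-your-one-api-token") // placeholder token
    req.Header.Set("Content-Type", "application/json")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    out, _ := io.ReadAll(resp.Body)
    fmt.Println(string(out))
}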

View File

@@ -1,7 +1,13 @@
package stepfun
var ModelList = []string{
"step-1-8k",
"step-1-32k",
"step-1-128k",
"step-1-256k",
"step-1-flash",
"step-2-16k",
"step-1v-8k",
"step-1v-32k",
"step-1-200k",
"step-1x-medium",
}

View File

@@ -30,6 +30,14 @@ var ImageSizeRatios = map[string]map[string]float64{
"720x1280": 1,
"1280x720": 1,
},
"step-1x-medium": {
"256x256": 1,
"512x512": 1,
"768x768": 1,
"1024x1024": 1,
"1280x800": 1,
"800x1280": 1,
},
}
var ImageGenerationAmounts = map[string][2]int{
@@ -39,6 +47,7 @@ var ImageGenerationAmounts = map[string][2]int{
"ali-stable-diffusion-v1.5": {1, 4}, // Ali
"wanx-v1": {1, 4}, // Ali
"cogview-3": {1, 1},
"step-1x-medium": {1, 1},
}
var ImagePromptLengthLimitations = map[string]int{
@@ -48,6 +57,7 @@ var ImagePromptLengthLimitations = map[string]int{
"ali-stable-diffusion-v1.5": 4000,
"wanx-v1": 4000,
"cogview-3": 833,
"step-1x-medium": 4000,
}
var ImageOriginModelName = map[string]string{
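The three maps give step-1x-medium the same treatment as the other image models: a set of allowed sizes, an allowed image count of exactly 1, and a 4000-character prompt cap. A rough sketch of how such maps are typically consulted before relaying a generation request (simplified; not the repo's exact validation code):

package main

import "fmt"

var sizeRatios = map[string]map[string]float64{
    "step-1x-medium": {"256x256": 1, "512x512": 1, "768x768": 1, "1024x1024": 1, "1280x800": 1, "800x1280": 1},
}
var generationAmounts = map[string][2]int{"step-1x-medium": {1, 1}}
var promptLengthLimits = map[string]int{"step-1x-medium": 4000}

func validate(model, size string, n, promptLen int) error {
    if _, ok := sizeRatios[model][size]; !ok {
        return fmt.Errorf("size %s not supported for %s", size, model)
    }
    if r, ok := generationAmounts[model]; ok && (n < r[0] || n > r[1]) {
        return fmt.Errorf("n must be between %d and %d", r[0], r[1])
    }
    if limit, ok := promptLengthLimits[model]; ok && promptLen > limit {
        return fmt.Errorf("prompt exceeds %d characters", limit)
    }
    return nil
}

func main() {
    fmt.Println(validate("step-1x-medium", "1024x1024", 1, 120)) // <nil>
    fmt.Println(validate("step-1x-medium", "640x480", 1, 120))   // unsupported size
}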

View File

@@ -98,12 +98,11 @@ var ModelRatio = map[string]float64{
"bge-large-en": 0.002 * RMB,
"tao-8k": 0.002 * RMB,
// https://ai.google.dev/pricing
"PaLM-2": 1,
"gemini-pro": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
"gemini-pro-vision": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
"gemini-1.0-pro-vision-001": 1,
"gemini-1.0-pro-001": 1,
"gemini-1.5-pro": 1,
"gemini-pro": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
"gemini-1.0-pro": 1,
"gemini-1.5-flash": 1,
"gemini-1.5-pro": 1,
"aqa": 1,
// https://open.bigmodel.cn/pricing
"glm-4": 0.1 * RMB,
"glm-4v": 0.1 * RMB,
@@ -158,20 +157,29 @@ var ModelRatio = map[string]float64{
"mistral-large-latest": 8.0 / 1000 * USD,
"mistral-embed": 0.1 / 1000 * USD,
// https://wow.groq.com/#:~:text=inquiries%C2%A0here.-,Model,-Current%20Speed
"llama3-70b-8192": 0.59 / 1000 * USD,
"mixtral-8x7b-32768": 0.27 / 1000 * USD,
"llama3-8b-8192": 0.05 / 1000 * USD,
"gemma-7b-it": 0.1 / 1000 * USD,
"llama2-70b-4096": 0.64 / 1000 * USD,
"llama2-7b-2048": 0.1 / 1000 * USD,
"gemma-7b-it": 0.07 / 1000000 * USD,
"mixtral-8x7b-32768": 0.24 / 1000000 * USD,
"llama3-8b-8192": 0.05 / 1000000 * USD,
"llama3-70b-8192": 0.59 / 1000000 * USD,
"gemma2-9b-it": 0.20 / 1000000 * USD,
"llama-3.1-405b-reasoning": 0.89 / 1000000 * USD,
"llama-3.1-70b-versatile": 0.59 / 1000000 * USD,
"llama-3.1-8b-instant": 0.05 / 1000000 * USD,
"llama3-groq-70b-8192-tool-use-preview": 0.89 / 1000000 * USD,
"llama3-groq-8b-8192-tool-use-preview": 0.19 / 1000000 * USD,
// https://platform.lingyiwanwu.com/docs#-计费单元
"yi-34b-chat-0205": 2.5 / 1000 * RMB,
"yi-34b-chat-200k": 12.0 / 1000 * RMB,
"yi-vl-plus": 6.0 / 1000 * RMB,
// stepfun todo
"step-1v-32k": 0.024 * RMB,
"step-1-32k": 0.024 * RMB,
"step-1-200k": 0.15 * RMB,
// https://platform.stepfun.com/docs/pricing/details
"step-1-8k": 0.005 / 1000 * RMB,
"step-1-32k": 0.015 / 1000 * RMB,
"step-1-128k": 0.040 / 1000 * RMB,
"step-1-256k": 0.095 / 1000 * RMB,
"step-1-flash": 0.001 / 1000 * RMB,
"step-2-16k": 0.038 / 1000 * RMB,
"step-1v-8k": 0.005 / 1000 * RMB,
"step-1v-32k": 0.015 / 1000 * RMB,
// aws llama3 https://aws.amazon.com/cn/bedrock/pricing/
"llama3-8b-8192(33)": 0.0003 / 0.002, // $0.0003 / 1K tokens
"llama3-70b-8192(33)": 0.00265 / 0.002, // $0.00265 / 1K tokens
@@ -197,8 +205,10 @@ var CompletionRatio = map[string]float64{
"llama3-70b-8192(33)": 0.0035 / 0.00265,
}
var DefaultModelRatio map[string]float64
var DefaultCompletionRatio map[string]float64
var (
DefaultModelRatio map[string]float64
DefaultCompletionRatio map[string]float64
)
func init() {
DefaultModelRatio = make(map[string]float64)

View File

@@ -45,5 +45,6 @@ const (
Novita
VertextAI
Proxy
SiliconFlow
Dummy
)

View File

@@ -45,6 +45,7 @@ var ChannelBaseURLs = []string{
"https://api.novita.ai/v3/openai", // 41
"", // 42
"", // 43
"https://api.siliconflow.cn", // 44
}
func init() {

View File

@@ -1,7 +1,15 @@
package model
type ResponseFormat struct {
Type string `json:"type,omitempty"`
Type string `json:"type,omitempty"`
JsonSchema *JSONSchema `json:"json_schema,omitempty"`
}
type JSONSchema struct {
Description string `json:"description,omitempty"`
Name string `json:"name"`
Schema map[string]interface{} `json:"schema,omitempty"`
Strict *bool `json:"strict,omitempty"`
}
type GeneralOpenAIRequest struct {
@@ -29,6 +37,7 @@ type GeneralOpenAIRequest struct {
Dimensions int `json:"dimensions,omitempty"`
Instruction string `json:"instruction,omitempty"`
Size string `json:"size,omitempty"`
NumCtx int `json:"num_ctx,omitempty"`
}
func (r GeneralOpenAIRequest) ParseInput() []string {
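With the new JsonSchema field, a structured-output request can be represented and forwarded as-is. A minimal sketch of the JSON these structs (de)serialize, using simplified local copies of the types (the Description field is omitted, and the schema name is made up for illustration):

package main

import (
    "encoding/json"
    "fmt"
)

// Simplified copies of the structs above, just to show the wire format.
type JSONSchema struct {
    Name   string                 `json:"name"`
    Schema map[string]interface{} `json:"schema,omitempty"`
    Strict *bool                  `json:"strict,omitempty"`
}

type ResponseFormat struct {
    Type       string      `json:"type,omitempty"`
    JsonSchema *JSONSchema `json:"json_schema,omitempty"`
}

func main() {
    strict := true
    rf := ResponseFormat{
        Type: "json_schema",
        JsonSchema: &JSONSchema{
            Name: "weather_report",
            Schema: map[string]interface{}{
                "type":       "object",
                "properties": map[string]interface{}{"city": map[string]interface{}{"type": "string"}},
            },
            Strict: &strict,
        },
    }
    out, _ := json.Marshal(rf)
    fmt.Println(string(out))
}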

View File

@@ -11,12 +11,14 @@ import EditToken from '../pages/Token/EditToken';
const COPY_OPTIONS = [
{ key: 'next', text: 'ChatGPT Next Web', value: 'next' },
{ key: 'ama', text: 'ChatGPT Web & Midjourney', value: 'ama' },
{ key: 'opencat', text: 'OpenCat', value: 'opencat' }
{ key: 'opencat', text: 'OpenCat', value: 'opencat' },
{ key: 'lobechat', text: 'LobeChat', value: 'lobechat' },
];
const OPEN_LINK_OPTIONS = [
{ key: 'ama', text: 'ChatGPT Web & Midjourney', value: 'ama' },
{ key: 'opencat', text: 'OpenCat', value: 'opencat' }
{ key: 'opencat', text: 'OpenCat', value: 'opencat' },
{ key: 'lobechat', text: 'LobeChat', value: 'lobechat' }
];
function renderTimestamp(timestamp) {
@@ -60,7 +62,12 @@ const TokensTable = () => {
onOpenLink('next-mj');
}
},
{ node: 'item', key: 'opencat', name: 'OpenCat', value: 'opencat' }
{ node: 'item', key: 'opencat', name: 'OpenCat', value: 'opencat' },
{
node: 'item', key: 'lobechat', name: 'LobeChat', onClick: () => {
onOpenLink('lobechat');
}
}
];
const columns = [
@@ -177,6 +184,11 @@ const TokensTable = () => {
node: 'item', key: 'opencat', name: 'OpenCat', onClick: () => {
onOpenLink('opencat', record.key);
}
},
{
node: 'item', key: 'lobechat', name: 'LobeChat', onClick: () => {
onOpenLink('lobechat');
}
}
]
}
@@ -382,6 +394,9 @@ const TokensTable = () => {
case 'next-mj':
url = mjLink + `/#/?settings={"key":"sk-${key}","url":"${serverAddress}"}`;
break;
case 'lobechat':
url = chatLink + `/?settings={"keyVaults":{"openai":{"apiKey":"sk-${key}","baseURL":"${serverAddress}"/v1"}}}`;
break;
default:
if (!chatLink) {
showError('管理员未设置聊天链接');
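The LobeChat entries build an open link that embeds the token and server address in LobeChat's settings query parameter. A hedged sketch of that URL shape, assuming the intended base URL is the One API address with /v1 appended; the chat link and key are placeholders:

package main

import (
    "fmt"
    "net/url"
)

// Query-escaping the JSON keeps the link robust; the UI snippets above interpolate it directly.
func lobeChatLink(chatLink, serverAddress, key string) string {
    settings := fmt.Sprintf(`{"keyVaults":{"openai":{"apiKey":"sk-%s","baseURL":"%s/v1"}}}`, key, serverAddress)
    return chatLink + "/?settings=" + url.QueryEscape(settings)
}

func main() {
    fmt.Println(lobeChatLink("https://lobehub.com", "https://one-api.example.com", "abc123"))
}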

View File

@@ -29,6 +29,7 @@ export const CHANNEL_OPTIONS = [
{ key: 39, text: 'together.ai', value: 39, color: 'blue' },
{ key: 42, text: 'VertexAI', value: 42, color: 'blue' },
{ key: 43, text: 'Proxy', value: 43, color: 'blue' },
{ key: 44, text: 'SiliconFlow', value: 44, color: 'blue' },
{ key: 8, text: '自定义渠道', value: 8, color: 'pink' },
{ key: 22, text: '知识库FastGPT', value: 22, color: 'blue' },
{ key: 21, text: '知识库AI Proxy', value: 21, color: 'purple' },

View File

@@ -173,6 +173,12 @@ export const CHANNEL_OPTIONS = {
value: 43,
color: 'primary'
},
44: {
key: 44,
text: 'SiliconFlow',
value: 44,
color: 'primary'
},
41: {
key: 41,
text: 'Novita',

View File

@@ -8,11 +8,12 @@ import {
IconKey,
IconGardenCart,
IconUser,
IconUserScan
IconUserScan,
IconMessageCircle
} from '@tabler/icons-react';
// constant
const icons = { IconDashboard, IconSitemap, IconArticle, IconCoin, IconAdjustments, IconKey, IconGardenCart, IconUser, IconUserScan };
const icons = { IconDashboard, IconSitemap, IconArticle, IconCoin, IconAdjustments, IconKey, IconGardenCart, IconUser, IconUserScan,IconMessageCircle };
// ==============================|| DASHBOARD MENU ITEMS ||============================== //
@@ -29,6 +30,15 @@ const panel = {
breadcrumbs: false,
isAdmin: false
},
{
id: 'chat',
title: '聊天',
type: 'item',
url: '/panel/chat',
icon: icons.IconMessageCircle,
breadcrumbs: false,
isAdmin: false
},
{
id: 'channel',
title: '渠道',

View File

@@ -3,6 +3,7 @@ import { lazy } from 'react';
// project imports
import MainLayout from 'layout/MainLayout';
import Loadable from 'ui-component/Loadable';
import Chat from "../views/Chat";
const Channel = Loadable(lazy(() => import('views/Channel')));
const Log = Loadable(lazy(() => import('views/Log')));
@@ -31,6 +32,10 @@ const MainRoutes = {
path: 'dashboard',
element: <Dashboard />
},
{
path: 'chat',
element: <Chat />
},
{
path: 'channel',
element: <Channel />

View File

@@ -0,0 +1,28 @@
.MuiContainer-root {
padding-left: 0;
padding-right: 0;
height: calc(100% - 1px);
max-width: unset;
}
.css-1xnbu7n-MuiContainer-root {
/* 如果有特定样式,请在此处添加 */
}
.css-9d4wr9 {
background-color: #eef2f6;
width: 100%;
min-height: calc(100vh - 88px);
flex-grow: 1;
padding: 0;
margin-top: 83.746px;
margin-right: 0;
border-radius: 12px;
border-bottom-left-radius: 0;
border-bottom-right-radius: 0;
transition: margin 225ms cubic-bezier(0.0, 0, 0.2, 1) 0ms;
}
.chat-container {
height: 100%;
}

View File

@@ -0,0 +1,69 @@
import React, { useEffect, useState } from "react";
import { API } from "../../utils/api";
import "./index.css";
const useIsSmallScreen = () => {
const [isSmallScreen, setIsSmallScreen] = useState(window.innerWidth <= 768);
useEffect(() => {
const handleResize = () => {
setIsSmallScreen(window.innerWidth <= 768);
};
window.addEventListener('resize', handleResize);
return () => {
window.removeEventListener('resize', handleResize);
};
}, []);
return isSmallScreen;
};
const Chat = () => {
const [chatUrl, setChatUrl] = useState("");
const [loading, setLoading] = useState(true);
// const isSmallScreen = useIsSmallScreen();
const loadTokens = async () => {
try {
const res = await API.get(`/api/token/`);
const siteInfo = JSON.parse(localStorage.getItem('siteInfo'));
if (!siteInfo) {
console.error("siteInfo not found in localStorage");
setLoading(false);
return;
}
// const url = `https://like.chatapi.asia/#/?settings={"key":"sk-xxx","url":"https://chat.chatapi.asia"}`;
const serverAddress = siteInfo.server_address;
const key = res.data.data[0].key;
const url = `${siteInfo.chat_link}/#/?settings={"key":"sk-${key}","url":"${serverAddress}"}`;
setChatUrl(url);
} catch (error) {
console.error("Error loading tokens:", error);
} finally {
setLoading(false);
}
};
useEffect(() => {
loadTokens();
}, []);
if (loading) {
return <div className="chat-container">Loading...</div>;
}
return (
<div className="chat-container">
<iframe
src={chatUrl}
style={{ height: '100%', width: '100%', padding: 0, border: 'none' }}
title="Chat"
/>
</div>
);
};
export default Chat;

View File

@@ -32,7 +32,8 @@ const COPY_OPTIONS = [
encode: false
},
{ key: 'ama', text: 'BotGem', url: 'ama://set-api-key?server={serverAddress}&key=sk-{key}', encode: true },
{ key: 'opencat', text: 'OpenCat', url: 'opencat://team/join?domain={serverAddress}&token=sk-{key}', encode: true }
{ key: 'opencat', text: 'OpenCat', url: 'opencat://team/join?domain={serverAddress}&token=sk-{key}', encode: true },
{ key: 'lobechat', text: 'LobeChat', url: 'https://lobehub.com/?settings={"keyVaults":{"openai":{"apiKey":"user-key","baseURL":"https://your-proxy.com/v1"}}}', encode: true }
];
function replacePlaceholders(text, key, serverAddress) {

View File

@@ -10,12 +10,14 @@ const COPY_OPTIONS = [
{ key: 'next', text: 'ChatGPT Next Web', value: 'next' },
{ key: 'ama', text: 'BotGem', value: 'ama' },
{ key: 'opencat', text: 'OpenCat', value: 'opencat' },
{ key: 'lobechat', text: 'LobeChat', value: 'lobechat' },
];
const OPEN_LINK_OPTIONS = [
{ key: 'next', text: 'ChatGPT Next Web', value: 'next' },
{ key: 'ama', text: 'BotGem', value: 'ama' },
{ key: 'opencat', text: 'OpenCat', value: 'opencat' },
{ key: 'lobechat', text: 'LobeChat', value: 'lobechat' },
];
function renderTimestamp(timestamp) {
@@ -114,6 +116,9 @@ const TokensTable = () => {
case 'next':
url = nextUrl;
break;
case 'lobechat':
url = nextLink + `/?settings={"keyVaults":{"openai":{"apiKey":"sk-${key}","baseURL":"${serverAddress}"/v1"}}}`;
break;
default:
url = `sk-${key}`;
}
@@ -153,7 +158,11 @@ const TokensTable = () => {
case 'opencat':
url = `opencat://team/join?domain=${encodedServerAddress}&token=sk-${key}`;
break;
case 'lobechat':
url = chatLink + `/?settings={"keyVaults":{"openai":{"apiKey":"sk-${key}","baseURL":"${serverAddress}"/v1"}}}`;
break;
default:
url = defaultUrl;
}

View File

@@ -29,6 +29,7 @@ export const CHANNEL_OPTIONS = [
{ key: 39, text: 'together.ai', value: 39, color: 'blue' },
{ key: 42, text: 'VertexAI', value: 42, color: 'blue' },
{ key: 43, text: 'Proxy', value: 43, color: 'blue' },
{ key: 44, text: 'SiliconFlow', value: 44, color: 'blue' },
{ key: 8, text: '自定义渠道', value: 8, color: 'pink' },
{ key: 22, text: '知识库FastGPT', value: 22, color: 'blue' },
{ key: 21, text: '知识库AI Proxy', value: 21, color: 'purple' },