feat: support dall-e3 api mirrors, add name field for ApiKey

Author: RockYang
Date: 2024-01-04 16:29:57 +08:00
parent a06a81a415
commit bcc622a24d
13 changed files with 51 additions and 75 deletions
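For orientation: the ApiKey admin form in this commit adds a required name field to each key and surfaces the key's api_url in a tooltip, which is what lets a key point at a DALL-E3 API mirror instead of the official endpoint. The record shape implied by that UI is roughly the sketch below (TypeScript; inferred from the form fields visible in this diff, the real backend model is not shown here):

    // Sketch only: fields inferred from the admin UI in this commit, not the repository's actual model definition.
    interface ApiKey {
        id: number          // used as the table row key (:row-key="row => row.id")
        platform: string    // e.g. "OpenAI"; chosen from the platform list
        name: string        // new in this commit; required by the form rules
        type: string        // purpose; 'chat' renders the 聊天 tag, other values are not shown in this hunk
        value: string       // the API key itself (KEY column)
        api_url: string     // per-key endpoint shown in the tooltip; this is what enables mirrors
    }

Keeping the endpoint on each key rather than in the global settings is also why the per-platform "API 地址" inputs are removed from the settings page in the last file of this diff.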


@@ -15,7 +15,7 @@
</el-upload>
</el-row>
<el-form-item label="昵称">
- {{ user['nickname'] }}
+ <el-input v-model="user['nickname']"/>
</el-form-item>
<el-form-item label="手机号">
<span>{{ user.mobile }}</span>
@@ -44,16 +44,6 @@
<el-tag type="danger">{{ dateFormat(user['expired_time']) }}</el-tag>
</el-form-item>
<el-form-item label="OpenAI API KEY">
<el-input v-model="user.chat_config['api_keys']['OpenAI']"/>
</el-form-item>
<el-form-item label="Azure API KEY">
<el-input v-model="user['chat_config']['api_keys']['Azure']"/>
</el-form-item>
<el-form-item label="ChatGLM API KEY">
<el-input v-model="user['chat_config']['api_keys']['ChatGLM']"/>
</el-form-item>
<el-row class="opt-line">
<el-button color="#47fff1" :dark="false" round @click="save">保存</el-button>
</el-row>
@@ -78,7 +68,6 @@ const user = ref({
mobile: '',
calls: 0,
tokens: 0,
- chat_config: {api_keys: {OpenAI: "", Azure: "", ChatGLM: ""}}
})
const vipImg = ref("/images/vip.png")
@@ -87,7 +76,6 @@ onMounted(() => {
// 获取最新用户信息
httpGet('/api/user/profile').then(res => {
user.value = res.data
- user.value.chat_config.api_keys = res.data.chat_config.api_keys ?? {OpenAI: "", Azure: "", ChatGLM: ""}
}).catch(e => {
ElMessage.error("获取用户信息失败:" + e.message)
});


@@ -8,7 +8,16 @@
<el-row>
<el-table :data="items" :row-key="row => row.id" table-layout="auto">
<el-table-column prop="platform" label="所属平台"/>
<el-table-column prop="value" label="KEY"/>
<el-table-column prop="name" label="名称"/>
<el-table-column prop="value" label="KEY">
<template #default="scope">
<el-tooltip class="box-item"
effect="dark"
:content="scope.row.api_url"
placement="top">{{ scope.row.value }}
</el-tooltip>
</template>
</el-table-column>
<el-table-column prop="type" label="用途">
<template #default="scope">
<el-tag v-if="scope.row.type === 'chat'">聊天</el-tag>
@@ -67,6 +76,9 @@
</el-option>
</el-select>
</el-form-item>
<el-form-item label="名称" prop="name">
<el-input v-model="item.name" autocomplete="off"/>
</el-form-item>
<el-form-item label="用途" prop="type">
<el-select v-model="item.type" placeholder="请选择用途" @change="changePlatform">
<el-option v-for="item in types" :value="item.value" :label="item.name" :key="item.value">{{
@@ -125,6 +137,7 @@ const item = ref({})
const showDialog = ref(false)
const rules = reactive({
platform: [{required: true, message: '请选择平台', trigger: 'change',}],
+ name: [{required: true, message: '请输入名称', trigger: 'change',}],
type: [{required: true, message: '请选择用途', trigger: 'change',}],
value: [{required: true, message: '请输入 API KEY 值', trigger: 'change',}]
})
@@ -135,8 +148,8 @@ const platforms = ref([
{
name: "OpenAIChatGPT",
value: "OpenAI",
api_url: "https://api.fast-tunnel.one/v1/chat/completions",
img_url: "https://api.openai.com/v1/images/generations"
api_url: "https://gpt.bemore.lol/v1/chat/completions",
img_url: "https://gpt.bemore.lol/v1/images/generations"
},
{
name: "【讯飞】星火大模型",

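The OpenAI platform defaults above now point both api_url and img_url at a mirror host rather than api.openai.com. Requests against such a mirror keep the standard OpenAI request shape and only swap the base URL; a DALL-E3 generation call would look roughly like the sketch below (TypeScript; the function, URL, and key handling are illustrative and not taken from this repository's backend):

    // Sketch: POST the standard OpenAI images/generations payload to whatever img_url the key is configured with.
    async function generateImages(imgUrl: string, apiKey: string, prompt: string, n: number) {
        const res = await fetch(imgUrl, { // e.g. "https://gpt.bemore.lol/v1/images/generations"
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "Authorization": `Bearer ${apiKey}`,
            },
            // n corresponds to the dall_img_num setting edited in the last file of this diff
            body: JSON.stringify({model: "dall-e-3", prompt: prompt, n: n, size: "1024x1024"}),
        })
        if (!res.ok) throw new Error(`image request failed: ${res.status}`)
        return (await res.json()).data // generated images, each item carrying a url or b64_json field
    }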

@@ -204,9 +204,6 @@
</el-form-item>
<el-divider content-position="center">OpenAI</el-divider>
<el-form-item label="API 地址" prop="open_ai.api_url">
<el-input v-model="chat['open_ai']['api_url']" placeholder="支持变量,{model} => 模型名称"/>
</el-form-item>
<el-form-item label="模型创意度">
<el-slider v-model="chat['open_ai']['temperature']" :max="2" :step="0.1"/>
<div class="tip">值越大 AI 回答越发散值越小回答越保守建议保持默认值</div>
@@ -216,9 +213,6 @@
</el-form-item>
<el-divider content-position="center">Azure</el-divider>
<el-form-item label="API 地址" prop="azure.api_url">
<el-input v-model="chat['azure']['api_url']" placeholder="支持变量,{model} => 模型名称"/>
</el-form-item>
<el-form-item label="模型创意度">
<el-slider v-model="chat['azure']['temperature']" :max="2" :step="0.1"/>
<div class="tip">值越大 AI 回答越发散值越小回答越保守建议保持默认值</div>
@@ -228,9 +222,6 @@
</el-form-item>
<el-divider content-position="center">ChatGLM</el-divider>
<el-form-item label="API 地址" prop="chat_gml.api_url">
<el-input v-model="chat['chat_gml']['api_url']" placeholder="支持变量,{model} => 模型名称"/>
</el-form-item>
<el-form-item label="模型创意度">
<el-slider v-model="chat['chat_gml']['temperature']" :max="1" :step="0.01"/>
<div class="tip">值越大 AI 回答越发散值越小回答越保守建议保持默认值</div>
@@ -240,9 +231,6 @@
</el-form-item>
<el-divider content-position="center">文心一言</el-divider>
<el-form-item label="API 地址" prop="baidu.api_url">
<el-input v-model="chat['baidu']['api_url']" placeholder="支持变量,{model} => 模型名称"/>
</el-form-item>
<el-form-item label="模型创意度">
<el-slider v-model="chat['baidu']['temperature']" :max="1" :step="0.01"/>
<div class="tip">值越大 AI 回答越发散值越小回答越保守建议保持默认值</div>
@@ -252,9 +240,6 @@
</el-form-item>
<el-divider content-position="center">讯飞星火</el-divider>
<el-form-item label="API 地址" prop="xun_fei.api_url">
<el-input v-model="chat['xun_fei']['api_url']" placeholder="支持变量,{model} => 模型名称"/>
</el-form-item>
<el-form-item label="模型创意度">
<el-slider v-model="chat['xun_fei']['temperature']" :max="1" :step="0.1"/>
<div class="tip">值越大 AI 回答越发散值越小回答越保守建议保持默认值</div>
@@ -264,10 +249,7 @@
</el-form-item>
<el-divider content-position="center">AI绘图</el-divider>
<el-form-item label="DALL-E3 API地址">
<el-input v-model="chat['dall_api_url']" placeholder="OpenAI官方API需要配合代理使用"/>
</el-form-item>
<el-form-item label="默认出图数量">
<el-form-item label="DALL-E3出图数量">
<el-input v-model.number="chat['dall_img_num']" placeholder="调用 DALL E3 API 传入的出图数量"/>
</el-form-item>
<el-form-item style="text-align: right">
@@ -287,11 +269,11 @@ import {InfoFilled, UploadFilled} from "@element-plus/icons-vue";
const system = ref({models: []})
const chat = ref({
- open_ai: {api_url: "", temperature: 1, max_tokens: 1024},
- azure: {api_url: "", temperature: 1, max_tokens: 1024},
- chat_gml: {api_url: "", temperature: 0.95, max_tokens: 1024},
- baidu: {api_url: "", temperature: 0.95, max_tokens: 1024},
- xun_fei: {api_url: "", temperature: 0.5, max_tokens: 1024},
+ open_ai: {temperature: 1, max_tokens: 1024},
+ azure: {temperature: 1, max_tokens: 1024},
+ chat_gml: {temperature: 0.95, max_tokens: 1024},
+ baidu: {temperature: 0.95, max_tokens: 1024},
+ xun_fei: {temperature: 0.5, max_tokens: 1024},
context_deep: 0,
enable_context: true,
enable_history: true,