Update model list / add image-recognition and file-analysis support for gpt-4o, gpt-4o-all, claude-3.5, and other models

小易 2024-07-05 21:32:22 +08:00
parent c5d4ec7990
commit 0efaa552fa
10 changed files with 244 additions and 109 deletions

View File

@ -4,6 +4,16 @@
# Yi - Ai Changelog
## V2.6.0 (2024-07-05)
### Feature Improvements
1. **Updated the model list: added gpt-4o, claude-3.5, and other popular models**
2. **Added image-recognition support for models**
   - Supports image recognition with gpt-4o and file analysis with gpt-4o-all (see the payload sketch after this diff)
## V2.5.1 (2024-02-05)
### Feature Improvements
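For context on how the new vision/file support is wired (see the service diff further below): vision-capable models (gpt-4-vision-preview, gpt-4o, claude-3-5-sonnet-20240620) receive uploads as OpenAI-style structured message content, while gpt-4-all / gpt-4o-all simply get the uploaded file URL prepended to the prompt text. A minimal TypeScript sketch of the two shapes follows; the `buildUserMessage` helper and `UserContent` type are illustrative names only, not part of the codebase.

```ts
// Illustrative sketch only; helper and type names are not from the repository.
type UserContent =
  | string
  | Array<
      | { type: 'text'; text: string }
      | { type: 'image_url'; image_url: { url: string } }
    >

const VISION_MODELS = ['gpt-4-vision-preview', 'gpt-4o', 'claude-3-5-sonnet-20240620']

function buildUserMessage(
  model: string,
  text: string,
  imageUrl?: string,
): { role: 'user'; content: UserContent } {
  if (VISION_MODELS.includes(model)) {
    // Vision-capable models: structured content, attaching the image when one was uploaded.
    const content: UserContent = imageUrl
      ? [{ type: 'text', text }, { type: 'image_url', image_url: { url: imageUrl } }]
      : [{ type: 'text', text }]
    return { role: 'user', content }
  }
  if ((model === 'gpt-4-all' || model === 'gpt-4o-all') && imageUrl) {
    // "all" variants: the uploaded file/image URL is prepended to the prompt text.
    return { role: 'user', content: `${imageUrl}\n${text}` }
  }
  // Other models: plain text content.
  return { role: 'user', content: text }
}
```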

View File

@ -1,5 +1,5 @@
{
"version": "2.5.0",
"version": "2.6.0",
"scripts": {
"dev": "vite",
"build:test": "vue-tsc --noEmit && vite build --mode test",

View File

@ -166,6 +166,7 @@ export const MODEL_LIST = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-instruct",
"gpt-4",
"gpt-4-0613",
"gpt-4-32k",
@ -174,10 +175,21 @@ export const MODEL_LIST = [
"gpt-4-vision-preview",
"gpt-4-all",
"gpt-4-0125-preview",
'dall-e-3',
"gpt-4-turbo-2024-04-09",
"gpt-4-turbo-preview",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-all",
"dall-e-3",
// claude
"claude-2.0",
"claude-2.1",
"claude-3.0",
"claude-3-5-sonnet-20240620",
"claude-3-haiku-20240307",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-instant-1.2",
// gemini
"gemini-pro",
// 百度文心
@ -200,6 +212,15 @@ export const MODEL_LIST = [
"360GPT_S2_V9",
// 讯飞星火
"SparkDesk",
"SparkDesk-v3.5",
// kimi
"kimi",
"kimi-128k",
"kimi-all",
//deepseek
"deepseek",
"deepseek-chat",
"deepseek-coder",
];
// 模型列表 0 mj 1 Dall-e
@ -284,42 +305,63 @@ export const MODELSMAP = {
export const MODELSMAPLIST = {
1: [
"gpt-3.5-turbo",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-4",
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0613",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4-all",
"gpt-4-0125-preview",
'dall-e-3',
// claude
"claude-2.0",
"claude-2.1",
// gemini
"gemini-pro",
// 百度文心
"ERNIE-Bot",
"ERNIE-Bot-4",
"ERNIE-Bot-turbo",
// 阿里通义
"qwen-turbo",
"qwen-plus",
"qwen-max",
"qwen-max-lingcontext",
// 腾讯混元
"hunyuan",
// 清华智谱
"chatglm_turbo",
"chatglm_pro",
"chatglm_std",
"chatglm_lite",
// 360 智脑
"360GPT_S2_V9",
// 讯飞星火
"SparkDesk",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-instruct",
"gpt-4",
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0613",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4-all",
"gpt-4-0125-preview",
"gpt-4-turbo-2024-04-09",
"gpt-4-turbo-preview",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-all",
"dall-e-3",
// claude
"claude-2.0",
"claude-2.1",
"claude-3.0",
"claude-3-5-sonnet-20240620",
"claude-3-haiku-20240307",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-instant-1.2",
// gemini
"gemini-pro",
// 百度文心
"ERNIE-Bot",
"ERNIE-Bot-4",
"ERNIE-Bot-turbo",
// 阿里通义
"qwen-turbo",
"qwen-plus",
"qwen-max",
"qwen-max-lingcontext",
// 腾讯混元
"hunyuan",
// 清华智谱
"chatglm_turbo",
"chatglm_pro",
"chatglm_std",
"chatglm_lite",
// 360 智脑
"360GPT_S2_V9",
// 讯飞星火
"SparkDesk",
"SparkDesk-v3.5",
// kimi
"kimi",
"kimi-128k",
"kimi-all",
//deepseek
"deepseek",
"deepseek-chat",
"deepseek-coder",
],
2: [
"ERNIE-Bot",

chat/.env.development Normal file
View File

@ -0,0 +1,5 @@
# 本地链接生产
VITE_GLOB_API_URL=http://172.20.10.2:9520/api
VITE_GLOB_OPEN_LONG_REPLY=false
VITE_GLOB_APP_PWA=false

View File

@ -1,6 +1,6 @@
{
"name": "chatgpt-cooper",
"version": "2.5.0",
"version": "2.6.0",
"private": true,
"description": "ChatGPT Cooper",
"author": "Yi <a8052@qq.com>",

View File

@ -1,6 +1,14 @@
<script setup lang='ts'>
import type { Ref } from 'vue'
import { computed, nextTick, onMounted, onUnmounted, ref, watch } from 'vue'
import {
computed,
nextTick,
onMounted,
onUnmounted,
ref,
watch,
onBeforeUnmount,
} from 'vue'
import {
NButton,
NCard,
@ -16,6 +24,7 @@ import {
NSpace,
NPopselect,
NText,
NDropdown,
} from 'naive-ui'
import type { MessageRenderMessage } from 'naive-ui'
@ -35,6 +44,7 @@ import { SvgIcon } from '@/components/common'
import { useBasicLayout } from '@/hooks/useBasicLayout'
import type { Theme } from '@/store/modules/app/helper'
import modelSvg from '@/assets/icons/modelSvg.svg'
import voiceSvg from '@/assets/icons/voicetype.svg'
import {
useAppStore,
@ -50,6 +60,12 @@ const uploadUrl = ref(`${import.meta.env.VITE_GLOB_API_URL}/upload/file`)
const useGlobalStore = useGlobalStoreWithOut()
const authStore = useAuthStore()
const route = useRoute()
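// Models for which the upload (image/file) button is shown in the chat input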
const uploadModels = [
'gpt-4-vision-preview',
'gpt-4o',
'gpt-4o-all',
'claude-3-5-sonnet-20240620',
]
let controller = new AbortController()
const dialog = useDialog()
@ -90,35 +106,35 @@ const themeOptions: {
const videoOptions: {
label: string
value: string
key: string
}[] = [
{
value: 'alloy',
key: 'alloy',
label: 'alloy',
},
{
value: 'echo',
key: 'echo',
label: 'echo',
},
{
value: 'fable',
key: 'fable',
label: 'fable',
},
{
value: 'nova',
key: 'nova',
label: 'nova',
},
{
value: 'onyx',
key: 'onyx',
label: 'onyx',
},
{
value: 'shimmer',
key: 'shimmer',
label: 'shimmer',
},
]
let currentVideo = ref('alloy')
let voice = ref('alloy')
const theme = computed(() => appStore.theme)
const globaelConfig = computed(() => authStore.globalConfig)
@ -233,48 +249,72 @@ function handleSignIn() {
useGlobalStore.updateSignInDialog(true)
}
const audioRef = ref(null)
const audioState = ref('Play')
function hendleVideo(item) {
var data = JSON.stringify({
model: 'tts-1',
input: item.text,
voice: 'alloy',
})
axios({
method: 'post',
url: 'https://api.oneapi.dwyu.cn/v1/audio/speech',
headers: {
Authorization:
'Bearer sk-z726fTNvD1jzSBZ42e8dF919840b48A5820e4e5d9d4e70A4',
'Content-Type': 'application/json',
},
data: data,
})
.then(function (response) {
console.log('--response.data', response.data)
const audio = audioRef.value
const blob = new Blob([response.data], { type: 'audio/mpeg' })
if (!audio) return
audio.src = URL.createObjectURL(blob)
console.log(audio)
audio.load()
audio.play()
// onBeforeUnmount(() => {
// //
// if (player && !player.paused) {
// //
// player.pause()
// isResponseVideo.value = false
// isPlay.value = false
// player = null
// }
// })
// if (audio.paused) {
// audio.play()
// audioState.value = 'Stop'
// } else {
// audio.pause()
// audio.currentTime = 0
// audioState.value = 'Play'
// }
})
.catch(function (error) {
console.error('There was an error fetching the audio data', error)
})
}
// let isResponseVideo = ref(false)
// let isPlay = ref(false)
// let player = null
// function hendleVideo(type, item) {
// isResponseVideo.value = true
// isPlay.value = true
// if (!player) {
// player = new window.Audio()
// }
// //
// if (!player.paused || type === 'stop') {
// //
// player.pause()
// isResponseVideo.value = false
// isPlay.value = false
// } else {
// //
// var data = JSON.stringify({
// model: 'tts-1',
// input: item.text,
// voice: voice.value,
// })
// axios({
// method: 'post',
// url: '',
// headers: {
// Authorization:
// 'Bearer key',
// 'Content-Type': 'application/json',
// },
// responseType: 'arraybuffer', // responseType 'arraybuffer'
// data: data,
// })
// .then(async function (response) {
// if (!isResponseVideo.value) return
// const blob = new Blob([response.data], { type: 'audio/mpeg' })
// player.src = URL.createObjectURL(blob)
// player.load()
// player.play()
// //
// player.onended = function () {
// isPlay.value = false
// }
// isResponseVideo.value = false
// })
// .catch(function (error) {
// isResponseVideo.value = false
// player.onended = null
// isPlay.value = false
// console.error('There was an error fetching the audio data', error)
// })
// }
// }
// gpt-4-all
let curFile: File | null
@ -831,6 +871,11 @@ function onInputeTip() {
nextTick(() => getTipsRefHeight())
}
function handleSelect(key) {
voice.value = key
ms.success(`切换语音模式成功,当前使用:${key}`)
}
onMounted(async () => {
chatStore.queryChatPre()
@ -898,7 +943,7 @@ onUnmounted(() => {
:imageUrl="item.imageUrl"
@regenerate="handleSubmit(index)"
@delete="handleDelete(item)"
@video="hendleVideo(item)"
:isPlay="isPlay"
/>
<div class="sticky bottom-1 left-0 flex justify-center">
<NButton v-if="loading" @click="handleStop">
@ -1051,6 +1096,24 @@ onUnmounted(() => {
</template>
切换模型
</NTooltip>
<!-- <NDropdown
trigger="hover"
:options="videoOptions"
@select="handleSelect"
>
<NButton
icon-placement="left"
class="shrink0 flex h-8 w-8 items-center justify-center rounded border transition dark:border-neutral-700 dark:hover:bg-[#33373c]"
style="height: 2rem; padding: 0 8px"
>
<template #icon>
<span class="text-base text-slate-500 dark:text-slate-400">
<img :src="voiceSvg" class="h-8" alt="" />
</span>
</template>
<span style="color: #3076fd">{{ voice }}</span>
</NButton>
</NDropdown> -->
</div>
</div>
<div
@ -1100,9 +1163,9 @@ onUnmounted(() => {
<NTooltip
v-if="
!dataBase64 &&
(chatStore.activeConfig.modelInfo.model === 'gpt-4-all' ||
chatStore.activeConfig.modelInfo.model ===
'gpt-4-vision-preview')
uploadModels.includes(
chatStore.activeConfig.modelInfo.model
)
"
trigger="hover"
placement="bottom-end"
@ -1122,7 +1185,11 @@ onUnmounted(() => {
style="display: none"
:accept="
chatStore.activeConfig.modelInfo.model ===
'gpt-4-vision-preview'
'gpt-4-vision-preview' ||
chatStore.activeConfig.modelInfo.model ===
'gpt-4o' ||
chatStore.activeConfig.modelInfo.model ===
'claude-3-5-sonnet-20240620'
? 'image/*'
: 'text/plain,image/*, application/msword, application/vnd.openxmlformats-officedocument.wordprocessingml.document, application/pdf'
"
@ -1233,10 +1300,6 @@ onUnmounted(() => {
/>
</NCard>
</NModal>
<!-- <audio ref="audioRef" controls>
<source type="audio/mpeg" />
</audio> -->
</div>
</template>

View File

@ -1,6 +1,6 @@
{
"name": "yi-ai",
"version": "2.5.0",
"version": "2.6.0",
"description": "使用 Nestjs 和 Vue3 搭建的 AIGC 生态社区 持续集成AI能力到社区之中",
"main": "index.js",
"author": "longyanjiang",

View File

@ -1,6 +1,6 @@
{
"name": "service",
"version": "2.5.0",
"version": "2.6.0",
"description": "",
"author": "",
"private": true,

View File

@ -4,7 +4,7 @@ export enum VerificationUseStatusEnum {
}
export const ModelsMapCn = {
1: '系统内置大模型',
1: 'Ai大模型',
2: '百度千帆大模型',
3: '清华智谱大模型'
}

View File

@ -97,7 +97,7 @@ export class NineStore implements NineStoreInterface {
let nextNumTokensEstimate = 0;
// messages.push({ role: 'system', content: systemMessage, name })
if (systemMessage) {
const specialModels = ['gemini-pro', 'ERNIE','hunyuan'];
const specialModels = ['gemini-pro', 'ERNIE', 'hunyuan'];
const isSpecialModel = activeModel && specialModels.some((specialModel) => activeModel.includes(specialModel));
if (isSpecialModel) {
messages.push({ role: 'user', content: systemMessage, name });
@ -108,8 +108,9 @@ export class NineStore implements NineStoreInterface {
}
const systemMessageOffset = messages.length;
let round = 0;
const uploadModels = ['gpt-4-vision-preview', 'gpt-4o', 'claude-3-5-sonnet-20240620'];
// Special handling for vision/upload-capable models (gpt-4-vision-preview, gpt-4o, claude-3-5-sonnet-20240620)
if (activeModel === 'gpt-4-vision-preview' && imageUrl) {
if (uploadModels.includes(activeModel) && imageUrl) {
const content = [
{
type: 'text',
@ -123,14 +124,22 @@ export class NineStore implements NineStoreInterface {
},
];
messages.push({ role: 'user', content: content, name });
} else if (uploadModels.includes(activeModel) && !imageUrl) {
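// Vision-capable model but no image uploaded this turn: still wrap the text in structured content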
const content = [
{
type: 'text',
text: text,
},
];
messages.push({ role: 'user', content: content, name });
} else {
// Handle gpt-4-all / gpt-4o-all: prepend the uploaded file URL to the prompt text
if (model === 'gpt-4-all' && imageUrl) {
if ((model === 'gpt-4-all' || model === 'gpt-4o-all') && imageUrl) {
text = imageUrl + '\n' + text;
}
messages.push({ role: 'user', content: text, name });
}
// Logger.debug(`发送的参数:${messages}`)
// Logger.debug(`发送的参数:${messages}`);
let nextMessages = messages;
do {
@ -148,12 +157,18 @@ export class NineStore implements NineStoreInterface {
// 特别处理包含 imageUrl 的消息
if (imageUrl) {
if (activeModel === 'gpt-4-vision-preview') {
content = [
{ type: 'text', text: text },
{ type: 'image_url', image_url: { url: imageUrl } },
];
if (uploadModels.includes(activeModel)) {
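// Replaying history for vision-capable models: assistant turns stay text-only; only user turns carry the image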
if (role === 'assistant') {
content = [{ type: 'text', text: text }];
} else {
content = [
{ type: 'text', text: text },
{ type: 'image_url', image_url: { url: imageUrl } },
];
}
}
} else if (!imageUrl && uploadModels.includes(activeModel)) {
content = [{ type: 'text', text: text }];
}
/* 将本轮消息插入到列表中 */