mirror of https://github.com/vastxie/99AI.git (synced 2025-11-13 04:03:45 +08:00)

Commit: v2.6.0
93 dist/modules/chatgpt/baidu.js vendored

@@ -1,93 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.sendMessageFromBaidu = exports.getAccessToken = void 0;
-const axios = require('axios');
-const getApiModelMaps = () => {
-    let res = {};
-    const maps = {
-        'ERNIE-Bot': 'completions',
-        'ERNIE-Bot-turbo': 'eb-instant',
-        'BLOOMZ-7B': 'bloomz_7b1',
-        'ERNIE-Bot-4': 'completions_pro',
-        'Llama-2-7b-chat': 'llama_2_7b',
-        'Llama-2-13b-chat': 'llama_2_13b',
-        'ChatGLM2-6B-32K': 'chatglm2_6b_32k',
-        'Qianfan-Chinese-Llama-2-7B': 'qianfan_chinese_llama_2_7b',
-    };
-    Object.keys(maps).map(key => {
-        res[`${key.toLowerCase()}`] = maps[key];
-    });
-    return res;
-};
-function getAccessToken(key, secret) {
-    let url = `https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=${key}&client_secret=${secret}`;
-    return new Promise((resolve, reject) => {
-        axios
-            .post(url)
-            .then((response) => {
-            resolve(response.data.access_token);
-        })
-            .catch((error) => {
-            reject(error);
-        });
-    });
-}
-exports.getAccessToken = getAccessToken;
-function sendMessageFromBaidu(messagesHistory, { onProgress, accessToken, model, temperature = 0.95 }) {
-    const endUrl = getApiModelMaps()[model.trim().toLowerCase()];
-    return new Promise((resolve, reject) => {
-        const url = `https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/${endUrl}?access_token=${accessToken}`;
-        var options = {
-            method: 'POST',
-            url,
-            responseType: 'stream',
-            headers: {
-                'Content-Type': 'application/json',
-            },
-            data: {
-                stream: true,
-                messages: messagesHistory,
-            },
-        };
-        axios(options)
-            .then((response) => {
-            const stream = response.data;
-            let resData = {};
-            let cacheChunk = '';
-            let cacheResText = '';
-            stream.on('data', (chunk) => {
-                const lines = chunk
-                    .toString()
-                    .split('\n\n')
-                    .filter((line) => line.trim() !== '');
-                for (const line of lines) {
-                    const message = line.replace('data: ', '');
-                    try {
-                        const msg = cacheChunk + message;
-                        const parseData = JSON.parse(msg);
-                        cacheChunk = '';
-                        const { is_end, result } = parseData;
-                        result && (cacheResText += result);
-                        if (is_end) {
-                            resData = parseData;
-                            resData.text = cacheResText;
-                        }
-                        onProgress(parseData);
-                    }
-                    catch (error) {
-                        cacheChunk = message;
-                    }
-                }
-            });
-            stream.on('end', () => {
-                cacheResText = '';
-                cacheChunk = '';
-                resolve(resData);
-            });
-        })
-            .catch((error) => {
-            reject(new Error(error));
-        });
-    });
-}
-exports.sendMessageFromBaidu = sendMessageFromBaidu;
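Reviewer note: this commit deletes baidu.js (the ERNIE/Wenxin Workshop adapter) outright. For reference, the removed flow was two steps: exchange the client key/secret for an OAuth access token, then POST the message history to the chat endpoint resolved from the model map. A minimal non-streaming sketch of that flow (hypothetical standalone usage; endpoint paths and field names as in the removed code):

const axios = require('axios');

async function ernieChatOnce(key, secret, messages) {
    // Step 1: client-credentials exchange for an access token.
    const tokenUrl = `https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=${key}&client_secret=${secret}`;
    const { data: tokenData } = await axios.post(tokenUrl);
    // Step 2: call the Wenxin endpoint the model name maps to ('completions' is ERNIE-Bot).
    const chatUrl = `https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions?access_token=${tokenData.access_token}`;
    const { data } = await axios.post(chatUrl, { messages });
    return data.result; // assistant reply text
}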
4 dist/modules/chatgpt/chatgpt.module.js vendored

@@ -21,7 +21,7 @@ const chatLog_service_1 = require("../chatLog/chatLog.service");
 const chatLog_entity_1 = require("../chatLog/chatLog.entity");
 const accountLog_entity_1 = require("../userBalance/accountLog.entity");
 const config_entity_1 = require("../globalConfig/config.entity");
-const gptkeys_entity_1 = require("./gptkeys.entity");
+const gptKeys_entity_1 = require("./gptKeys.entity");
 const whiteList_entity_1 = require("./whiteList.entity");
 const cramiPackage_entity_1 = require("../crami/cramiPackage.entity");
 const chatGroup_entity_1 = require("../chatGroup/chatGroup.entity");
@@ -48,7 +48,7 @@ ChatgptModule = __decorate([
             chatLog_entity_1.ChatLogEntity,
             accountLog_entity_1.AccountLogEntity,
             config_entity_1.ConfigEntity,
-            gptkeys_entity_1.GptKeysEntity,
+            gptKeys_entity_1.GptKeysEntity,
             whiteList_entity_1.WhiteListEntity,
             user_entity_1.UserEntity,
             cramiPackage_entity_1.CramiPackageEntity,
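Reviewer note: the only change in this module is the require-path casing fix, ./gptkeys.entity -> ./gptKeys.entity, with the matching gptkeys_entity_1 -> gptKeys_entity_1 identifier rename. Presumably the entity file itself is named gptKeys.entity.js; on case-sensitive filesystems such as Linux containers the old lowercase path would fail to resolve, so this is a correctness fix rather than a cosmetic one.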
29 dist/modules/chatgpt/chatgpt.service.js vendored

@@ -29,7 +29,7 @@ const typeorm_1 = require("typeorm");
 const typeorm_2 = require("@nestjs/typeorm");
 const badwords_service_1 = require("../badwords/badwords.service");
 const autoreply_service_1 = require("../autoreply/autoreply.service");
-const gptkeys_entity_1 = require("./gptkeys.entity");
+const gptKeys_entity_1 = require("./gptKeys.entity");
 const globalConfig_service_1 = require("../globalConfig/globalConfig.service");
 const fanyi_service_1 = require("../fanyi/fanyi.service");
 const app_entity_1 = require("../app/app.entity");
@@ -127,7 +127,7 @@ let ChatgptService = class ChatgptService {
         }
     }
     async chatProcess(body, req, res) {
-        var _a, _b, _c;
+        var _a, _b, _c, _d;
         const abortController = req.abortController;
         const { options = {}, appId, cusromPrompt, systemMessage = '' } = body;
         let setSystemMessage = systemMessage;
@@ -184,7 +184,6 @@ let ChatgptService = class ChatgptService {
             setSystemMessage = systemPreMessage + `\n Current date: ${currentDate}`;
         }
         const mergedOptions = await this.getRequestParams(options, setSystemMessage, currentRequestModelKey, groupConfig.modelInfo);
-        const { maxModelTokens = 8000, maxResponseTokens = 4096, key } = currentRequestModelKey;
         res && res.status(200);
         let response = null;
         let othersInfo = null;
@@ -270,6 +269,7 @@ let ChatgptService = class ChatgptService {
                     maxTokenRes,
                     apiKey: modelKey,
                     model,
+                    prompt,
                     fileInfo,
                     temperature,
                     proxyUrl: proxyResUrl,
@@ -277,8 +277,8 @@ let ChatgptService = class ChatgptService {
                         res.write(firstChunk ? JSON.stringify(chat) : `\n${JSON.stringify(chat)}`);
                         lastChat = chat;
                         firstChunk = false;
                     },
-                });
-            }
+                }, this.uploadService);
+            isSuccess = true;
             const userMessageData = {
                 id: this.nineStore.getUuid(),
@@ -297,7 +297,8 @@ let ChatgptService = class ChatgptService {
                 text: response.text,
                 role: 'assistant',
                 name: undefined,
-                usage: response.usage,
+                usage: response === null || response === void 0 ? void 0 : response.usage,
+                fileInfo: response === null || response === void 0 ? void 0 : response.fileInfo,
                 parentMessageId: userMessageData.id,
                 conversationId: response === null || response === void 0 ? void 0 : response.conversationId,
             };
@@ -319,10 +320,11 @@ let ChatgptService = class ChatgptService {
                     temperature,
                     proxyUrl: proxyResUrl,
                     onProgress: null,
+                    prompt,
                 });
             }
-            let { usage } = response === null || response === void 0 ? void 0 : response.detail;
-            const { prompt_tokens = 0, completion_tokens = 0, total_tokens = 0 } = usage;
+            const usage = ((_a = response.detail) === null || _a === void 0 ? void 0 : _a.usage) || { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 };
+            const { prompt_tokens, completion_tokens, total_tokens } = usage;
             let charge = deduct;
             if (isTokenBased === true) {
                 charge = Math.ceil((deduct * total_tokens) / tokenFeeRatio);
@@ -355,6 +357,7 @@ let ChatgptService = class ChatgptService {
                 userId: req.user.id,
                 type: balance_constant_1.DeductionKey.CHAT_TYPE,
                 prompt: prompt,
+                fileInfo: response === null || response === void 0 ? void 0 : response.fileInfo,
                 answer: response.text,
                 promptTokens: prompt_tokens,
                 completionTokens: completion_tokens,
@@ -376,7 +379,7 @@ let ChatgptService = class ChatgptService {
                     temperature,
                 }),
             });
-            common_1.Logger.debug(`本次调用: ${req.user.id} model: ${model} key -> ${key}, 模型名称: ${modelName}, 最大回复token: ${maxResponseTokens}`, 'ChatgptService');
+            common_1.Logger.debug(`用户ID: ${req.user.id} 模型名称: ${modelName}-${model}, 消耗token: ${total_tokens}, 消耗积分: ${charge}`, 'ChatgptService');
             const userBalance = await this.userBalanceService.queryUserBalance(req.user.id);
             response.userBanance = Object.assign({}, userBalance);
             response.result && (response.result = '');
@@ -391,8 +394,8 @@ let ChatgptService = class ChatgptService {
         catch (error) {
             console.log('chat-error <----------------------------------------->', modelKey, error);
             const code = (error === null || error === void 0 ? void 0 : error.statusCode) || 400;
-            const status = ((_a = error === null || error === void 0 ? void 0 : error.response) === null || _a === void 0 ? void 0 : _a.status) || (error === null || error === void 0 ? void 0 : error.statusCode) || 400;
-            console.log('chat-error-detail <----------------------------------------->', 'code: ', code, 'message', error === null || error === void 0 ? void 0 : error.message, 'statusText:', (_b = error === null || error === void 0 ? void 0 : error.response) === null || _b === void 0 ? void 0 : _b.statusText, 'status', (_c = error === null || error === void 0 ? void 0 : error.response) === null || _c === void 0 ? void 0 : _c.status);
+            const status = ((_b = error === null || error === void 0 ? void 0 : error.response) === null || _b === void 0 ? void 0 : _b.status) || (error === null || error === void 0 ? void 0 : error.statusCode) || 400;
+            console.log('chat-error-detail <----------------------------------------->', 'code: ', code, 'message', error === null || error === void 0 ? void 0 : error.message, 'statusText:', (_c = error === null || error === void 0 ? void 0 : error.response) === null || _c === void 0 ? void 0 : _c.statusText, 'status', (_d = error === null || error === void 0 ? void 0 : error.response) === null || _d === void 0 ? void 0 : _d.status);
             if (error.status && error.status === 402) {
                 const errMsg = { message: `Catch Error ${error.message}`, code: 402 };
                 if (res) {
@@ -452,7 +455,7 @@ let ChatgptService = class ChatgptService {
             const money = (body === null || body === void 0 ? void 0 : body.quality) === 'hd' ? 4 : 2;
             await this.userBalanceService.validateBalance(req, 'mjDraw', money);
             let images = [];
-            const detailKeyInfo = await this.modelsService.getRandomDrawKey();
+            const detailKeyInfo = await this.modelsService.getCurrentModelKeyInfo('dall-e-3');
             const keyId = detailKeyInfo === null || detailKeyInfo === void 0 ? void 0 : detailKeyInfo.id;
             const { key, proxyResUrl } = await this.formatModelToken(detailKeyInfo);
             common_1.Logger.log(`draw paompt info <==**==> ${body.prompt}, key ===> ${key}`, 'DrawService');
@@ -795,7 +798,7 @@ let ChatgptService = class ChatgptService {
 };
 ChatgptService = __decorate([
     (0, common_1.Injectable)(),
-    __param(0, (0, typeorm_2.InjectRepository)(gptkeys_entity_1.GptKeysEntity)),
+    __param(0, (0, typeorm_2.InjectRepository)(gptKeys_entity_1.GptKeysEntity)),
     __param(1, (0, typeorm_2.InjectRepository)(config_entity_1.ConfigEntity)),
     __param(2, (0, typeorm_2.InjectRepository)(chatBoxType_entity_1.ChatBoxTypeEntity)),
     __param(3, (0, typeorm_2.InjectRepository)(chatBox_entity_1.ChatBoxEntity)),
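Reviewer note: usage accounting now falls back to a stub of { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 } when the provider returns no usage detail, and token-based billing keeps the formula charge = Math.ceil((deduct * total_tokens) / tokenFeeRatio). A worked example (the sample numbers are made up):

// Token-based billing from the diff above, with illustrative values.
const deduct = 1;            // base point cost configured for the model
const tokenFeeRatio = 1000;  // tokens covered per base deduction
const total_tokens = 2350;   // prompt_tokens + completion_tokens from usage
const charge = Math.ceil((deduct * total_tokens) / tokenFeeRatio);
console.log(charge);         // 3 — partial thousands round up, never down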
245 dist/modules/chatgpt/openai.js vendored

@@ -3,95 +3,168 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.getTokenCount = exports.sendMessageFromOpenAi = void 0;
 const axios_1 = require("axios");
 const tiktoken_1 = require("@dqbd/tiktoken");
 const utils_1 = require("../../common/utils");
+const common_1 = require("@nestjs/common");
+const uuid = require("uuid");
 const tokenizer = (0, tiktoken_1.get_encoding)('cl100k_base');
 function getFullUrl(proxyUrl) {
     const processedUrl = proxyUrl.endsWith('/') ? proxyUrl.slice(0, -1) : proxyUrl;
     const baseUrl = processedUrl || 'https://api.openai.com';
     return `${baseUrl}/v1/chat/completions`;
 }
-function sendMessageFromOpenAi(messagesHistory, inputs) {
-    var _a;
-    const { onProgress, maxToken, apiKey, model, temperature = 0.95, proxyUrl } = inputs;
-    const max_tokens = compilerToken(model, maxToken);
-    const options = {
-        method: 'POST',
-        url: getFullUrl(proxyUrl),
-        responseType: 'stream',
-        headers: {
-            'Content-Type': 'application/json',
-            Authorization: `Bearer ${(0, utils_1.removeSpecialCharacters)(apiKey)}`,
-        },
-        data: {
-            max_tokens,
-            stream: true,
-            temperature,
-            model,
-            messages: messagesHistory
-        },
-    };
-    const prompt = (_a = messagesHistory[messagesHistory.length - 1]) === null || _a === void 0 ? void 0 : _a.content;
-    return new Promise(async (resolve, reject) => {
+async function sendMessageFromOpenAi(messagesHistory, inputs, uploadService) {
+    var _a, _b, _c, _d;
+    const { onProgress, maxToken, apiKey, model, temperature = 0.8, proxyUrl, prompt } = inputs;
+    if (model.includes('dall')) {
+        let result = { text: '', fileInfo: '' };
+        try {
+            const options = {
+                method: 'POST',
+                url: `${proxyUrl}/v1/images/generations`,
+                headers: {
+                    'Content-Type': 'application/json',
+                    Authorization: `Bearer ${apiKey}`,
+                },
+                data: {
+                    prompt: prompt,
+                    model: model,
+                    response_format: 'b64_json'
+                },
+            };
             const response = await (0, axios_1.default)(options);
-        const stream = response.data;
-        let result = { text: '' };
-        stream.on('data', (chunk) => {
-            var _a;
-            const splitArr = chunk.toString().split('\n\n').filter((line) => line.trim() !== '');
-            for (const line of splitArr) {
-                const data = line.replace('data:', '');
-                let ISEND = false;
-                try {
-                    ISEND = JSON.parse(data).choices[0].finish_reason === 'stop';
-                }
-                catch (error) {
-                    ISEND = false;
-                }
-                if (ISEND) {
-                    result.text = result.text.trim();
-                    return result;
-                }
-                try {
-                    const parsedData = JSON.parse(data);
-                    if (parsedData.id) {
-                        result.id = parsedData.id;
-                    }
-                    if ((_a = parsedData.choices) === null || _a === void 0 ? void 0 : _a.length) {
-                        const delta = parsedData.choices[0].delta;
-                        result.delta = delta.content;
-                        if (delta === null || delta === void 0 ? void 0 : delta.content)
-                            result.text += delta.content;
-                        if (delta.role) {
-                            result.role = delta.role;
-                        }
-                        result.detail = parsedData;
-                    }
-                    onProgress && onProgress({ text: result.text });
-                }
-                catch (error) {
-                    console.log('parse Error', data);
-                }
-            }
-        });
-        stream.on('end', () => {
-            if (result.detail && result.text) {
-                const promptTokens = getTokenCount(prompt);
-                const completionTokens = getTokenCount(result.text);
-                result.detail.usage = {
-                    prompt_tokens: promptTokens,
-                    completion_tokens: completionTokens,
-                    total_tokens: promptTokens + completionTokens,
-                    estimated: true
-                };
-            }
-            return resolve(result);
-        });
+            const { b64_json, revised_prompt } = response.data.data[0];
+            const buffer = Buffer.from(b64_json, 'base64');
+            let imgUrl = '';
+            try {
+                const filename = uuid.v4().slice(0, 10) + '.png';
+                common_1.Logger.debug(`------> 开始上传图片!!!`, 'MidjourneyService');
+                const buffer = Buffer.from(b64_json, 'base64');
+                imgUrl = await uploadService.uploadFile({ filename, buffer });
+                common_1.Logger.debug(`图片上传成功,URL: ${imgUrl}`, 'MidjourneyService');
+            }
+            catch (error) {
+                common_1.Logger.error(`上传图片过程中出现错误: ${error}`, 'MidjourneyService');
+            }
+            result.fileInfo = imgUrl;
+            result.text = revised_prompt;
+            onProgress && onProgress({ text: result.text });
+            return result;
+        }
         catch (error) {
-            reject(error);
+            const status = ((_a = error === null || error === void 0 ? void 0 : error.response) === null || _a === void 0 ? void 0 : _a.status) || 500;
+            console.log('openai-draw error: ', JSON.stringify(error), status);
+            const message = (_d = (_c = (_b = error === null || error === void 0 ? void 0 : error.response) === null || _b === void 0 ? void 0 : _b.data) === null || _c === void 0 ? void 0 : _c.error) === null || _d === void 0 ? void 0 : _d.message;
+            if (status === 429) {
+                result.text = '当前请求已过载、请稍等会儿再试试吧!';
+                return result;
+            }
+            if (status === 400 && message.includes('This request has been blocked by our content filters')) {
+                result.text = '您的请求已被系统拒绝。您的提示可能存在一些非法的文本。';
+                return result;
+            }
+            if (status === 400 && message.includes('Billing hard limit has been reached')) {
+                result.text = '当前模型key已被封禁、已冻结当前调用Key、尝试重新对话试试吧!';
+                return result;
+            }
+            if (status === 500) {
+                result.text = '绘制图片失败,请检查你的提示词是否有非法描述!';
+                return result;
+            }
+            if (status === 401) {
+                result.text = '绘制图片失败,此次绘画被拒绝了!';
+                return result;
+            }
+            result.text = '绘制图片失败,请稍后试试吧!';
+            return result;
         }
-    });
-}
+    }
+    else {
+        let result = { text: '' };
+        const options = {
+            method: 'POST',
+            url: getFullUrl(proxyUrl),
+            responseType: 'stream',
+            headers: {
+                'Content-Type': 'application/json',
+                Accept: "application/json",
+                Authorization: `Bearer ${apiKey}`,
+            },
+            data: {
+                stream: true,
+                temperature,
+                model,
+                messages: messagesHistory,
+            },
+        };
+        if (model === 'gpt-4-vision-preview') {
+            options.data.max_tokens = 2048;
+        }
+        return new Promise(async (resolve, reject) => {
+            try {
+                const response = await (0, axios_1.default)(options);
+                const stream = response.data;
+                stream.on('data', (chunk) => {
+                    var _a;
+                    const splitArr = chunk.toString().split('\n\n').filter((line) => line.trim() !== '');
+                    for (const line of splitArr) {
+                        const data = line.replace('data:', '');
+                        let ISEND = false;
+                        try {
+                            ISEND = JSON.parse(data).choices[0].finish_reason === 'stop';
+                        }
+                        catch (error) {
+                            ISEND = false;
+                        }
+                        if (ISEND) {
+                            result.text = result.text.trim();
+                            return result;
+                        }
+                        try {
+                            if (data !== " [DONE]" && data !== "[DONE]" && data != "[DONE] ") {
+                                const parsedData = JSON.parse(data);
+                                if (parsedData.id) {
+                                    result.id = parsedData.id;
+                                }
+                                if ((_a = parsedData.choices) === null || _a === void 0 ? void 0 : _a.length) {
+                                    const delta = parsedData.choices[0].delta;
+                                    result.delta = delta.content;
+                                    if (delta === null || delta === void 0 ? void 0 : delta.content)
+                                        result.text += delta.content;
+                                    if (delta.role) {
+                                        result.role = delta.role;
+                                    }
+                                    result.detail = parsedData;
+                                }
+                                onProgress && onProgress({ text: result.text });
+                            }
+                        }
+                        catch (error) {
+                            console.log('parse Error', data);
+                        }
+                    }
+                });
+                let totalText = '';
+                messagesHistory.forEach(message => {
+                    totalText += message.content + ' ';
+                });
+                stream.on('end', () => {
+                    if (result.detail && result.text) {
+                        const promptTokens = getTokenCount(totalText);
+                        const completionTokens = getTokenCount(result.text);
+                        result.detail.usage = {
+                            prompt_tokens: promptTokens,
+                            completion_tokens: completionTokens,
+                            total_tokens: promptTokens + completionTokens,
+                            estimated: true
+                        };
+                    }
+                    return resolve(result);
+                });
+            }
+            catch (error) {
+                reject(error);
+            }
+        });
+    }
 }
 exports.sendMessageFromOpenAi = sendMessageFromOpenAi;
 function getTokenCount(text) {
@@ -104,19 +177,3 @@ function getTokenCount(text) {
     return tokenizer.encode(text).length;
 }
 exports.getTokenCount = getTokenCount;
-function compilerToken(model, maxToken) {
-    let max = 0;
-    if (model.includes(3.5)) {
-        max = maxToken > 4096 ? 4096 : maxToken;
-    }
-    if (model.includes('gpt-4')) {
-        max = maxToken > 8192 ? 8192 : maxToken;
-    }
-    if (model.includes('preview')) {
-        max = maxToken > 4096 ? 4096 : maxToken;
-    }
-    if (model.includes('32k')) {
-        max = maxToken > 32768 ? 32768 : maxToken;
-    }
-    return max;
-}
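Reviewer note: compilerToken() is removed, so max_tokens is no longer clamped per model family; only gpt-4-vision-preview now gets an explicit max_tokens (2048), and other chat requests omit the field entirely. The streaming parser is essentially unchanged apart from the new [DONE] sentinel check. A condensed sketch of that parsing loop (simplified from the code above; real chunks can split a JSON payload across 'data' events, which this sketch does not buffer):

function handleSseChunk(chunk, result, onProgress) {
    // Each SSE frame is a "data: {...}" block separated by blank lines.
    const lines = chunk.toString().split('\n\n').filter((line) => line.trim() !== '');
    for (const line of lines) {
        const data = line.replace('data:', '').trim();
        if (data === '[DONE]') return; // end-of-stream sentinel, not JSON
        try {
            const parsed = JSON.parse(data);
            const delta = parsed.choices && parsed.choices[0] && parsed.choices[0].delta;
            if (delta && delta.content) result.text += delta.content;
            onProgress && onProgress({ text: result.text });
        }
        catch (error) {
            // incomplete fragment; the production code logs it and moves on
        }
    }
}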
6 dist/modules/chatgpt/store.js vendored

@@ -3,7 +3,6 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.NineStore = void 0;
 const uuid_1 = require("uuid");
 const tiktoken_1 = require("@dqbd/tiktoken");
-const common_1 = require("@nestjs/common");
 const tokenizer = (0, tiktoken_1.get_encoding)('cl100k_base');
 class NineStore {
     constructor(options) {
@@ -32,7 +31,7 @@ class NineStore {
         let messages = [];
         let nextNumTokensEstimate = 0;
         if (systemMessage) {
-            const specialModels = ['gemini-pro', 'ERNIE', 'qwen', 'SparkDesk', 'hunyuan'];
+            const specialModels = ['gemini-pro', 'ERNIE', 'hunyuan'];
             const isSpecialModel = model && specialModels.some(specialModel => model.includes(specialModel));
             if (isSpecialModel) {
                 messages.push({ role: 'user', content: systemMessage, name });
@@ -65,7 +64,6 @@ class NineStore {
             }
             messages.push({ role: 'user', content: text, name });
         }
-        common_1.Logger.debug(`发送的参数:${messages}`);
        let nextMessages = messages;
        do {
            if (!parentMessageId) {
@@ -77,7 +75,7 @@ class NineStore {
             }
             const { text, name, role, fileInfo } = parentMessage;
             let content = text;
-            if (role === 'user' && fileInfo) {
+            if (fileInfo) {
                 if (model === 'gpt-4-vision-preview') {
                     content = [
                         { "type": "text", "text": text },
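Reviewer note: the fileInfo branch now fires for any message carrying a file, not only user-role ones, and for gpt-4-vision-preview the store rebuilds content as a multi-part array. The diff truncates before the image part; the standard OpenAI vision shape it presumably completes to looks like this (illustrative values):

// Assumed full shape of the vision message the store builds (the diff only
// shows the text part; the image_url part follows the OpenAI vision format).
const visionMessage = {
    role: 'user',
    content: [
        { type: 'text', text: 'What is in this image?' },
        { type: 'image_url', image_url: { url: 'https://example.com/photo.png' } },
    ],
};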
101 dist/modules/chatgpt/zhipu.js vendored

@@ -1,101 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.sendMessageFromZhipu = exports.compilerStream = exports.compilerMetaJsonStr = exports.generateToken = void 0;
-const axios = require('axios');
-const jwt = require('jsonwebtoken');
-function generateToken(apikey, expSeconds = 1000 * 60 * 60 * 24 * 360) {
-    const [id, secret] = apikey.split('.');
-    const payload = {
-        api_key: id,
-        exp: Math.round(Date.now()) + expSeconds * 1000,
-        timestamp: Math.round(Date.now()),
-    };
-    return jwt.sign(payload, secret, { algorithm: 'HS256', header: { alg: 'HS256', sign_type: 'SIGN' } });
-}
-exports.generateToken = generateToken;
-function compilerMetaJsonStr(data) {
-    let jsonStr = {};
-    try {
-        jsonStr = JSON.parse(data);
-    }
-    catch (error) {
-        jsonStr = {
-            usage: {
-                completion_tokens: 49,
-                prompt_tokens: 333,
-                total_tokens: 399
-            },
-        };
-        console.error('json parse error from zhipu!', data);
-    }
-    return jsonStr;
-}
-exports.compilerMetaJsonStr = compilerMetaJsonStr;
-function compilerStream(streamArr) {
-    var _a;
-    if (streamArr.length === 3) {
-        return {
-            event: streamArr[0].replace('event:', ''),
-            id: streamArr[1].replace('id:', ''),
-            is_end: false,
-            result: streamArr[2].replace('data:', '').trim()
-        };
-    }
-    if (streamArr.length === 4) {
-        return {
-            event: streamArr[0].replace('event:', ''),
-            id: streamArr[1].replace('id:', ''),
-            result: streamArr[2].replace('data:', '').trim(),
-            is_end: true,
-            usage: (_a = compilerMetaJsonStr(streamArr[3].replace('meta:', ''))) === null || _a === void 0 ? void 0 : _a.usage
-        };
-    }
-}
-exports.compilerStream = compilerStream;
-async function sendMessageFromZhipu(messagesHistory, { onProgress, key, model, temperature = 0.95 }) {
-    const token = await generateToken(key);
-    return new Promise((resolve, reject) => {
-        const url = `https://open.bigmodel.cn/api/paas/v3/model-api/${model}/sse-invoke`;
-        const options = {
-            method: 'POST',
-            url,
-            responseType: 'stream',
-            headers: {
-                'Content-Type': 'application/json',
-                'Authorization': token
-            },
-            data: {
-                prompt: messagesHistory,
-                temperature
-            }
-        };
-        axios(options)
-            .then(response => {
-            const stream = response.data;
-            let resData;
-            let cacheResText = '';
-            stream.on('data', chunk => {
-                const stramArr = chunk.toString().split("\n").filter((line) => line.trim() !== "");
-                const parseData = compilerStream(stramArr);
-                if (!parseData)
-                    return;
-                const { id, result, is_end } = parseData;
-                result && (cacheResText += result.trim());
-                if (is_end) {
-                    parseData.is_end = false;
-                    resData = parseData;
-                    resData.text = cacheResText;
-                }
-                onProgress(parseData);
-            });
-            stream.on('end', () => {
-                resolve(resData);
-                cacheResText = '';
-            });
-        })
-            .catch(error => {
-            console.log('error: ', error);
-        });
-    });
-}
-exports.sendMessageFromZhipu = sendMessageFromZhipu;
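Reviewer note: zhipu.js (the GLM adapter) is also deleted wholesale. Two details worth recording from the removed code: its SSE frames were event:/id:/data:(/meta:) line groups rather than plain data: lines, and the JWT expiry math double-scaled milliseconds (the expSeconds default was already a millisecond quantity, then multiplied by 1000 again). A corrected sketch of the token signing, for reference (hypothetical helper; 'jsonwebtoken' and the sign_type header as in the removed code):

const jwt = require('jsonwebtoken');

function generateZhipuToken(apikey, ttlSeconds = 3600) {
    const [id, secret] = apikey.split('.'); // Zhipu keys have the form "id.secret"
    const now = Date.now();                 // milliseconds since epoch
    const payload = {
        api_key: id,
        exp: now + ttlSeconds * 1000,       // scale seconds to ms exactly once
        timestamp: now,
    };
    return jwt.sign(payload, secret, { algorithm: 'HS256', header: { alg: 'HS256', sign_type: 'SIGN' } });
}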