vastxie
2024-01-27 00:40:50 +08:00
parent af6eedafa0
commit fa238c1f03
160 changed files with 1160 additions and 1122 deletions

View File

@@ -35,10 +35,8 @@ const fanyi_service_1 = require("../fanyi/fanyi.service");
const app_entity_1 = require("../app/app.entity");
const chatGroup_service_1 = require("../chatGroup/chatGroup.service");
const models_service_1 = require("../models/models.service");
-const baidu_1 = require("./baidu");
const helper_1 = require("./helper");
const store_1 = require("./store");
-const zhipu_1 = require("./zhipu");
const openai_1 = require("./openai");
const chatBoxType_entity_1 = require("./chatBoxType.entity");
const chatBox_entity_1 = require("./chatBox.entity");
@@ -134,7 +132,7 @@ let ChatgptService = class ChatgptService {
const { options = {}, appId, cusromPrompt, systemMessage = '' } = body;
let setSystemMessage = systemMessage;
const { parentMessageId } = options;
-const { prompt } = body;
+const { prompt, fileInfo } = body;
const { groupId, usingNetwork } = options;
const groupInfo = await this.chatGroupService.getGroupInfoFromId(groupId);
const groupConfig = (groupInfo === null || groupInfo === void 0 ? void 0 : groupInfo.config) ? JSON.parse(groupInfo.config) : await this.modelsService.getBaseConfig();
@@ -149,7 +147,7 @@ let ChatgptService = class ChatgptService {
if (!currentRequestModelKey) {
throw new common_1.HttpException('The model required by the current flow has been taken down by the administrator. Please contact the administrator to bring the dedicated model back online!', common_1.HttpStatus.BAD_REQUEST);
}
-const { deduct, isTokenBased, deductType, key: modelKey, secret, modelName, id: keyId, accessToken } = currentRequestModelKey;
+const { deduct, isTokenBased, tokenFeeRatio, deductType, key: modelKey, secret, modelName, id: keyId, accessToken } = currentRequestModelKey;
await this.userService.checkUserStatus(req.user);
await this.userBalanceService.validateBalance(req, deductType === 1 ? 'model3' : 'model4', deduct);
res && res.setHeader('Content-type', 'application/octet-stream; charset=utf-8');
@@ -208,6 +206,7 @@ let ChatgptService = class ChatgptService {
userId: req.user.id,
type: balance_constant_1.DeductionKey.CHAT_TYPE,
prompt,
+fileInfo: fileInfo,
answer: '',
promptTokens: prompt_tokens,
completionTokens: 0,
@@ -249,79 +248,45 @@ let ChatgptService = class ChatgptService {
});
let charge = deduct;
if (isTokenBased === true) {
-charge = deduct * total_tokens;
+charge = Math.ceil((deduct * total_tokens) / tokenFeeRatio);
}
await this.userBalanceService.deductFromBalance(req.user.id, `model${deductType === 1 ? 3 : 4}`, charge, total_tokens);
});
-if (Number(keyType) === 1) {
-const { key, maxToken, maxTokenRes, proxyResUrl } = await this.formatModelToken(currentRequestModelKey);
-const { parentMessageId, completionParams, systemMessage } = mergedOptions;
-const { model, temperature } = completionParams;
-const { context: messagesHistory } = await this.nineStore.buildMessageFromParentMessageId(usingNetwork ? netWorkPrompt : prompt, {
-parentMessageId,
-systemMessage,
-maxModelToken: maxToken,
-maxResponseTokens: maxTokenRes,
-maxRounds: (0, helper_1.addOneIfOdd)(rounds),
-});
-let firstChunk = true;
-response = await (0, openai_1.sendMessageFromOpenAi)(messagesHistory, {
-maxToken,
-maxTokenRes,
-apiKey: modelKey,
-model,
-temperature,
-proxyUrl: proxyResUrl,
-onProgress: (chat) => {
-res.write(firstChunk ? JSON.stringify(chat) : `\n${JSON.stringify(chat)}`);
-lastChat = chat;
-firstChunk = false;
-},
-});
-isSuccess = true;
-}
-if (Number(keyType) === 2) {
-let firstChunk = true;
-const { context: messagesHistory } = await this.nineStore.buildMessageFromParentMessageId(usingNetwork ? netWorkPrompt : prompt, {
-parentMessageId,
-maxRounds: (0, helper_1.addOneIfOdd)(rounds),
-});
-response = await (0, baidu_1.sendMessageFromBaidu)(usingNetwork ? netWorkPrompt : messagesHistory, {
-temperature,
-accessToken,
-model,
-onProgress: (data) => {
-res.write(firstChunk ? JSON.stringify(data) : `\n${JSON.stringify(data)}`);
-firstChunk = false;
-lastChat = data;
-},
-});
-isSuccess = true;
-}
-if (Number(keyType) === 3) {
-let firstChunk = true;
-const { context: messagesHistory } = await this.nineStore.buildMessageFromParentMessageId(usingNetwork ? netWorkPrompt : prompt, {
-parentMessageId,
-maxRounds: (0, helper_1.addOneIfOdd)(rounds),
-});
-response = await (0, zhipu_1.sendMessageFromZhipu)(usingNetwork ? netWorkPrompt : messagesHistory, {
-temperature,
-key,
-model,
-onProgress: (data) => {
-res.write(firstChunk ? JSON.stringify(data) : `\n${JSON.stringify(data)}`);
-firstChunk = false;
-lastChat = data;
-},
-});
-isSuccess = true;
-}
+const { key, maxToken, maxTokenRes, proxyResUrl } = await this.formatModelToken(currentRequestModelKey);
+const { parentMessageId, completionParams, systemMessage } = mergedOptions;
+const { model, temperature } = completionParams;
+const { context: messagesHistory } = await this.nineStore.buildMessageFromParentMessageId(usingNetwork ? netWorkPrompt : prompt, {
+parentMessageId,
+systemMessage,
+maxModelToken: maxToken,
+maxResponseTokens: maxTokenRes,
+maxRounds: (0, helper_1.addOneIfOdd)(rounds),
+fileInfo: fileInfo,
+model: model
+});
+let firstChunk = true;
+response = await (0, openai_1.sendMessageFromOpenAi)(messagesHistory, {
+maxToken,
+maxTokenRes,
+apiKey: modelKey,
+model,
+fileInfo,
+temperature,
+proxyUrl: proxyResUrl,
+onProgress: (chat) => {
+res.write(firstChunk ? JSON.stringify(chat) : `\n${JSON.stringify(chat)}`);
+lastChat = chat;
+firstChunk = false;
+},
+});
+isSuccess = true;
const userMessageData = {
id: this.nineStore.getUuid(),
text: prompt,
role: 'user',
name: undefined,
usage: null,
+fileInfo: fileInfo,
parentMessageId: parentMessageId,
conversationId: response === null || response === void 0 ? void 0 : response.conversationId,
};
@@ -356,11 +321,12 @@ let ChatgptService = class ChatgptService {
onProgress: null,
});
}
-const formatResponse = await (0, helper_1.unifiedFormattingResponse)(keyType, response, othersInfo);
-const { prompt_tokens = 0, completion_tokens = 0, total_tokens = 0 } = formatResponse.usage;
+let prompt_tokens = response.prompt_tokens || 0;
+let completion_tokens = response.completion_tokens || 0;
+let total_tokens = response.total_tokens || 0;
let charge = deduct;
if (isTokenBased === true) {
-charge = deduct * total_tokens;
+charge = Math.ceil((deduct * total_tokens) / tokenFeeRatio);
}
await this.userBalanceService.deductFromBalance(req.user.id, `model${deductType === 1 ? 3 : 4}`, charge, total_tokens);
await this.modelsService.saveUseLog(keyId, total_tokens);
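With isTokenBased pricing, deduct is now read as a price per tokenFeeRatio tokens rather than per token, and the result is rounded up. A minimal sketch of the new formula with illustrative values (none of these numbers come from the commit):

// Hypothetical pricing: 2 points per 1000 tokens (deduct = 2, tokenFeeRatio = 1000).
const deduct = 2;
const tokenFeeRatio = 1000;
const total_tokens = 1530;
// Math.ceil((2 * 1530) / 1000) = Math.ceil(3.06) = 4 points charged.
const charge = Math.ceil((deduct * total_tokens) / tokenFeeRatio);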
@@ -371,11 +337,12 @@ let ChatgptService = class ChatgptService {
userId: req.user.id,
type: balance_constant_1.DeductionKey.CHAT_TYPE,
prompt,
+fileInfo: fileInfo,
answer: '',
promptTokens: prompt_tokens,
completionTokens: 0,
totalTokens: total_tokens,
-model: formatResponse.model,
+model: model,
role: 'user',
groupId,
requestOptions: JSON.stringify({
@@ -389,7 +356,7 @@ let ChatgptService = class ChatgptService {
userId: req.user.id,
type: balance_constant_1.DeductionKey.CHAT_TYPE,
prompt: prompt,
-answer: formatResponse === null || formatResponse === void 0 ? void 0 : formatResponse.text,
+answer: response.text,
promptTokens: prompt_tokens,
completionTokens: completion_tokens,
totalTokens: total_tokens,

View File

@@ -27,6 +27,10 @@ __decorate([
(0, class_validator_1.IsNotEmpty)({ message: 'The prompt must not be empty!' }),
__metadata("design:type", String)
], ChatProcessDto.prototype, "prompt", void 0);
+__decorate([
+(0, swagger_1.ApiProperty)({ example: 'https://123.png', description: 'Link attached to the conversation', required: false }),
+__metadata("design:type", String)
+], ChatProcessDto.prototype, "url", void 0);
__decorate([
(0, swagger_1.ApiProperty)({ example: '{ parentMessageId: 0 }', description: 'Previous conversation info', required: false }),
(0, class_transformer_1.Type)(() => Options),
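The DTO gains an optional url property next to prompt. A request body exercising it might look like the sketch below (values are illustrative; the example URL comes from the ApiProperty annotation above):

// Hypothetical chat-process request body using the new optional field.
const body = {
    prompt: 'What is in this picture?',
    url: 'https://123.png',
    options: { parentMessageId: 0 },
};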

View File

@@ -4,7 +4,6 @@ exports.addOneIfOdd = exports.unifiedFormattingResponse = void 0;
function unifiedFormattingResponse(keyType, response, others) {
let formatRes = {
keyType,
-model: '',
parentMessageId: '',
text: '',
usage: {
@@ -13,45 +12,26 @@ function unifiedFormattingResponse(keyType, response, others) {
total_tokens: 0,
}
};
-if ([1].includes(Number(keyType))) {
-const { model, parentMessageId } = response === null || response === void 0 ? void 0 : response.detail;
-let { usage } = response === null || response === void 0 ? void 0 : response.detail;
-if (!usage) {
-usage = {
-prompt_tokens: 0,
-completion_tokens: 0,
-total_tokens: 0
-};
-}
+const { parentMessageId } = response === null || response === void 0 ? void 0 : response.detail;
+let { usage } = response === null || response === void 0 ? void 0 : response.detail;
+if (!usage) {
+usage = {
+prompt_tokens: 0,
+completion_tokens: 0,
+total_tokens: 0
+};
+}
+const { prompt_tokens, completion_tokens, total_tokens } = usage;
+formatRes = {
+keyType,
+parentMessageId,
+text: response.text,
+usage: {
+prompt_tokens,
+completion_tokens,
+total_tokens
+}
-const { prompt_tokens, completion_tokens, total_tokens } = usage;
-formatRes = {
-keyType,
-model,
-parentMessageId,
-text: response.text,
-usage: {
-prompt_tokens,
-completion_tokens,
-total_tokens
-}
-};
-}
-if ([2, 3].includes(Number(keyType))) {
-const { usage, text } = response;
-const { prompt_tokens, completion_tokens, total_tokens } = usage;
-const { model, parentMessageId } = others;
-formatRes = {
-keyType,
-model,
-parentMessageId,
-text,
-usage: {
-prompt_tokens,
-completion_tokens,
-total_tokens
-}
-};
-}
+};
return formatRes;
}
exports.unifiedFormattingResponse = unifiedFormattingResponse;
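With the keyType branches gone, unifiedFormattingResponse reduces every response to a single shape and no longer reports a model. A sketch of the normalized result (field values illustrative):

// Shape of formatRes after this commit, whatever the keyType.
const formatRes = {
    keyType: 1,
    parentMessageId: 'parent-message-id',
    text: 'Hello!',
    usage: { prompt_tokens: 12, completion_tokens: 3, total_tokens: 15 },
};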

View File

@@ -13,7 +13,6 @@ function getFullUrl(proxyUrl) {
function sendMessageFromOpenAi(messagesHistory, inputs) {
var _a;
const { onProgress, maxToken, apiKey, model, temperature = 0.95, proxyUrl } = inputs;
-console.log('current request options: ', apiKey, model, maxToken, proxyUrl);
const max_tokens = compilerToken(model, maxToken);
const options = {
method: 'POST',
@@ -49,7 +48,7 @@ function sendMessageFromOpenAi(messagesHistory, inputs) {
catch (error) {
ISEND = false;
}
-if (data === '[DONE]' || ISEND) {
+if (ISEND) {
result.text = result.text.trim();
return result;
}
@@ -98,6 +97,9 @@ exports.sendMessageFromOpenAi = sendMessageFromOpenAi;
function getTokenCount(text) {
if (!text)
return 0;
+if (typeof text !== 'string') {
+text = String(text);
+}
text = text.replace(/<\|endoftext\|>/g, '');
return tokenizer.encode(text).length;
}
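getTokenCount now coerces non-string input before encoding, so the array-style message content introduced elsewhere in this commit cannot crash the tokenizer. A self-contained sketch, assuming the same @dqbd/tiktoken cl100k_base encoding the store module uses:

// Assumes the cl100k_base tokenizer, as in store.js.
const { get_encoding } = require('@dqbd/tiktoken');
const tokenizer = get_encoding('cl100k_base');
function getTokenCount(text) {
    if (!text)
        return 0;
    if (typeof text !== 'string')
        text = String(text); // coerce arrays/objects instead of throwing
    return tokenizer.encode(text.replace(/<\|endoftext\|>/g, '')).length;
}
console.log(getTokenCount('hello world')); // 2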

View File

@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
exports.NineStore = void 0;
const uuid_1 = require("uuid");
const tiktoken_1 = require("@dqbd/tiktoken");
+const common_1 = require("@nestjs/common");
const tokenizer = (0, tiktoken_1.get_encoding)('cl100k_base');
class NineStore {
constructor(options) {
@@ -26,16 +27,46 @@ class NineStore {
await this.store.set(message.id, message, expires);
}
async buildMessageFromParentMessageId(text, options) {
-let { maxRounds, maxModelToken, maxResponseTokens, systemMessage = '', name } = options;
+let { maxRounds, maxModelToken, maxResponseTokens, systemMessage = '', name, fileInfo, model } = options;
let { parentMessageId } = options;
let messages = [];
let nextNumTokensEstimate = 0;
if (systemMessage) {
-messages.push({ role: 'system', content: systemMessage });
+const specialModels = ['gemini-pro', 'ERNIE', 'qwen', 'SparkDesk', 'hunyuan'];
+const isSpecialModel = specialModels.some(specialModel => model.includes(specialModel));
+if (isSpecialModel) {
+messages.push({ role: 'user', content: systemMessage });
+messages.push({ role: 'assistant', content: "OK" });
+}
+else {
+messages.push({ role: 'system', content: systemMessage, name });
+}
}
const systemMessageOffset = messages.length;
let round = 0;
-let nextMessages = text ? messages.concat([{ role: 'user', content: text, name }]) : messages;
+if (model === 'gpt-4-vision-preview' && fileInfo) {
+const content = [
+{
+"type": "text",
+"text": text
+},
+{
+"type": "image_url",
+"image_url": {
+"url": fileInfo
+}
+}
+];
+messages.push({ role: 'user', content: content, name });
+}
+else {
+if (model === 'gpt-4-all' && fileInfo) {
+text = fileInfo + "\n" + text;
+}
+messages.push({ role: 'user', content: text, name });
+}
+common_1.Logger.debug(`Outgoing messages: ${JSON.stringify(messages)}`);
+let nextMessages = messages;
do {
if (!parentMessageId) {
break;
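For gpt-4-vision-preview the user turn becomes a two-part content array (text plus image_url), which is the shape the OpenAI vision endpoint expects; for gpt-4-all the file link is simply prepended to the text. A sketch of the vision case (URL illustrative):

// Hypothetical message produced by the new branch when fileInfo is set.
const message = {
    role: 'user',
    content: [
        { type: 'text', text: 'Describe this image' },
        { type: 'image_url', image_url: { url: 'https://123.png' } },
    ],
    name: undefined,
};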
@@ -44,9 +75,21 @@ class NineStore {
if (!parentMessage) {
break;
}
-const { text, name, role } = parentMessage;
+const { text, name, role, fileInfo } = parentMessage;
+let content = text;
+if (role === 'user' && fileInfo) {
+if (model === 'gpt-4-vision-preview') {
+content = [
+{ "type": "text", "text": text },
+{ "type": "image_url", "image_url": { "url": fileInfo } }
+];
+}
+else if (model === 'gpt-4-all') {
+content = fileInfo + "\n" + text;
+}
+}
nextMessages = nextMessages.slice(0, systemMessageOffset).concat([
-{ role, content: text, name },
+{ role, content, name },
...nextMessages.slice(systemMessageOffset)
]);
round++;
@@ -69,7 +112,16 @@ class NineStore {
}
_getTokenCount(messages) {
let text = messages.reduce((pre, cur) => {
-return pre += cur.content;
+if (Array.isArray(cur.content)) {
+const contentText = cur.content
+.filter((item) => item.type === 'text')
+.map((item) => item.text)
+.join(' ');
+return pre + contentText;
+}
+else {
+return pre + (cur.content || '');
+}
}, '');
text = text.replace(/<\|endoftext\|>/g, '');
return tokenizer.encode(text).length;
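The reworked reducer flattens array-style content to its text parts before encoding, so image_url entries never reach the tokenizer. Given a history like the sketch below (values illustrative), only 'What is this?' and 'A red square.' are counted:

// Mixed history: the image_url part contributes zero tokens.
const messages = [
    {
        role: 'user',
        content: [
            { type: 'text', text: 'What is this?' },
            { type: 'image_url', image_url: { url: 'https://123.png' } },
        ],
    },
    { role: 'assistant', content: 'A red square.' },
];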