mirror of https://github.com/vastxie/99AI.git
v 2.5.0
40 dist/modules/chatGroup/chatGroup.service.js (vendored)
@@ -27,32 +27,38 @@ let ChatGroupService = class ChatGroupService {
}
async create(body, req) {
const { id } = req.user;
const { appId } = body;
const { appId, modelConfig: bodyModelConfig } = body;
let modelConfig = bodyModelConfig;
if (!modelConfig) {
modelConfig = await this.modelsService.getBaseConfig(appId);
if (!modelConfig) {
throw new common_1.HttpException('管理员未配置任何AI模型、请先联系管理员开通聊天模型配置!', common_1.HttpStatus.BAD_REQUEST);
}
}
const params = { title: '新对话', userId: id };
if (appId) {
const appInfo = await this.appEntity.findOne({ where: { id: appId } });
if (!appInfo) {
throw new common_1.HttpException('非法操作、您在使用一个不存在的应用!', common_1.HttpStatus.BAD_REQUEST);
}
else {
const { status, name } = appInfo;
const g = await this.chatGroupEntity.count({ where: { userId: id, appId, isDelete: false } });
if (g > 0) {
throw new common_1.HttpException('当前应用已经开启了一个对话无需新建了!', common_1.HttpStatus.BAD_REQUEST);
}
if (![1, 3, 4, 5].includes(status)) {
throw new common_1.HttpException('非法操作、您在使用一个未启用的应用!', common_1.HttpStatus.BAD_REQUEST);
}
name && (params['title'] = name);
appId && (params['appId'] = appId);
const { status, name } = appInfo;
const existingGroupCount = await this.chatGroupEntity.count({ where: { userId: id, appId, isDelete: false } });
if (existingGroupCount > 0) {
throw new common_1.HttpException('当前应用已经开启了一个对话无需新建了!', common_1.HttpStatus.BAD_REQUEST);
}
if (![1, 3, 4, 5].includes(status)) {
throw new common_1.HttpException('非法操作、您在使用一个未启用的应用!', common_1.HttpStatus.BAD_REQUEST);
}
if (name) {
params['title'] = name;
}
params['appId'] = appId;
}
const modelConfig = await this.modelsService.getBaseConfig(appId);
appId && (modelConfig.appId = appId);
if (!modelConfig) {
throw new common_1.HttpException('管理员未配置任何AI模型、请先联系管理员开通聊天模型配置!', common_1.HttpStatus.BAD_REQUEST);
if (appId) {
modelConfig.appId = appId;
}
return await this.chatGroupEntity.save(Object.assign(Object.assign({}, params), { config: JSON.stringify(modelConfig) }));
const newGroup = await this.chatGroupEntity.save(Object.assign(Object.assign({}, params), { config: JSON.stringify(modelConfig) }));
return newGroup;
}
async query(req) {
try {
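Net effect of this hunk: create() now prefers a model configuration supplied by the client and only falls back to the admin-configured base model when none is sent, and the app checks no longer sit inside an else branch. A minimal sketch of the new precedence, assuming modelsService.getBaseConfig behaves as shown above (resolveModelConfig is a hypothetical helper, not part of the commit):

// Hypothetical condensed view of the new fallback order
async function resolveModelConfig(body, modelsService) {
    // 1. prefer the config the client serialized into the request body
    let modelConfig = body.modelConfig;
    // 2. otherwise fall back to the admin-configured base model
    if (!modelConfig) {
        modelConfig = await modelsService.getBaseConfig(body.appId);
    }
    if (!modelConfig) {
        throw new Error('No AI model configured; contact the administrator.');
    }
    // 3. pin the resolved config to the requesting app
    if (body.appId) {
        modelConfig.appId = body.appId;
    }
    return modelConfig;
}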
@@ -19,4 +19,8 @@ __decorate([
(0, class_validator_1.IsOptional)(),
__metadata("design:type", Number)
], CreateGroupDto.prototype, "appId", void 0);
__decorate([
(0, swagger_1.ApiProperty)({ example: "", description: '对话模型配置项序列化的字符串', required: false }),
__metadata("design:type", Object)
], CreateGroupDto.prototype, "modelConfig", void 0);
exports.CreateGroupDto = CreateGroupDto;
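With the new optional modelConfig property, a client can seed a group with its own serialized model settings. An illustrative request body (field values are made up; only appId and modelConfig are defined by the DTO above):

// Illustrative CreateGroupDto payload
const payload = {
    appId: 12,
    modelConfig: JSON.stringify({ model: 'gpt-3.5-turbo', temperature: 0.8 }),
};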
4 dist/modules/chatLog/chatLog.service.js (vendored)
@@ -195,7 +195,7 @@ let ChatLogService = class ChatLogService {
}
const list = await this.chatLogEntity.find({ where });
return list.map((item) => {
const { prompt, role, answer, createdAt, model, conversationOptions, requestOptions, id } = item;
const { prompt, role, answer, createdAt, model, conversationOptions, requestOptions, id, fileInfo } = item;
let parseConversationOptions = null;
let parseRequestOptions = null;
try {
@@ -212,6 +212,8 @@ let ChatLogService = class ChatLogService {
error: false,
conversationOptions: parseConversationOptions,
requestOptions: parseRequestOptions,
fileInfo: fileInfo,
model: model,
};
});
}
113 dist/modules/chatgpt/chatgpt.service.js (vendored)
@@ -35,10 +35,8 @@ const fanyi_service_1 = require("../fanyi/fanyi.service");
const app_entity_1 = require("../app/app.entity");
const chatGroup_service_1 = require("../chatGroup/chatGroup.service");
const models_service_1 = require("../models/models.service");
const baidu_1 = require("./baidu");
const helper_1 = require("./helper");
const store_1 = require("./store");
const zhipu_1 = require("./zhipu");
const openai_1 = require("./openai");
const chatBoxType_entity_1 = require("./chatBoxType.entity");
const chatBox_entity_1 = require("./chatBox.entity");
@@ -134,7 +132,7 @@ let ChatgptService = class ChatgptService {
const { options = {}, appId, cusromPrompt, systemMessage = '' } = body;
let setSystemMessage = systemMessage;
const { parentMessageId } = options;
const { prompt } = body;
const { prompt, fileInfo } = body;
const { groupId, usingNetwork } = options;
const groupInfo = await this.chatGroupService.getGroupInfoFromId(groupId);
const groupConfig = (groupInfo === null || groupInfo === void 0 ? void 0 : groupInfo.config) ? JSON.parse(groupInfo.config) : await this.modelsService.getBaseConfig();
@@ -149,7 +147,7 @@ let ChatgptService = class ChatgptService {
if (!currentRequestModelKey) {
throw new common_1.HttpException('当前流程所需要的模型已被管理员下架、请联系管理员上架专属模型!', common_1.HttpStatus.BAD_REQUEST);
}
const { deduct, isTokenBased, deductType, key: modelKey, secret, modelName, id: keyId, accessToken } = currentRequestModelKey;
const { deduct, isTokenBased, tokenFeeRatio, deductType, key: modelKey, secret, modelName, id: keyId, accessToken } = currentRequestModelKey;
await this.userService.checkUserStatus(req.user);
await this.userBalanceService.validateBalance(req, deductType === 1 ? 'model3' : 'model4', deduct);
res && res.setHeader('Content-type', 'application/octet-stream; charset=utf-8');
@@ -208,6 +206,7 @@ let ChatgptService = class ChatgptService {
userId: req.user.id,
type: balance_constant_1.DeductionKey.CHAT_TYPE,
prompt,
fileInfo: fileInfo,
answer: '',
promptTokens: prompt_tokens,
completionTokens: 0,
@@ -249,79 +248,45 @@ let ChatgptService = class ChatgptService {
});
let charge = deduct;
if (isTokenBased === true) {
charge = deduct * total_tokens;
charge = Math.ceil((deduct * total_tokens) / tokenFeeRatio);
}
await this.userBalanceService.deductFromBalance(req.user.id, `model${deductType === 1 ? 3 : 4}`, charge, total_tokens);
});
if (Number(keyType) === 1) {
const { key, maxToken, maxTokenRes, proxyResUrl } = await this.formatModelToken(currentRequestModelKey);
const { parentMessageId, completionParams, systemMessage } = mergedOptions;
const { model, temperature } = completionParams;
const { context: messagesHistory } = await this.nineStore.buildMessageFromParentMessageId(usingNetwork ? netWorkPrompt : prompt, {
parentMessageId,
systemMessage,
maxModelToken: maxToken,
maxResponseTokens: maxTokenRes,
maxRounds: (0, helper_1.addOneIfOdd)(rounds),
});
let firstChunk = true;
response = await (0, openai_1.sendMessageFromOpenAi)(messagesHistory, {
maxToken,
maxTokenRes,
apiKey: modelKey,
model,
temperature,
proxyUrl: proxyResUrl,
onProgress: (chat) => {
res.write(firstChunk ? JSON.stringify(chat) : `\n${JSON.stringify(chat)}`);
lastChat = chat;
firstChunk = false;
},
});
isSuccess = true;
}
if (Number(keyType) === 2) {
let firstChunk = true;
const { context: messagesHistory } = await this.nineStore.buildMessageFromParentMessageId(usingNetwork ? netWorkPrompt : prompt, {
parentMessageId,
maxRounds: (0, helper_1.addOneIfOdd)(rounds),
});
response = await (0, baidu_1.sendMessageFromBaidu)(usingNetwork ? netWorkPrompt : messagesHistory, {
temperature,
accessToken,
model,
onProgress: (data) => {
res.write(firstChunk ? JSON.stringify(data) : `\n${JSON.stringify(data)}`);
firstChunk = false;
lastChat = data;
},
});
isSuccess = true;
}
if (Number(keyType) === 3) {
let firstChunk = true;
const { context: messagesHistory } = await this.nineStore.buildMessageFromParentMessageId(usingNetwork ? netWorkPrompt : prompt, {
parentMessageId,
maxRounds: (0, helper_1.addOneIfOdd)(rounds),
});
response = await (0, zhipu_1.sendMessageFromZhipu)(usingNetwork ? netWorkPrompt : messagesHistory, {
temperature,
key,
model,
onProgress: (data) => {
res.write(firstChunk ? JSON.stringify(data) : `\n${JSON.stringify(data)}`);
firstChunk = false;
lastChat = data;
},
});
isSuccess = true;
}
const { key, maxToken, maxTokenRes, proxyResUrl } = await this.formatModelToken(currentRequestModelKey);
const { parentMessageId, completionParams, systemMessage } = mergedOptions;
const { model, temperature } = completionParams;
const { context: messagesHistory } = await this.nineStore.buildMessageFromParentMessageId(usingNetwork ? netWorkPrompt : prompt, {
parentMessageId,
systemMessage,
maxModelToken: maxToken,
maxResponseTokens: maxTokenRes,
maxRounds: (0, helper_1.addOneIfOdd)(rounds),
fileInfo: fileInfo,
model: model
});
let firstChunk = true;
response = await (0, openai_1.sendMessageFromOpenAi)(messagesHistory, {
maxToken,
maxTokenRes,
apiKey: modelKey,
model,
fileInfo,
temperature,
proxyUrl: proxyResUrl,
onProgress: (chat) => {
res.write(firstChunk ? JSON.stringify(chat) : `\n${JSON.stringify(chat)}`);
lastChat = chat;
firstChunk = false;
},
});
isSuccess = true;
const userMessageData = {
id: this.nineStore.getUuid(),
text: prompt,
role: 'user',
name: undefined,
usage: null,
fileInfo: fileInfo,
parentMessageId: parentMessageId,
conversationId: response === null || response === void 0 ? void 0 : response.conversationId,
};
@@ -356,11 +321,12 @@ let ChatgptService = class ChatgptService {
onProgress: null,
});
}
const formatResponse = await (0, helper_1.unifiedFormattingResponse)(keyType, response, othersInfo);
const { prompt_tokens = 0, completion_tokens = 0, total_tokens = 0 } = formatResponse.usage;
let prompt_tokens = response.prompt_tokens || 0;
let completion_tokens = response.completion_tokens || 0;
let total_tokens = response.total_tokens || 0;
let charge = deduct;
if (isTokenBased === true) {
charge = deduct * total_tokens;
charge = Math.ceil((deduct * total_tokens) / tokenFeeRatio);
}
await this.userBalanceService.deductFromBalance(req.user.id, `model${deductType === 1 ? 3 : 4}`, charge, total_tokens);
await this.modelsService.saveUseLog(keyId, total_tokens);
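Both deduction paths now divide by tokenFeeRatio and round up, so deduct is effectively a price per tokenFeeRatio tokens rather than per single token. A quick worked example with made-up numbers:

// Illustrative values — deduct and tokenFeeRatio come from the model key config
const deduct = 2;            // points per tokenFeeRatio tokens
const tokenFeeRatio = 1000;  // tokens covered by one deduct unit
const total_tokens = 1530;   // tokens consumed by the request
const charge = Math.ceil((deduct * total_tokens) / tokenFeeRatio); // Math.ceil(3.06) = 4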
@@ -371,11 +337,12 @@ let ChatgptService = class ChatgptService {
userId: req.user.id,
type: balance_constant_1.DeductionKey.CHAT_TYPE,
prompt,
fileInfo: fileInfo,
answer: '',
promptTokens: prompt_tokens,
completionTokens: 0,
totalTokens: total_tokens,
model: formatResponse.model,
model: model,
role: 'user',
groupId,
requestOptions: JSON.stringify({
@@ -389,7 +356,7 @@ let ChatgptService = class ChatgptService {
userId: req.user.id,
type: balance_constant_1.DeductionKey.CHAT_TYPE,
prompt: prompt,
answer: formatResponse === null || formatResponse === void 0 ? void 0 : formatResponse.text,
answer: response.text,
promptTokens: prompt_tokens,
completionTokens: completion_tokens,
totalTokens: total_tokens,
4 dist/modules/chatgpt/dto/chatProcess.dto.js (vendored)
@@ -27,6 +27,10 @@ __decorate([
(0, class_validator_1.IsNotEmpty)({ message: '提问信息不能为空!' }),
__metadata("design:type", String)
], ChatProcessDto.prototype, "prompt", void 0);
__decorate([
(0, swagger_1.ApiProperty)({ example: 'https://123.png', description: '对话附带的链接', required: false }),
__metadata("design:type", String)
], ChatProcessDto.prototype, "url", void 0);
__decorate([
(0, swagger_1.ApiProperty)({ example: '{ parentMessageId: 0 }', description: '上次对话信息', required: false }),
(0, class_transformer_1.Type)(() => Options),
58 dist/modules/chatgpt/helper.js (vendored)
@@ -4,7 +4,6 @@ exports.addOneIfOdd = exports.unifiedFormattingResponse = void 0;
function unifiedFormattingResponse(keyType, response, others) {
let formatRes = {
keyType,
model: '',
parentMessageId: '',
text: '',
usage: {
@@ -13,45 +12,26 @@ function unifiedFormattingResponse(keyType, response, others) {
total_tokens: 0,
}
};
if ([1].includes(Number(keyType))) {
const { model, parentMessageId } = response === null || response === void 0 ? void 0 : response.detail;
let { usage } = response === null || response === void 0 ? void 0 : response.detail;
if (!usage) {
usage = {
prompt_tokens: 0,
completion_tokens: 0,
total_tokens: 0
};
const { parentMessageId } = response === null || response === void 0 ? void 0 : response.detail;
let { usage } = response === null || response === void 0 ? void 0 : response.detail;
if (!usage) {
usage = {
prompt_tokens: 0,
completion_tokens: 0,
total_tokens: 0
};
}
const { prompt_tokens, completion_tokens, total_tokens } = usage;
formatRes = {
keyType,
parentMessageId,
text: response.text,
usage: {
prompt_tokens,
completion_tokens,
total_tokens
}
const { prompt_tokens, completion_tokens, total_tokens } = usage;
formatRes = {
keyType,
model,
parentMessageId,
text: response.text,
usage: {
prompt_tokens,
completion_tokens,
total_tokens
}
};
}
if ([2, 3].includes(Number(keyType))) {
const { usage, text } = response;
const { prompt_tokens, completion_tokens, total_tokens } = usage;
const { model, parentMessageId } = others;
formatRes = {
keyType,
model,
parentMessageId,
text,
usage: {
prompt_tokens,
completion_tokens,
total_tokens
}
};
}
};
return formatRes;
}
exports.unifiedFormattingResponse = unifiedFormattingResponse;
6 dist/modules/chatgpt/openai.js (vendored)
@@ -13,7 +13,6 @@ function getFullUrl(proxyUrl) {
function sendMessageFromOpenAi(messagesHistory, inputs) {
var _a;
const { onProgress, maxToken, apiKey, model, temperature = 0.95, proxyUrl } = inputs;
console.log('current request options: ', apiKey, model, maxToken, proxyUrl);
const max_tokens = compilerToken(model, maxToken);
const options = {
method: 'POST',
@@ -49,7 +48,7 @@ function sendMessageFromOpenAi(messagesHistory, inputs) {
catch (error) {
ISEND = false;
}
if (data === '[DONE]' || ISEND) {
if (ISEND) {
result.text = result.text.trim();
return result;
}
@@ -98,6 +97,9 @@ exports.sendMessageFromOpenAi = sendMessageFromOpenAi;
function getTokenCount(text) {
if (!text)
return 0;
if (typeof text !== 'string') {
text = String(text);
}
text = text.replace(/<\|endoftext\|>/g, '');
return tokenizer.encode(text).length;
}
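getTokenCount coerces non-string input, strips the <|endoftext|> sentinel, and counts BPE tokens. A small sanity check, assuming the same @dqbd/tiktoken tokenizer this file already uses:

// Sketch of the counting behavior, not part of the commit
const { get_encoding } = require('@dqbd/tiktoken');
const tokenizer = get_encoding('cl100k_base');
const sample = 'Hello world<|endoftext|>';
const clean = sample.replace(/<\|endoftext\|>/g, '');
console.log(tokenizer.encode(clean).length); // token count for 'Hello world'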
64 dist/modules/chatgpt/store.js (vendored)
@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
exports.NineStore = void 0;
const uuid_1 = require("uuid");
const tiktoken_1 = require("@dqbd/tiktoken");
const common_1 = require("@nestjs/common");
const tokenizer = (0, tiktoken_1.get_encoding)('cl100k_base');
class NineStore {
constructor(options) {
@@ -26,16 +27,46 @@ class NineStore {
await this.store.set(message.id, message, expires);
}
async buildMessageFromParentMessageId(text, options) {
let { maxRounds, maxModelToken, maxResponseTokens, systemMessage = '', name } = options;
let { maxRounds, maxModelToken, maxResponseTokens, systemMessage = '', name, fileInfo, model } = options;
let { parentMessageId } = options;
let messages = [];
let nextNumTokensEstimate = 0;
if (systemMessage) {
messages.push({ role: 'system', content: systemMessage });
const specialModels = ['gemini-pro', 'ERNIE', 'qwen', 'SparkDesk', 'hunyuan'];
const isSpecialModel = specialModels.some(specialModel => model.includes(specialModel));
if (isSpecialModel) {
messages.push({ role: 'user', content: systemMessage });
messages.push({ role: 'assistant', content: "好的" });
}
else {
messages.push({ role: 'system', content: systemMessage, name });
}
}
const systemMessageOffset = messages.length;
let round = 0;
let nextMessages = text ? messages.concat([{ role: 'user', content: text, name }]) : messages;
if (model === 'gpt-4-vision-preview' && fileInfo) {
const content = [
{
"type": "text",
"text": text
},
{
"type": "image_url",
"image_url": {
"url": fileInfo
}
}
];
messages.push({ role: 'user', content: content, name });
}
else {
if (model === 'gpt-4-all' && fileInfo) {
text = fileInfo + "\n" + text;
}
messages.push({ role: 'user', content: text, name });
}
common_1.Logger.debug(`发送的参数:${messages}`);
let nextMessages = messages;
do {
if (!parentMessageId) {
break;
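For gpt-4-vision-preview, the user turn is built as an array of typed parts instead of a plain string, matching OpenAI's multimodal message format; for gpt-4-all the file URL is simply prepended to the text. An illustrative example of the resulting vision message (values are placeholders):

// Shape of the multimodal user message assembled above — illustrative values
const visionMessage = {
    role: 'user',
    content: [
        { type: 'text', text: 'What is in this picture?' },
        { type: 'image_url', image_url: { url: 'https://example.com/photo.png' } },
    ],
};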
@@ -44,9 +75,21 @@ class NineStore {
if (!parentMessage) {
break;
}
const { text, name, role } = parentMessage;
const { text, name, role, fileInfo } = parentMessage;
let content = text;
if (role === 'user' && fileInfo) {
if (model === 'gpt-4-vision-preview') {
content = [
{ "type": "text", "text": text },
{ "type": "image_url", "image_url": { "url": fileInfo } }
];
}
else if (model === 'gpt-4-all') {
content = fileInfo + "\n" + text;
}
}
nextMessages = nextMessages.slice(0, systemMessageOffset).concat([
{ role, content: text, name },
{ role, content, name },
...nextMessages.slice(systemMessageOffset)
]);
round++;
@@ -69,7 +112,16 @@ class NineStore {
}
_getTokenCount(messages) {
let text = messages.reduce((pre, cur) => {
return pre += cur.content;
if (Array.isArray(cur.content)) {
const contentText = cur.content
.filter((item) => item.type === 'text')
.map((item) => item.text)
.join(' ');
return pre + contentText;
}
else {
return pre + (cur.content || '');
}
}, '');
text = text.replace(/<\|endoftext\|>/g, '');
return tokenizer.encode(text).length;
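Since vision messages now carry array content, _getTokenCount flattens only the text parts before encoding. The same reduce step in isolation (a standalone sketch, not the literal method):

// Keep text parts of multimodal messages, pass plain strings through
function flattenContent(messages) {
    return messages.reduce((pre, cur) => {
        if (Array.isArray(cur.content)) {
            return pre + cur.content
                .filter((item) => item.type === 'text')
                .map((item) => item.text)
                .join(' ');
        }
        return pre + (cur.content || '');
    }, '');
}
// flattenContent([{ content: 'hi ' }, { content: [{ type: 'text', text: 'there' }] }]) === 'hi there'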
10 dist/modules/midjourney/midjourney.service.js (vendored)
@@ -92,6 +92,7 @@ let MidjourneyService = class MidjourneyService {
await this.updateDrawData(jobData, drawRes);
this.lockPrompt = this.lockPrompt.filter((item) => item !== drawInfo.randomDrawId);
}
this.drawSuccess(jobData);
return true;
}
catch (error) {
@@ -754,10 +755,15 @@ let MidjourneyService = class MidjourneyService {
}
async drawFailed(jobData) {
const { id, userId, action } = jobData;
const amount = action === 2 ? 1 : 4;
await this.userBalanceService.refundMjBalance(userId, amount);
await this.midjourneyEntity.update({ id }, { status: 4 });
}
async drawSuccess(jobData) {
const { id, userId, action } = jobData;
const amount = action === 2 ? 1 : 4;
common_1.Logger.debug(`绘画完成,执行扣费,扣除费用:${amount}积分。`);
await this.userBalanceService.refundMjBalance(userId, -amount);
await this.midjourneyEntity.update({ id }, { status: 3 });
}
async getList(params) {
const { page = 1, size = 20, rec, userId, status } = params;
if (Number(size) === 999) {
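drawSuccess reuses refundMjBalance with a negated amount, so one helper covers both the refund on failure (positive) and the post-success charge (negative); refundMjBalance itself just forwards -amount to deductFromBalance, as the userBalance diff below shows. Roughly:

// Sign convention implied above — illustrative calls
await this.userBalanceService.refundMjBalance(userId, 4);  // failed draw: return 4 points
await this.userBalanceService.refundMjBalance(userId, -4); // finished draw: charge 4 points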
4 dist/modules/models/dto/setModel.dto.js (vendored)
@@ -85,4 +85,8 @@ __decorate([
(0, swagger_1.ApiProperty)({ example: true, description: '是否使用token计费', required: false }),
__metadata("design:type", Boolean)
], SetModelDto.prototype, "isTokenBased", void 0);
__decorate([
(0, swagger_1.ApiProperty)({ example: true, description: 'token计费比例', required: false }),
__metadata("design:type", Number)
], SetModelDto.prototype, "tokenFeeRatio", void 0);
exports.SetModelDto = SetModelDto;
4 dist/modules/models/models.entity.js (vendored)
@@ -98,6 +98,10 @@ __decorate([
(0, typeorm_1.Column)({ comment: '是否使用token计费: 0:不是 1:是', default: 0 }),
__metadata("design:type", Boolean)
], ModelsEntity.prototype, "isTokenBased", void 0);
__decorate([
(0, typeorm_1.Column)({ comment: 'token计费比例', default: 0 }),
__metadata("design:type", Number)
], ModelsEntity.prototype, "tokenFeeRatio", void 0);
ModelsEntity = __decorate([
(0, typeorm_1.Entity)({ name: 'models' })
], ModelsEntity);
6 dist/modules/queue/queue.service.js (vendored)
@@ -43,7 +43,6 @@ let QueueService = class QueueService {
const timeout = (await this.globalConfigService.getConfigs(['mjTimeoutMs'])) || 200000;
const job = await this.mjDrawQueue.add('mjDraw', { id: res.id, action: imgUrl ? 4 : 1, userId: req.user.id }, { delay: 1000, timeout: +timeout });
this.jobIds.push(job.id);
await this.userBalanceService.deductFromBalance(req.user.id, 'mjDraw', 4, 4);
return true;
}
if (!drawId || !orderId) {
@@ -57,7 +56,6 @@ let QueueService = class QueueService {
const res = await this.midjourneyService.addDrawQueue(params);
const timeout = (await this.globalConfigService.getConfigs(['mjTimeoutMs'])) || 200000;
const job = await this.mjDrawQueue.add('mjDraw', { id: res.id, action, userId: req.user.id }, { delay: 1000, timeout: +timeout });
await this.userBalanceService.deductFromBalance(req.user.id, 'mjDraw', 1, 1);
this.jobIds.push(job.id);
return;
}
@@ -68,7 +66,6 @@ let QueueService = class QueueService {
const timeout = (await this.globalConfigService.getConfigs(['mjTimeoutMs'])) || 200000;
const job = await this.mjDrawQueue.add('mjDraw', { id: res.id, action, userId: req.user.id }, { delay: 1000, timeout: +timeout });
this.jobIds.push(job.id);
await this.userBalanceService.deductFromBalance(req.user.id, 'mjDraw', 4, 4);
return;
}
if (action === midjourney_constant_1.MidjourneyActionEnum.REGENERATE) {
@@ -78,7 +75,6 @@ let QueueService = class QueueService {
const timeout = (await this.globalConfigService.getConfigs(['mjTimeoutMs'])) || 200000;
const job = await this.mjDrawQueue.add('mjDraw', { id: res.id, action, userId: req.user.id }, { delay: 1000, timeout: +timeout });
this.jobIds.push(job.id);
await this.userBalanceService.deductFromBalance(req.user.id, 'mjDraw', 4, 4);
return;
}
if (action === midjourney_constant_1.MidjourneyActionEnum.VARY) {
@@ -88,7 +84,6 @@ let QueueService = class QueueService {
const timeout = (await this.globalConfigService.getConfigs(['mjTimeoutMs'])) || 200000;
const job = await this.mjDrawQueue.add('mjDraw', { id: res.id, action, userId: req.user.id }, { delay: 1000, timeout: +timeout });
this.jobIds.push(job.id);
await this.userBalanceService.deductFromBalance(req.user.id, 'mjDraw', 4, 4);
return;
}
if (action === midjourney_constant_1.MidjourneyActionEnum.ZOOM) {
@@ -98,7 +93,6 @@ let QueueService = class QueueService {
const timeout = (await this.globalConfigService.getConfigs(['mjTimeoutMs'])) || 200000;
const job = await this.mjDrawQueue.add('mjDraw', { id: res.id, action, userId: req.user.id }, { delay: 1000, timeout: +timeout });
this.jobIds.push(job.id);
await this.userBalanceService.deductFromBalance(req.user.id, 'mjDraw', 4, 4);
return;
}
}
14 dist/modules/userBalance/userBalance.service.js (vendored)
@@ -235,9 +235,16 @@ let UserBalanceService = class UserBalanceService {
if (updateKey.includes('MjCount')) {
useKey = 'useDrawMjToken';
}
const updateBalance = { [updateKey]: b[updateKey] - amount < 0 ? 0 : b[updateKey] - amount, [useKey]: b[useKey] + UseAmount };
useKey === 'useModel3Token' && (updateBalance['useModel3Count'] = b['useModel3Count'] + 1);
useKey === 'useModel4Token' && (updateBalance['useModel4Count'] = b['useModel4Count'] + 1);
const updateBalance = {
[updateKey]: b[updateKey] - amount < 0 ? 0 : b[updateKey] - amount,
[useKey]: b[useKey] + UseAmount,
};
if (useKey === 'useModel3Token') {
updateBalance['useModel3Count'] = b['useModel3Count'] + amount;
}
if (useKey === 'useModel4Token') {
updateBalance['useModel4Count'] = b['useModel4Count'] + amount;
}
const result = await this.userBalanceEntity.update({ userId }, updateBalance);
if (result.affected === 0) {
throw new common_1.HttpException('消费余额失败!', common_1.HttpStatus.BAD_REQUEST);
@@ -465,6 +472,7 @@ let UserBalanceService = class UserBalanceService {
return await this.userBalanceEntity.find({ where: { userId: (0, typeorm_2.In)(ids) } });
}
async refundMjBalance(userId, amount) {
return await this.deductFromBalance(userId, 'mjDraw', -amount);
}
async upgradeBalance() {
const users = await this.userEntity.find();
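deductFromBalance clamps the stored balance at zero and, after this change, advances the per-model usage counters by amount instead of a flat 1. The clamping arithmetic with made-up numbers (key names are resolved dynamically in the service):

// Illustrative only
const balance = 3, amount = 5;
const remaining = balance - amount < 0 ? 0 : balance - amount; // 0, never negative
const useModel4Count = 10 + amount;                            // 15, previously would have been 10 + 1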