mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git
synced 2025-11-17 14:33:41 +08:00
merge upstream
@@ -1,5 +1,6 @@
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "@/app/api/auth";
+import { ModelProvider } from "@/app/constant";
 
 const BASE_URL = process.env.MIDJOURNEY_PROXY_URL ?? null;
 const MIDJOURNEY_PROXY_KEY = process.env.MIDJOURNEY_PROXY_KEY ?? null;
@@ -40,7 +41,7 @@ async function handle(
     jsonBody = {};
   }
 
-  const authResult = auth(req);
+  const authResult = auth(req, ModelProvider.GPT);
   // if (authResult.error) {
   //   return NextResponse.json(authResult, {
   //     status: 401,
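The auth guard itself stays commented out in this route; only the call signature is updated for the new ModelProvider argument. For reference, a minimal sketch of the handler with that guard re-enabled, assuming auth() returns an object with an error flag as the commented lines suggest:

// Sketch only, not the file's actual contents: the commented-out guard
// re-enabled, assuming auth() returns { error: boolean, msg?: string }.
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { ModelProvider } from "@/app/constant";

async function handle(req: NextRequest) {
  const authResult = auth(req, ModelProvider.GPT);
  if (authResult.error) {
    // Reject unauthenticated requests before proxying to the Midjourney backend.
    return NextResponse.json(authResult, { status: 401 });
  }
  // ...existing proxy logic against MIDJOURNEY_PROXY_URL goes here...
}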
@@ -121,11 +121,11 @@ export const DEFAULT_MODELS = [
   //   name: "gpt-4",
   //   available: true,
   // },
-  {
-    name: "gpt-3.5-turbo-16k",
-    describe: "GPT-3,最快,笨",
-    available: false,
-  },
+  // {
+  //   name: "gpt-3.5-turbo-16k",
+  //   describe: "GPT-3,最快,笨",
+  //   available: false,
+  // },
   {
     name: "gpt-3.5-turbo-1106",
     describe: "GPT-3,最快,笨,最便宜",
@@ -136,11 +136,11 @@ export const DEFAULT_MODELS = [
       providerType: "openai",
     },
   },
-  {
-    name: "gpt-4",
-    describe: "GPT-4,聪明,贵,慢",
-    available: false,
-  },
+  // {
+  //   name: "gpt-4",
+  //   describe: "GPT-4,聪明,贵,慢",
+  //   available: false,
+  // },
   {
     name: "gpt-4-1106-preview",
     describe: "GPT-4,又强又快,推荐",
@@ -153,23 +153,24 @@ export const DEFAULT_MODELS = [
   },
   {
     name: "gemini-pro",
-    available: true,
+    available: false,
+    describe: "谷歌的,不知道杂用",
     provider: {
       id: "google",
       providerName: "Google",
       providerType: "google",
     },
   },
-  {
-    name: "gpt-4-32k",
-    describe: "GPT-4,聪明,慢,但是白嫖",
-    available: false,
-  },
-  {
-    name: "gpt-4-all",
-    describe: "GPT-4全能版,联网绘图多模态,又慢又贵",
-    available: false,
-  },
+  // {
+  //   name: "gpt-4-32k",
+  //   describe: "GPT-4,聪明,慢,但是白嫖",
+  //   available: false,
+  // },
+  // {
+  //   name: "gpt-4-all",
+  //   describe: "GPT-4全能版,联网绘图多模态,又慢又贵",
+  //   available: false,
+  // },
   // {
   //   name: "gpt-4v",
   //   describe: "GPT-4,官方网页版,最聪明,贵且慢",
@@ -184,6 +185,11 @@ export const DEFAULT_MODELS = [
   {
     name: "midjourney",
     describe: "绘图用,不用选",
     available: false,
+    provider: {
+      id: "openai",
+      providerName: "OpenAI",
+      providerType: "openai",
+    },
   },
 ] as const;
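Taken together, the entries above imply the shape below for this fork's model table; the describe field is the fork's own addition on top of upstream's name/available/provider. The type and helper names here are illustrative, not the repo's:

// Illustrative types only; field names are taken from the entries above.
type ModelEntry = {
  name: string;
  describe?: string; // fork-specific human-readable note
  available: boolean;
  provider?: {
    id: string;
    providerName: string;
    providerType: string;
  };
};

// A hedged guess at how `available` is meant to be used: only entries
// flagged true would be offered in the model picker; commented-out and
// `available: false` rows stay hidden.
function selectableModels(models: readonly ModelEntry[]): ModelEntry[] {
  return models.filter((m) => m.available);
}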
@@ -58,7 +58,7 @@ export const CN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-3.5-turbo-16k",
+      model: "gpt-3.5-turbo-1106",
       temperature: 1,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -84,7 +84,7 @@ export const CN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-3.5-turbo-16k",
+      model: "gpt-3.5-turbo-1106",
       temperature: 1,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -110,7 +110,7 @@ export const CN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-3.5-turbo-16k",
+      model: "gpt-3.5-turbo-1106",
       temperature: 1,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -136,7 +136,7 @@ export const CN_MASKS: BuiltinMask[] = [
      },
     ],
     modelConfig: {
-      model: "gpt-3.5-turbo-16k",
+      model: "gpt-3.5-turbo-1106",
       temperature: 1,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -162,7 +162,7 @@ export const CN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-3.5-turbo-16k",
+      model: "gpt-3.5-turbo-1106",
       temperature: 1,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -188,7 +188,7 @@ export const CN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-3.5-turbo-16k",
+      model: "gpt-3.5-turbo-1106",
       temperature: 1,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -214,7 +214,7 @@ export const CN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-3.5-turbo-16k",
+      model: "gpt-3.5-turbo-1106",
       temperature: 1,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -240,7 +240,7 @@ export const CN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-3.5-turbo-16k",
+      model: "gpt-3.5-turbo-1106",
       temperature: 1,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -272,7 +272,7 @@ export const CN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-3.5-turbo-16k",
+      model: "gpt-3.5-turbo-1106",
       temperature: 0.5,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -298,7 +298,7 @@ export const CN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-3.5-turbo-16k",
+      model: "gpt-3.5-turbo-1106",
       temperature: 1,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -331,7 +331,7 @@ export const CN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-3.5-turbo-16k",
+      model: "gpt-3.5-turbo-1106",
       temperature: 1,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -364,7 +364,7 @@ export const CN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-3.5-turbo-16k",
+      model: "gpt-3.5-turbo-1106",
       temperature: 1,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -422,7 +422,7 @@ export const CN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-3.5-turbo-16k",
+      model: "gpt-3.5-turbo-1106",
       temperature: 1,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -454,7 +454,7 @@ export const CN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-4",
+      model: "gpt-4-1106-preview",
       temperature: 0.5,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -14,7 +14,7 @@ export const EN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-4",
+      model: "gpt-4-1106-preview",
       temperature: 0.3,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -60,7 +60,7 @@ export const EN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-4",
+      model: "gpt-4-1106-preview",
       temperature: 0.5,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -86,7 +86,7 @@ export const EN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-3.5-turbo-16k",
+      model: "gpt-3.5-turbo-1106",
       temperature: 0.5,
       max_tokens: 2000,
       presence_penalty: 0,
@@ -118,7 +118,7 @@ export const EN_MASKS: BuiltinMask[] = [
       },
     ],
     modelConfig: {
-      model: "gpt-4",
+      model: "gpt-4-1106-preview",
       temperature: 0.5,
       max_tokens: 2000,
       presence_penalty: 0,
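The mask hunks above repeat the same one-line default-model swap (gpt-3.5-turbo-16k to gpt-3.5-turbo-1106, gpt-4 to gpt-4-1106-preview) across every built-in mask. An equivalent programmatic pass, purely illustrative and not how the repo actually does it, could remap the defaults in one place, assuming the usual ./cn and ./en module layout:

// Illustrative only: remap built-in mask defaults in one pass instead of
// editing each modelConfig by hand. Module paths and the exported name
// are assumptions, not the repo's real mask index file.
import { CN_MASKS } from "./cn";
import { EN_MASKS } from "./en";

const MODEL_UPGRADES: Record<string, string> = {
  "gpt-3.5-turbo-16k": "gpt-3.5-turbo-1106",
  "gpt-4": "gpt-4-1106-preview",
};

export const UPGRADED_MASKS = [...CN_MASKS, ...EN_MASKS].map((mask) => ({
  ...mask,
  modelConfig: {
    ...mask.modelConfig,
    model: MODEL_UPGRADES[mask.modelConfig.model] ?? mask.modelConfig.model,
  },
}));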
@@ -137,7 +137,7 @@ export const useAppConfig = createPersistStore(
   }),
   {
     name: StoreKey.Config,
-    version: 3.89,
+    version: 3.891,
     migrate(persistedState, version) {
       const state = persistedState as ChatConfig;
@@ -168,7 +168,7 @@ export const useAppConfig = createPersistStore(
       if (version < 3.8) {
         state.lastUpdate = Date.now();
       }
-      if (version < 3.89) {
+      if (version < 3.891) {
        state.lastUpdate = Date.now();
        return { ...DEFAULT_CONFIG };
       }
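Both config-store hunks follow the same persisted-state pattern: bump the store version and add a matching version check in migrate so stale persisted config is thrown away. The repo wraps this in its own createPersistStore helper; a minimal sketch of the same pattern with plain zustand persist, using placeholder names and a trimmed-down state, looks like this:

// Sketch with placeholder names; the real store persists the full ChatConfig.
import { create } from "zustand";
import { persist } from "zustand/middleware";

type ConfigState = { lastUpdate: number };

const DEFAULTS: ConfigState = { lastUpdate: Date.now() };

export const useConfigStore = create<ConfigState>()(
  persist(() => ({ ...DEFAULTS }), {
    name: "app-config", // StoreKey.Config in the repo
    version: 3.891, // bumped from 3.89, as in the hunk above
    migrate: (persistedState, version) => {
      const state = persistedState as ConfigState;
      if (version < 3.891) {
        // Older persisted config is discarded and rebuilt from defaults,
        // mirroring the `return { ...DEFAULT_CONFIG }` branch above.
        return { ...DEFAULTS };
      }
      return state;
    },
  }),
);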