merge upstream

This commit is contained in:
sijinhui 2023-12-27 22:55:28 +08:00
parent 19f4ef9194
commit 1e7d20580d
6 changed files with 89 additions and 82 deletions

View File

@@ -1,40 +1,40 @@
name: Upstream Sync
permissions:
contents: write
on:
schedule:
- cron: "0 0 * * *" # every day
workflow_dispatch:
jobs:
sync_latest_from_upstream:
name: Sync latest commits from upstream repo
runs-on: ubuntu-latest
if: ${{ github.event.repository.fork }}
steps:
# Step 1: run a standard checkout action
- name: Checkout target repo
uses: actions/checkout@v3
# Step 2: run the sync action
- name: Sync upstream changes
id: sync
uses: aormsby/Fork-Sync-With-Upstream-action@v3.4
with:
upstream_sync_repo: ChatGPTNextWeb/ChatGPT-Next-Web
upstream_sync_branch: main
target_sync_branch: main
target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set
# Set test_mode true to run tests instead of the true action!!
test_mode: false
- name: Sync check
if: failure()
run: |
echo "[Error] 由于上游仓库的 workflow 文件变更,导致 GitHub 自动暂停了本次自动更新,你需要手动 Sync Fork 一次详细教程请查看https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/README_CN.md#%E6%89%93%E5%BC%80%E8%87%AA%E5%8A%A8%E6%9B%B4%E6%96%B0"
echo "[Error] Due to a change in the workflow file of the upstream repository, GitHub has automatically suspended the scheduled automatic update. You need to manually sync your fork. Please refer to the detailed tutorial for instructions: https://github.com/Yidadaa/ChatGPT-Next-Web#enable-automatic-updates"
exit 1
#name: Upstream Sync
#
#permissions:
# contents: write
#
#on:
# schedule:
# - cron: "0 0 * * *" # every day
# workflow_dispatch:
#
#jobs:
# sync_latest_from_upstream:
# name: Sync latest commits from upstream repo
# runs-on: ubuntu-latest
# if: ${{ github.event.repository.fork }}
#
# steps:
# # Step 1: run a standard checkout action
# - name: Checkout target repo
# uses: actions/checkout@v3
#
# # Step 2: run the sync action
# - name: Sync upstream changes
# id: sync
# uses: aormsby/Fork-Sync-With-Upstream-action@v3.4
# with:
# upstream_sync_repo: ChatGPTNextWeb/ChatGPT-Next-Web
# upstream_sync_branch: main
# target_sync_branch: main
# target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set
#
# # Set test_mode true to run tests instead of the true action!!
# test_mode: false
#
# - name: Sync check
# if: failure()
# run: |
# echo "[Error] 由于上游仓库的 workflow 文件变更,导致 GitHub 自动暂停了本次自动更新,你需要手动 Sync Fork 一次详细教程请查看https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/README_CN.md#%E6%89%93%E5%BC%80%E8%87%AA%E5%8A%A8%E6%9B%B4%E6%96%B0"
# echo "[Error] Due to a change in the workflow file of the upstream repository, GitHub has automatically suspended the scheduled automatic update. You need to manually sync your fork. Please refer to the detailed tutorial for instructions: https://github.com/Yidadaa/ChatGPT-Next-Web#enable-automatic-updates"
# exit 1

View File

@@ -1,5 +1,6 @@
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { ModelProvider } from "@/app/constant";
const BASE_URL = process.env.MIDJOURNEY_PROXY_URL ?? null;
const MIDJOURNEY_PROXY_KEY = process.env.MIDJOURNEY_PROXY_KEY ?? null;
@@ -40,7 +41,7 @@ async function handle(
jsonBody = {};
}
const authResult = auth(req);
const authResult = auth(req, ModelProvider.GPT);
// if (authResult.error) {
// return NextResponse.json(authResult, {
// status: 401,

View File

@@ -121,11 +121,11 @@ export const DEFAULT_MODELS = [
// name: "gpt-4",
// available: true,
// },
{
name: "gpt-3.5-turbo-16k",
describe: "GPT-3,最快,笨",
available: false,
},
// {
// name: "gpt-3.5-turbo-16k",
// describe: "GPT-3,最快,笨",
// available: false,
// },
{
name: "gpt-3.5-turbo-1106",
describe: "GPT-3,最快,笨,最便宜",
@@ -136,11 +136,11 @@ export const DEFAULT_MODELS = [
providerType: "openai",
},
},
{
name: "gpt-4",
describe: "GPT-4,聪明,贵,慢",
available: false,
},
// {
// name: "gpt-4",
// describe: "GPT-4,聪明,贵,慢",
// available: false,
// },
{
name: "gpt-4-1106-preview",
describe: "GPT-4,又强又快,推荐",
@@ -153,23 +153,24 @@ export const DEFAULT_MODELS = [
},
{
name: "gemini-pro",
available: true,
available: false,
describe: "谷歌的,不知道杂用",
provider: {
id: "google",
providerName: "Google",
providerType: "google",
},
},
{
name: "gpt-4-32k",
describe: "GPT-4,聪明,慢,但是白嫖",
available: false,
},
{
name: "gpt-4-all",
describe: "GPT-4全能版,联网绘图多模态,又慢又贵",
available: false,
},
// {
// name: "gpt-4-32k",
// describe: "GPT-4,聪明,慢,但是白嫖",
// available: false,
// },
// {
// name: "gpt-4-all",
// describe: "GPT-4全能版,联网绘图多模态,又慢又贵",
// available: false,
// },
// {
// name: "gpt-4v",
// describe: "GPT-4,官方网页版,最聪明,贵且慢",
@@ -184,6 +185,11 @@ export const DEFAULT_MODELS = [
name: "midjourney",
describe: "绘图用,不用选",
available: false,
provider: {
id: "openai",
providerName: "OpenAI",
providerType: "openai",
},
},
] as const;

View File

@@ -58,7 +58,7 @@ export const CN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-3.5-turbo-16k",
model: "gpt-3.5-turbo-1106",
temperature: 1,
max_tokens: 2000,
presence_penalty: 0,
@@ -84,7 +84,7 @@ export const CN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-3.5-turbo-16k",
model: "gpt-3.5-turbo-1106",
temperature: 1,
max_tokens: 2000,
presence_penalty: 0,
@@ -110,7 +110,7 @@ export const CN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-3.5-turbo-16k",
model: "gpt-3.5-turbo-1106",
temperature: 1,
max_tokens: 2000,
presence_penalty: 0,
@@ -136,7 +136,7 @@ export const CN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-3.5-turbo-16k",
model: "gpt-3.5-turbo-1106",
temperature: 1,
max_tokens: 2000,
presence_penalty: 0,
@@ -162,7 +162,7 @@ export const CN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-3.5-turbo-16k",
model: "gpt-3.5-turbo-1106",
temperature: 1,
max_tokens: 2000,
presence_penalty: 0,
@@ -188,7 +188,7 @@ export const CN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-3.5-turbo-16k",
model: "gpt-3.5-turbo-1106",
temperature: 1,
max_tokens: 2000,
presence_penalty: 0,
@@ -214,7 +214,7 @@ export const CN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-3.5-turbo-16k",
model: "gpt-3.5-turbo-1106",
temperature: 1,
max_tokens: 2000,
presence_penalty: 0,
@@ -240,7 +240,7 @@ export const CN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-3.5-turbo-16k",
model: "gpt-3.5-turbo-1106",
temperature: 1,
max_tokens: 2000,
presence_penalty: 0,
@@ -272,7 +272,7 @@ export const CN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-3.5-turbo-16k",
model: "gpt-3.5-turbo-1106",
temperature: 0.5,
max_tokens: 2000,
presence_penalty: 0,
@@ -298,7 +298,7 @@ export const CN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-3.5-turbo-16k",
model: "gpt-3.5-turbo-1106",
temperature: 1,
max_tokens: 2000,
presence_penalty: 0,
@@ -331,7 +331,7 @@ export const CN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-3.5-turbo-16k",
model: "gpt-3.5-turbo-1106",
temperature: 1,
max_tokens: 2000,
presence_penalty: 0,
@@ -364,7 +364,7 @@ export const CN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-3.5-turbo-16k",
model: "gpt-3.5-turbo-1106",
temperature: 1,
max_tokens: 2000,
presence_penalty: 0,
@@ -422,7 +422,7 @@ export const CN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-3.5-turbo-16k",
model: "gpt-3.5-turbo-1106",
temperature: 1,
max_tokens: 2000,
presence_penalty: 0,
@@ -454,7 +454,7 @@ export const CN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-4",
model: "gpt-4-1106-preview",
temperature: 0.5,
max_tokens: 2000,
presence_penalty: 0,

View File

@@ -14,7 +14,7 @@ export const EN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-4",
model: "gpt-4-1106-preview",
temperature: 0.3,
max_tokens: 2000,
presence_penalty: 0,
@@ -60,7 +60,7 @@ export const EN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-4",
model: "gpt-4-1106-preview",
temperature: 0.5,
max_tokens: 2000,
presence_penalty: 0,
@@ -86,7 +86,7 @@ export const EN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-3.5-turbo-16k",
model: "gpt-3.5-turbo-1106",
temperature: 0.5,
max_tokens: 2000,
presence_penalty: 0,
@@ -118,7 +118,7 @@ export const EN_MASKS: BuiltinMask[] = [
},
],
modelConfig: {
model: "gpt-4",
model: "gpt-4-1106-preview",
temperature: 0.5,
max_tokens: 2000,
presence_penalty: 0,

View File

@@ -137,7 +137,7 @@ export const useAppConfig = createPersistStore(
}),
{
name: StoreKey.Config,
version: 3.89,
version: 3.891,
migrate(persistedState, version) {
const state = persistedState as ChatConfig;
@@ -168,7 +168,7 @@ export const useAppConfig = createPersistStore(
if (version < 3.8) {
state.lastUpdate = Date.now();
}
if (version < 3.89) {
if (version < 3.891) {
state.lastUpdate = Date.now();
return { ...DEFAULT_CONFIG };
}