Merge remote-tracking branch 'upstream/main' into dev

# Conflicts:
#	.github/workflows/deploy_preview.yml
#	app/client/api.ts
#	app/client/platforms/google.ts
#	app/store/chat.ts
Author: sijinhui
Date:   2024-02-08 21:29:22 +08:00
Commit: adc55d67d2
18 changed files with 751 additions and 156 deletions

.github/workflows/app.yml

@@ -43,12 +43,9 @@ jobs:
           - os: ubuntu-latest
             arch: x86_64
             rust_target: x86_64-unknown-linux-gnu
-          - os: macos-latest
-            arch: x86_64
-            rust_target: x86_64-apple-darwin
           - os: macos-latest
             arch: aarch64
-            rust_target: aarch64-apple-darwin
+            rust_target: x86_64-apple-darwin,aarch64-apple-darwin
           - os: windows-latest
             arch: x86_64
             rust_target: x86_64-pc-windows-msvc
@@ -60,13 +57,14 @@ jobs:
         uses: actions/setup-node@v3
         with:
           node-version: 18
+          cache: 'yarn'
       - name: install Rust stable
         uses: dtolnay/rust-toolchain@stable
         with:
           targets: ${{ matrix.config.rust_target }}
       - uses: Swatinem/rust-cache@v2
         with:
-          key: ${{ matrix.config.rust_target }}
+          key: ${{ matrix.config.os }}
       - name: install dependencies (ubuntu only)
         if: matrix.config.os == 'ubuntu-latest'
         run: |
@@ -79,8 +77,15 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           TAURI_PRIVATE_KEY: ${{ secrets.TAURI_PRIVATE_KEY }}
           TAURI_KEY_PASSWORD: ${{ secrets.TAURI_KEY_PASSWORD }}
+          APPLE_CERTIFICATE: ${{ secrets.APPLE_CERTIFICATE }}
+          APPLE_CERTIFICATE_PASSWORD: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
+          APPLE_SIGNING_IDENTITY: ${{ secrets.APPLE_SIGNING_IDENTITY }}
+          APPLE_ID: ${{ secrets.APPLE_ID }}
+          APPLE_PASSWORD: ${{ secrets.APPLE_PASSWORD }}
+          APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }}
         with:
           releaseId: ${{ needs.create-release.outputs.release_id }}
+          args: ${{ matrix.config.os == 'macos-latest' && '--target universal-apple-darwin' || '' }}

   publish-release:
     permissions:

.github/workflows/deploy_preview.yml (new file, 84 lines)

@@ -0,0 +1,84 @@
+name: VercelPreviewDeployment
+
+on:
+  pull_request_target:
+    types:
+      - opened
+      - synchronize
+      - reopened
+
+env:
+  VERCEL_TEAM: ${{ secrets.VERCEL_TEAM }}
+  VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN }}
+  VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }}
+  VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }}
+  VERCEL_PR_DOMAIN_SUFFIX: ${{ secrets.VERCEL_PR_DOMAIN_SUFFIX }}
+
+permissions:
+  contents: read
+  statuses: write
+  pull-requests: write
+
+jobs:
+  deploy-preview:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+
+      - name: Extract branch name
+        shell: bash
+        run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> "$GITHUB_OUTPUT"
+        id: extract_branch
+
+      - name: Hash branch name
+        uses: pplanel/hash-calculator-action@v1.3.1
+        id: hash_branch
+        with:
+          input: ${{ steps.extract_branch.outputs.branch }}
+          method: MD5
+
+      - name: Set Environment Variables
+        id: set_env
+        if: github.event_name == 'pull_request_target'
+        run: |
+          echo "VERCEL_ALIAS_DOMAIN=${{ github.event.pull_request.number }}-${{ github.workflow }}.${VERCEL_PR_DOMAIN_SUFFIX}" >> $GITHUB_OUTPUT
+
+      - name: Install Vercel CLI
+        run: npm install --global vercel@latest
+
+      - name: Cache dependencies
+        uses: actions/cache@v2
+        id: cache-npm
+        with:
+          path: ~/.npm
+          key: npm-${{ hashFiles('package-lock.json') }}
+          restore-keys: npm-
+
+      - name: Pull Vercel Environment Information
+        run: vercel pull --yes --environment=preview --token=${VERCEL_TOKEN}
+
+      - name: Deploy Project Artifacts to Vercel
+        id: vercel
+        env:
+          META_TAG: ${{ steps.hash_branch.outputs.digest }}-${{ github.run_number }}-${{ github.run_attempt}}
+        run: |
+          set -e
+          vercel pull --yes --environment=preview --token=${VERCEL_TOKEN}
+          vercel build --token=${VERCEL_TOKEN}
+          vercel deploy --prebuilt --archive=tgz --token=${VERCEL_TOKEN} --meta base_hash=${{ env.META_TAG }}
+
+          DEFAULT_URL=$(vercel ls --token=${VERCEL_TOKEN} --meta base_hash=${{ env.META_TAG }})
+          ALIAS_URL=$(vercel alias set ${DEFAULT_URL} ${{ steps.set_env.outputs.VERCEL_ALIAS_DOMAIN }} --token=${VERCEL_TOKEN} --scope ${VERCEL_TEAM}| awk '{print $3}')
+
+          echo "New preview URL: ${DEFAULT_URL}"
+          echo "New alias URL: ${ALIAS_URL}"
+          echo "VERCEL_URL=${ALIAS_URL}" >> "$GITHUB_OUTPUT"
+
+      - uses: mshick/add-pr-comment@v2
+        with:
+          message: |
+            Your build has completed!
+
+            [Preview deployment](${{ steps.vercel.outputs.VERCEL_URL }})
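The alias URL the workflow publishes is built from the PR number, the workflow name, and a configurable domain suffix. A minimal TypeScript sketch of that naming scheme, with example values assumed for illustration:

// Mirrors VERCEL_ALIAS_DOMAIN="<PR number>-<workflow name>.<suffix>" from the step above.
function previewAlias(prNumber: number, workflowName: string, domainSuffix: string): string {
  return `${prNumber}-${workflowName}.${domainSuffix}`;
}

// e.g. PR #123 deployed by this workflow with an assumed suffix:
console.log(previewAlias(123, "VercelPreviewDeployment", "preview.example.dev"));
// -> "123-VercelPreviewDeployment.preview.example.dev"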

app/client/api.ts

@@ -155,7 +155,7 @@ export function getHeaders(isAzure?: boolean) {
     Accept: "application/json",
   };
   const modelConfig = useChatStore.getState().currentSession().mask.modelConfig;
-  const isGoogle = modelConfig.model === "gemini-pro";
+  const isGoogle = modelConfig.model.startsWith("gemini");
   // const isAzure = accessStore.provider === ServiceProvider.Azure;
   const authHeader = isAzure ? "api-key" : "Authorization";
   const apiKey = isGoogle
@@ -163,20 +163,23 @@ export function getHeaders(isAzure?: boolean) {
     : isAzure
     ? accessStore.azureApiKey
     : accessStore.openaiApiKey;
+  const clientConfig = getClientConfig();
   const makeBearer = (s: string) => `${isAzure ? "" : "Bearer "}${s.trim()}`;
   const validString = (x: string) => x && x.length > 0;

-  // use user's api key first
-  if (validString(apiKey)) {
-    headers[authHeader] = makeBearer(apiKey);
-  } else if (
-    accessStore.enabledAccessControl() &&
-    validString(accessStore.accessCode)
-  ) {
-    headers[authHeader] = makeBearer(
-      ACCESS_CODE_PREFIX + accessStore.accessCode,
-    );
+  // when using google api in app, not set auth header
+  if (!(isGoogle && clientConfig?.isApp)) {
+    // use user's api key first
+    if (validString(apiKey)) {
+      headers[authHeader] = makeBearer(apiKey);
+    } else if (
+      accessStore.enabledAccessControl() &&
+      validString(accessStore.accessCode)
+    ) {
+      headers[authHeader] = makeBearer(
+        ACCESS_CODE_PREFIX + accessStore.accessCode,
+      );
+    }
   }

   if (validString(accessStore.midjourneyProxyUrl)) {
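Two behavioral changes land here: the Google check now matches any model whose name starts with "gemini" (not just "gemini-pro"), and the packaged app skips the auth header for Gemini calls because the key travels as a query parameter instead (see the google.ts diff below). A minimal sketch of the combined rule, not the actual getHeaders implementation:

// Sketch of the new guard, with the inputs reduced to plain parameters.
function shouldSetAuthHeader(model: string, isApp: boolean, apiKey: string): boolean {
  const isGoogle = model.startsWith("gemini"); // was: model === "gemini-pro"
  if (isGoogle && isApp) return false;         // key is appended as ?key=... instead
  return apiKey.length > 0;
}

console.log(shouldSetAuthHeader("gemini-pro-vision", true, "k"));  // false: app + Gemini
console.log(shouldSetAuthHeader("gemini-pro-vision", false, "k")); // true: web build still sets it
console.log(shouldSetAuthHeader("gpt-4", true, "k"));              // true: non-Gemini unaffected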

app/client/platforms/google.ts

@@ -1,15 +1,8 @@
 import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
 import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
-import {
-  EventStreamContentType,
-  fetchEventSource,
-} from "@fortaine/fetch-event-source";
-import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import Locale from "../../locales";
-import { getServerSideConfig } from "@/app/config/server";
-// import de from "@/app/locales/de";
+import { DEFAULT_API_HOST } from "@/app/constant";

 export class GeminiProApi implements LLMApi {
   extractMessage(res: any) {
     console.log("[Response] gemini-pro response: ", res);
@@ -21,7 +14,7 @@ export class GeminiProApi implements LLMApi {
     );
   }
   async chat(options: ChatOptions): Promise<void> {
-    const apiClient = this;
+    // const apiClient = this;
     const messages = options.messages.map((v) => ({
       role: v.role.replace("assistant", "model").replace("system", "user"),
       parts: [{ text: v.content }],
@@ -79,13 +72,27 @@ export class GeminiProApi implements LLMApi {
       ],
     };

-    console.log("[Request] google payload: ", requestPayload);
+    const accessStore = useAccessStore.getState();
+    let baseUrl = accessStore.googleUrl;
+    const isApp = !!getClientConfig()?.isApp;

-    const shouldStream = !!options.config.stream;
+    let shouldStream = !!options.config.stream;
     const controller = new AbortController();
     options.onController?.(controller);
     try {
-      const chatPath = this.path(Google.ChatPath);
+      let chatPath = this.path(Google.ChatPath);
+      // let baseUrl = accessStore.googleUrl;
+
+      if (!baseUrl) {
+        baseUrl = isApp
+          ? DEFAULT_API_HOST + "/api/proxy/google/" + Google.ChatPath
+          : chatPath;
+      }
+
+      if (isApp) {
+        baseUrl += `?key=${accessStore.googleApiKey}`;
+      }
+
       const chatPayload = {
         method: "POST",
         body: JSON.stringify(requestPayload),
@@ -101,10 +108,6 @@ export class GeminiProApi implements LLMApi {
       if (shouldStream) {
         let responseText = "";
         let remainText = "";
-        let streamChatPath = chatPath.replace(
-          "generateContent",
-          "streamGenerateContent",
-        );
         let finished = false;

         let existingTexts: string[] = [];
@@ -134,7 +137,11 @@ export class GeminiProApi implements LLMApi {
         // start animaion
         animateResponseText();

-        fetch(streamChatPath, chatPayload)
+        fetch(
+          baseUrl.replace("generateContent", "streamGenerateContent"),
+          chatPayload,
+        )
           .then((response) => {
             const reader = response?.body?.getReader();
             const decoder = new TextDecoder();
@@ -185,11 +192,9 @@ export class GeminiProApi implements LLMApi {
             console.error("Error:", error);
           });
       } else {
-        const res = await fetch(chatPath, chatPayload);
+        const res = await fetch(baseUrl, chatPayload);
         clearTimeout(requestTimeoutId);
         const resJson = await res.json();
-
         if (resJson?.promptFeedback?.blockReason) {
           // being blocked
           options.onError?.(
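The Gemini endpoint is now resolved at request time: a user-configured googleUrl wins; otherwise the packaged app routes through the hosted proxy with the API key appended as a query parameter, while the web build falls back to the relative chat path. A sketch of that resolution order, with the constant values and the non-app path shape assumed for illustration:

const DEFAULT_API_HOST = "https://api.nextchat.dev";
// Assumed value of Google.ChatPath, for illustration only:
const CHAT_PATH = "v1beta/models/gemini-pro:generateContent";

function resolveGoogleUrl(
  configuredUrl: string,
  isApp: boolean,
  apiKey: string,
  stream: boolean,
): string {
  let baseUrl = configuredUrl;
  if (!baseUrl) {
    baseUrl = isApp
      ? DEFAULT_API_HOST + "/api/proxy/google/" + CHAT_PATH
      : "/api/google/" + CHAT_PATH; // assumed shape of this.path(Google.ChatPath)
  }
  if (isApp) baseUrl += `?key=${apiKey}`; // key in the query string, not a header
  // Streaming swaps the RPC name, matching the fetch call in the diff:
  return stream ? baseUrl.replace("generateContent", "streamGenerateContent") : baseUrl;
}

console.log(resolveGoogleUrl("", true, "AIza-example", true));
// -> "https://api.nextchat.dev/api/proxy/google/v1beta/models/gemini-pro:streamGenerateContent?key=AIza-example"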

app/client/platforms/openai.ts

@@ -1,3 +1,4 @@
+"use client";
 import {
   ApiPath,
   AZURE_MODELS,
@@ -46,7 +47,9 @@ export class ChatGPTApi implements LLMApi {
     if (baseUrl.length === 0) {
       const isApp = !!getClientConfig()?.isApp;
-      baseUrl = isApp ? DEFAULT_API_HOST : ApiPath.OpenAI;
+      baseUrl = isApp
+        ? DEFAULT_API_HOST + "/proxy" + ApiPath.OpenAI
+        : ApiPath.OpenAI;
     }

     if (baseUrl.endsWith("/")) {
@@ -60,6 +63,8 @@ export class ChatGPTApi implements LLMApi {
       path = makeAzurePath(path, accessStore.azureApiVersion, azureModel);
     }

+    console.log("[Proxy Endpoint] ", baseUrl, path);
+
     return [baseUrl, path].join("/");
   }

app/components/exporter.tsx

@@ -307,7 +307,7 @@ export function PreviewActions(props: {
     setShouldExport(false);

     var api: ClientApi;
-    if (config.modelConfig.model === "gemini-pro") {
+    if (config.modelConfig.model.startsWith("gemini")) {
       api = new ClientApi(ModelProvider.GeminiPro);
     } else {
       api = new ClientApi(ModelProvider.GPT);

app/components/home.tsx

@@ -174,7 +174,7 @@ export function useLoadData() {
   const config = useAppConfig();

   var api: ClientApi;
-  if (config.modelConfig.model === "gemini-pro") {
+  if (config.modelConfig.model.startsWith("gemini")) {
     api = new ClientApi(ModelProvider.GeminiPro);
   } else {
     api = new ClientApi(ModelProvider.GPT);

app/components/model-config.tsx

@@ -92,7 +92,7 @@ export function ModelConfigList(props: {
       ></input>
     </ListItem>
-    {props.modelConfig.model === "gemini-pro" ? null : (
+    {props.modelConfig.model.startsWith("gemini") ? null : (
       <>
         <ListItem
           title={Locale.Settings.PresencePenalty.Title}

app/constant.ts

@@ -8,8 +8,7 @@ export const FETCH_COMMIT_URL = `https://api.github.com/repos/${OWNER}/${REPO}/c
 export const FETCH_TAG_URL = `https://api.github.com/repos/${OWNER}/${REPO}/tags?per_page=1`;
 export const RUNTIME_CONFIG_DOM = "danger-runtime-config";

-export const DEFAULT_CORS_HOST = "https://a.nextweb.fun";
-export const DEFAULT_API_HOST = `${DEFAULT_CORS_HOST}/api/proxy`;
+export const DEFAULT_API_HOST = "https://api.nextchat.dev";

 export const OPENAI_BASE_URL = "https://api.openai.com";
 export const GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/";

app/store/access.ts

@@ -12,7 +12,9 @@ import { ensure } from "../utils/clone";
 let fetchState = 0; // 0 not fetch, 1 fetching, 2 done

 const DEFAULT_OPENAI_URL =
-  getClientConfig()?.buildMode === "export" ? DEFAULT_API_HOST : ApiPath.OpenAI;
+  getClientConfig()?.buildMode === "export"
+    ? DEFAULT_API_HOST + "/api/proxy/openai"
+    : ApiPath.OpenAI;

 const DEFAULT_ACCESS_STATE = {
   accessCode: "",

app/store/chat.ts

@@ -104,13 +104,14 @@ function fillTemplateWith(input: string, modelConfig: ModelConfig) {
     KnowledgeCutOffDate[modelConfig.model] ?? KnowledgeCutOffDate.default;

   // Find the model in the DEFAULT_MODELS array that matches the modelConfig.model
   const modelInfo = DEFAULT_MODELS.find((m) => m.name === modelConfig.model);
-  if (!modelInfo) {
-    throw new Error(
-      `Model ${modelConfig.model} not found in DEFAULT_MODELS array.`,
-    );
-  }

-  // Directly use the providerName from the modelInfo
-  const serviceProvider = modelInfo.provider.providerName;
+  var serviceProvider = "OpenAI";
+  if (modelInfo) {
+    // TODO: auto detect the providerName from the modelConfig.model
+    // Directly use the providerName from the modelInfo
+    serviceProvider = modelInfo.provider.providerName;
+  }

   const vars = {
     ServiceProvider: serviceProvider,
@@ -617,7 +618,7 @@ export const useChatStore = createPersistStore(
         extAttr?.setAutoScroll(true);
       } else {
         var api: ClientApi;
-        if (modelConfig.model === "gemini-pro") {
+        if (modelConfig.model.startsWith("gemini")) {
           api = new ClientApi(ModelProvider.GeminiPro);
         } else {
           api = new ClientApi(ModelProvider.GPT);
@@ -803,7 +804,7 @@ export const useChatStore = createPersistStore(
       const modelConfig = session.mask.modelConfig;

       var api: ClientApi;
-      if (modelConfig.model === "gemini-pro") {
+      if (modelConfig.model.startsWith("gemini")) {
         api = new ClientApi(ModelProvider.GeminiPro);
       } else {
         api = new ClientApi(ModelProvider.GPT);
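fillTemplateWith previously threw when the active model was missing from DEFAULT_MODELS; it now falls back to "OpenAI" for the ServiceProvider template variable, so custom models no longer crash prompt templating. A reduced sketch of the new behavior, with stand-in entries assumed for illustration:

type ModelRecord = { name: string; provider: { providerName: string } };

// Assumed entries standing in for DEFAULT_MODELS:
const MODELS: ModelRecord[] = [
  { name: "gpt-4", provider: { providerName: "OpenAI" } },
  { name: "gemini-pro", provider: { providerName: "Google" } },
];

function serviceProviderFor(model: string): string {
  const info = MODELS.find((m) => m.name === model);
  return info ? info.provider.providerName : "OpenAI"; // was: throw new Error(...)
}

console.log(serviceProviderFor("gemini-pro"));      // "Google"
console.log(serviceProviderFor("my-custom-model")); // "OpenAI" instead of an exception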

app/utils/cors.ts

@@ -1,8 +1,8 @@
 import { getClientConfig } from "../config/client";
-import { ApiPath, DEFAULT_CORS_HOST } from "../constant";
+import { ApiPath, DEFAULT_API_HOST } from "../constant";

 export function corsPath(path: string) {
-  const baseUrl = getClientConfig()?.isApp ? `${DEFAULT_CORS_HOST}` : "";
+  const baseUrl = getClientConfig()?.isApp ? `${DEFAULT_API_HOST}` : "";

   if (!path.startsWith("/")) {
     path = "/" + path;

next.config.mjs

@@ -120,8 +120,17 @@ if (mode !== "export") {
   nextConfig.rewrites = async () => {
     const ret = [
+      // adjust for previous verison directly using "/api/proxy/" as proxy base route
       {
-        source: "/api/proxy/:path*",
+        source: "/api/proxy/v1/:path*",
+        destination: "https://api.openai.com/v1/:path*",
+      },
+      {
+        source: "/api/proxy/google/:path*",
+        destination: "https://generativelanguage.googleapis.com/:path*",
+      },
+      {
+        source: "/api/proxy/openai/:path*",
         destination: "https://api.openai.com/:path*",
       },
       {
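The single catch-all proxy rewrite becomes three routes: a legacy /api/proxy/v1 route kept for older clients, plus dedicated Google and OpenAI routes that the new DEFAULT_API_HOST paths above rely on. A rough illustration of the resulting mappings (a hypothetical helper; Next.js does the real matching):

const rewrites: Array<[RegExp, string]> = [
  [/^\/api\/proxy\/v1\/(.*)$/, "https://api.openai.com/v1/$1"], // legacy base route
  [/^\/api\/proxy\/google\/(.*)$/, "https://generativelanguage.googleapis.com/$1"],
  [/^\/api\/proxy\/openai\/(.*)$/, "https://api.openai.com/$1"],
];

function applyRewrite(path: string): string | undefined {
  for (const [pattern, target] of rewrites) {
    if (pattern.test(path)) return path.replace(pattern, target);
  }
  return undefined; // no proxy route matched
}

console.log(applyRewrite("/api/proxy/google/v1beta/models/gemini-pro:generateContent"));
// -> "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent"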

package.json

@@ -1,5 +1,5 @@
 {
-  "name": "chatgpt-next-web",
+  "name": "nextchat",
   "private": false,
   "license": "mit",
   "scripts": {
@@ -85,4 +85,4 @@
   "resolutions": {
     "lint-staged/yaml": "^2.2.2"
   }
 }

scripts/setup.sh

@@ -54,7 +54,7 @@ if ! command -v node >/dev/null || ! command -v git >/dev/null || ! command -v y
 fi

 # Clone the repository and install dependencies
-git clone https://github.com/Yidadaa/ChatGPT-Next-Web
+git clone https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web
 cd ChatGPT-Next-Web
 yarn install

src-tauri/Cargo.lock (generated, 643 lines changed)

File diff suppressed because it is too large.

src-tauri/Cargo.toml

@@ -1,27 +1,45 @@
 [package]
-name = "chatgpt-next-web"
+name = "nextchat"
 version = "0.1.0"
 description = "A cross platform app for LLM ChatBot."
 authors = ["Yidadaa"]
 license = "mit"
 repository = ""
-default-run = "chatgpt-next-web"
+default-run = "nextchat"
 edition = "2021"
 rust-version = "1.60"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [build-dependencies]
-tauri-build = { version = "1.3.0", features = [] }
+tauri-build = { version = "1.5.1", features = [] }

 [dependencies]
 serde_json = "1.0"
 serde = { version = "1.0", features = ["derive"] }
-tauri = { version = "1.3.0", features = ["notification-all", "fs-all", "clipboard-all", "dialog-all", "shell-open", "updater", "window-close", "window-hide", "window-maximize", "window-minimize", "window-set-icon", "window-set-ignore-cursor-events", "window-set-resizable", "window-show", "window-start-dragging", "window-unmaximize", "window-unminimize"] }
+tauri = { version = "1.5.4", features = [
+    "notification-all",
+    "fs-all",
+    "clipboard-all",
+    "dialog-all",
+    "shell-open",
+    "updater",
+    "window-close",
+    "window-hide",
+    "window-maximize",
+    "window-minimize",
+    "window-set-icon",
+    "window-set-ignore-cursor-events",
+    "window-set-resizable",
+    "window-show",
+    "window-start-dragging",
+    "window-unmaximize",
+    "window-unminimize",
+] }
 tauri-plugin-window-state = { git = "https://github.com/tauri-apps/plugins-workspace", branch = "v1" }

 [features]
 # this feature is used for production builds or when `devPath` points to the filesystem and the built-in dev server is disabled.
 # If you use cargo directly instead of tauri's cli you can use this feature flag to switch between tauri's `dev` and `build` modes.
 # DO NOT REMOVE!!
-custom-protocol = [ "tauri/custom-protocol" ]
+custom-protocol = ["tauri/custom-protocol"]

src-tauri/tauri.conf.json

@@ -9,7 +9,7 @@
   },
   "package": {
     "productName": "NextChat",
-    "version": "2.10.1"
+    "version": "2.10.3"
   },
   "tauri": {
     "allowlist": {
@@ -86,12 +86,13 @@
       }
     },
     "security": {
-      "csp": null
+      "csp": null,
+      "dangerousUseHttpScheme": true
     },
     "updater": {
       "active": true,
       "endpoints": [
-        "https://github.com/Yidadaa/ChatGPT-Next-Web/releases/latest/download/latest.json"
+        "https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/releases/latest/download/latest.json"
       ],
       "dialog": false,
       "windows": {