mirror of
				https://github.com/songquanpeng/one-api.git
				synced 2025-10-31 13:53:41 +08:00 
			
		
		
		
	Compare commits
	
		
			54 Commits
		
	
	
		
			v0.5.3-alp
			...
			v0.5.5-alp
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
|  | 04acdb1ccb | ||
|  | f0d5e102a3 | ||
|  | abbf2fded0 | ||
|  | ef2c5abb5b | ||
|  | 56b5007379 | ||
|  | d09d317459 | ||
|  | 1c4409ae80 | ||
|  | 5ee24e8acf | ||
|  | 4f2f911e4d | ||
|  | fdb2cccf65 | ||
|  | a3e267df7e | ||
|  | ac7c0f3a76 | ||
|  | efeb9a16ce | ||
|  | 05e4f2b439 | ||
|  | 7e058bfb9b | ||
|  | dfaa0183b7 | ||
|  | 1b56becfaa | ||
|  | 23b1c63538 | ||
|  | 49d1a63402 | ||
|  | 2a7b82650c | ||
|  | 8ea7b9aae2 | ||
|  | 5136b12612 | ||
|  | 80a49e01a3 | ||
|  | 8fb082ba3b | ||
|  | 86c2627c24 | ||
|  | 90b4cac7f3 | ||
|  | e4bacc45d6 | ||
|  | da1d81998f | ||
|  | cac61b9f66 | ||
|  | 3da12e99d9 | ||
|  | 4ef5e2020c | ||
|  | af20063a8d | ||
|  | ca512f6a38 | ||
|  | 0e9ff8825e | ||
|  | e0b4f96b5b | ||
|  | eae9b6e607 | ||
|  | 7bddc73b96 | ||
|  | 2a527ee436 | ||
|  | e42119b73d | ||
|  | 821c559e89 | ||
|  | 7e2bca7e9c | ||
|  | 1e16ef3e0d | ||
|  | 476a46ad7e | ||
|  | c58f710227 | ||
|  | 150d068e9f | ||
|  | be780462f1 | ||
|  | f2159e1033 | ||
|  | 466005de07 | ||
|  | 2b088a1678 | ||
|  | 3a18cebe34 | ||
|  | cc36bf9c13 | ||
|  | 3b36608bbd | ||
|  | 29fa94e7d2 | ||
|  | 9c436921d1 | 
| @@ -1,10 +1,11 @@ | ||||
| FROM node:16 as builder | ||||
|  | ||||
| WORKDIR /build | ||||
| COPY web/package.json . | ||||
| RUN npm install | ||||
| COPY ./web . | ||||
| COPY ./VERSION . | ||||
| RUN npm install | ||||
| RUN REACT_APP_VERSION=$(cat VERSION) npm run build | ||||
| RUN DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$(cat VERSION) npm run build | ||||
|  | ||||
| FROM golang AS builder2 | ||||
|  | ||||
| @@ -13,9 +14,10 @@ ENV GO111MODULE=on \ | ||||
|     GOOS=linux | ||||
|  | ||||
| WORKDIR /build | ||||
| ADD go.mod go.sum ./ | ||||
| RUN go mod download | ||||
| COPY . . | ||||
| COPY --from=builder /build/build ./web/build | ||||
| RUN go mod download | ||||
| RUN go build -ldflags "-s -w -X 'one-api/common.Version=$(cat VERSION)' -extldflags '-static'" -o one-api | ||||
|  | ||||
| FROM alpine | ||||
|   | ||||
| @@ -1,5 +1,5 @@ | ||||
| <p align="right"> | ||||
|     <a href="./README.md">中文</a> | <strong>English</strong> | ||||
|     <a href="./README.md">中文</a> | <strong>English</strong> | <a href="./README.ja.md">日本語</a> | ||||
| </p> | ||||
|  | ||||
| <p align="center"> | ||||
| @@ -190,7 +190,7 @@ If you encounter a blank page after deployment, refer to [#97](https://github.co | ||||
| > Zeabur's servers are located overseas, automatically solving network issues, and the free quota is sufficient for personal usage. | ||||
|  | ||||
| 1. First, fork the code. | ||||
| 2. Go to [Zeabur](https://zeabur.com/), log in, and enter the console. | ||||
| 2. Go to [Zeabur](https://zeabur.com?referralCode=songquanpeng), log in, and enter the console. | ||||
| 3. Create a new project. In Service -> Add Service, select Marketplace, and choose MySQL. Note down the connection parameters (username, password, address, and port). | ||||
| 4. Copy the connection parameters and run ```create database `one-api` ``` to create the database. | ||||
| 5. Then, in Service -> Add Service, select Git (authorization is required for the first use) and choose your forked repository. | ||||
| @@ -283,7 +283,7 @@ If the channel ID is not provided, load balancing will be used to distribute the | ||||
|     + Double-check that your interface address and API Key are correct. | ||||
|  | ||||
| ## Related Projects | ||||
| [FastGPT](https://github.com/c121914yu/FastGPT): Build an AI knowledge base in three minutes | ||||
| [FastGPT](https://github.com/labring/FastGPT): Knowledge question answering system based on the LLM | ||||
|  | ||||
| ## Note | ||||
| This project is an open-source project. Please use it in compliance with OpenAI's [Terms of Use](https://openai.com/policies/terms-of-use) and **applicable laws and regulations**. It must not be used for illegal purposes. | ||||
|   | ||||
							
								
								
									
										298
									
								
								README.ja.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										298
									
								
								README.ja.md
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,298 @@ | ||||
| <p align="right"> | ||||
|     <a href="./README.md">中文</a> | <a href="./README.en.md">English</a> | <strong>日本語</strong> | ||||
| </p> | ||||
|  | ||||
| <p align="center"> | ||||
|   <a href="https://github.com/songquanpeng/one-api"><img src="https://raw.githubusercontent.com/songquanpeng/one-api/main/web/public/logo.png" width="150" height="150" alt="one-api logo"></a> | ||||
| </p> | ||||
|  | ||||
| <div align="center"> | ||||
|  | ||||
| # One API | ||||
|  | ||||
| _✨ 標準的な OpenAI API フォーマットを通じてすべての LLM にアクセスでき、導入と利用が容易です ✨_ | ||||
|  | ||||
| </div> | ||||
|  | ||||
| <p align="center"> | ||||
|   <a href="https://raw.githubusercontent.com/songquanpeng/one-api/main/LICENSE"> | ||||
|     <img src="https://img.shields.io/github/license/songquanpeng/one-api?color=brightgreen" alt="license"> | ||||
|   </a> | ||||
|   <a href="https://github.com/songquanpeng/one-api/releases/latest"> | ||||
|     <img src="https://img.shields.io/github/v/release/songquanpeng/one-api?color=brightgreen&include_prereleases" alt="release"> | ||||
|   </a> | ||||
|   <a href="https://hub.docker.com/repository/docker/justsong/one-api"> | ||||
|     <img src="https://img.shields.io/docker/pulls/justsong/one-api?color=brightgreen" alt="docker pull"> | ||||
|   </a> | ||||
|   <a href="https://github.com/songquanpeng/one-api/releases/latest"> | ||||
|     <img src="https://img.shields.io/github/downloads/songquanpeng/one-api/total?color=brightgreen&include_prereleases" alt="release"> | ||||
|   </a> | ||||
|   <a href="https://goreportcard.com/report/github.com/songquanpeng/one-api"> | ||||
|     <img src="https://goreportcard.com/badge/github.com/songquanpeng/one-api" alt="GoReportCard"> | ||||
|   </a> | ||||
| </p> | ||||
|  | ||||
| <p align="center"> | ||||
|   <a href="#deployment">デプロイチュートリアル</a> | ||||
|   · | ||||
|   <a href="#usage">使用方法</a> | ||||
|   · | ||||
|   <a href="https://github.com/songquanpeng/one-api/issues">フィードバック</a> | ||||
|   · | ||||
|   <a href="#screenshots">スクリーンショット</a> | ||||
|   · | ||||
|   <a href="https://openai.justsong.cn/">ライブデモ</a> | ||||
|   · | ||||
|   <a href="#faq">FAQ</a> | ||||
|   · | ||||
|   <a href="#related-projects">関連プロジェクト</a> | ||||
|   · | ||||
|   <a href="https://iamazing.cn/page/reward">寄付</a> | ||||
| </p> | ||||
|  | ||||
| > **警告**: この README は ChatGPT によって翻訳されています。翻訳ミスを発見した場合は遠慮なく PR を投稿してください。 | ||||
|  | ||||
| > **警告**: 英語版の Docker イメージは `justsong/one-api-en` です。 | ||||
|  | ||||
| > **注**: Docker からプルされた最新のイメージは、`alpha` リリースかもしれません。安定性が必要な場合は、手動でバージョンを指定してください。 | ||||
|  | ||||
| ## 特徴 | ||||
| 1. 複数の大型モデルをサポート: | ||||
|    + [x] [OpenAI ChatGPT シリーズモデル](https://platform.openai.com/docs/guides/gpt/chat-completions-api) ([Azure OpenAI API](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference) をサポート) | ||||
|    + [x] [Anthropic Claude シリーズモデル](https://anthropic.com) | ||||
|    + [x] [Google PaLM2 シリーズモデル](https://developers.generativeai.google) | ||||
|    + [x] [Baidu Wenxin Yiyuan シリーズモデル](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html) | ||||
|    + [x] [Alibaba Tongyi Qianwen シリーズモデル](https://help.aliyun.com/document_detail/2400395.html) | ||||
|    + [x] [Zhipu ChatGLM シリーズモデル](https://bigmodel.cn) | ||||
| 2. **ロードバランシング**による複数チャンネルへのアクセスをサポート。 | ||||
| 3. ストリーム伝送によるタイプライター的効果を可能にする**ストリームモード**に対応。 | ||||
| 4. **マルチマシンデプロイ**に対応。[詳細はこちら](#multi-machine-deployment)を参照。 | ||||
| 5. トークンの有効期限や使用回数を設定できる**トークン管理**に対応しています。 | ||||
| 6. **バウチャー管理**に対応しており、バウチャーの一括生成やエクスポートが可能です。バウチャーは口座残高の補充に利用できます。 | ||||
| 7. **チャンネル管理**に対応し、チャンネルの一括作成が可能。 | ||||
| 8. グループごとに異なるレートを設定するための**ユーザーグループ**と**チャンネルグループ**をサポートしています。 | ||||
| 9. チャンネル**モデルリスト設定**に対応。 | ||||
| 10. **クォータ詳細チェック**をサポート。 | ||||
| 11. **ユーザー招待報酬**をサポートします。 | ||||
| 12. 米ドルでの残高表示が可能。 | ||||
| 13. 新規ユーザー向けのお知らせ公開、リチャージリンク設定、初期残高設定に対応。 | ||||
| 14. 豊富な**カスタマイズ**オプションを提供します: | ||||
|     1. システム名、ロゴ、フッターのカスタマイズが可能。 | ||||
|     2. HTML と Markdown コードを使用したホームページとアバウトページのカスタマイズ、または iframe を介したスタンドアロンウェブページの埋め込みをサポートしています。 | ||||
| 15. システム・アクセストークンによる管理 API アクセスをサポートする。 | ||||
| 16. Cloudflare Turnstile によるユーザー認証に対応。 | ||||
| 17. ユーザー管理と複数のユーザーログイン/登録方法をサポート: | ||||
|     + 電子メールによるログイン/登録とパスワードリセット。 | ||||
|     + [GitHub OAuth](https://github.com/settings/applications/new)。 | ||||
|     + WeChat 公式アカウントの認証([WeChat Server](https://github.com/songquanpeng/wechat-server)の追加導入が必要)。 | ||||
| 18. 他の主要なモデル API が利用可能になった場合、即座にサポートし、カプセル化する。 | ||||
|  | ||||
| ## デプロイメント | ||||
| ### Docker デプロイメント | ||||
| デプロイコマンド: `docker run --name one-api -d --restart always -p 3000:3000 -e TZ=Asia/Shanghai -v /home/ubuntu/data/one-api:/data justsong/one-api-en`。 | ||||
|  | ||||
| コマンドを更新する: `docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrr/watchtower -cR`。 | ||||
|  | ||||
| `-p 3000:3000` の最初の `3000` はホストのポートで、必要に応じて変更できます。 | ||||
|  | ||||
| データはホストの `/home/ubuntu/data/one-api` ディレクトリに保存される。このディレクトリが存在し、書き込み権限があることを確認する、もしくは適切なディレクトリに変更してください。 | ||||
|  | ||||
| Nginxリファレンス設定: | ||||
| ``` | ||||
| server{ | ||||
|    server_name openai.justsong.cn;  # ドメイン名は適宜変更 | ||||
|  | ||||
|    location / { | ||||
|           client_max_body_size  64m; | ||||
|           proxy_http_version 1.1; | ||||
|           proxy_pass http://localhost:3000;  # それに応じてポートを変更 | ||||
|           proxy_set_header Host $host; | ||||
|           proxy_set_header X-Forwarded-For $remote_addr; | ||||
|           proxy_cache_bypass $http_upgrade; | ||||
|           proxy_set_header Accept-Encoding gzip; | ||||
|           proxy_read_timeout 300s;  # GPT-4 はより長いタイムアウトが必要 | ||||
|    } | ||||
| } | ||||
| ``` | ||||
|  | ||||
| 次に、Let's Encrypt certbot を使って HTTPS を設定します: | ||||
| ```bash | ||||
| # Ubuntu に certbot をインストール: | ||||
| sudo snap install --classic certbot | ||||
| sudo ln -s /snap/bin/certbot /usr/bin/certbot | ||||
| # 証明書の生成と Nginx 設定の変更 | ||||
| sudo certbot --nginx | ||||
| # プロンプトに従う | ||||
| # Nginx を再起動 | ||||
| sudo service nginx restart | ||||
| ``` | ||||
|  | ||||
| 初期アカウントのユーザー名は `root` で、パスワードは `123456` です。 | ||||
|  | ||||
| ### マニュアルデプロイ | ||||
| 1. [GitHub Releases](https://github.com/songquanpeng/one-api/releases/latest) から実行ファイルをダウンロードする、もしくはソースからコンパイルする: | ||||
|    ```shell | ||||
|    git clone https://github.com/songquanpeng/one-api.git | ||||
|  | ||||
|    # フロントエンドのビルド | ||||
|    cd one-api/web | ||||
|    npm install | ||||
|    npm run build | ||||
|  | ||||
|    # バックエンドのビルド | ||||
|    cd .. | ||||
|    go mod download | ||||
|    go build -ldflags "-s -w" -o one-api | ||||
|    ``` | ||||
| 2. 実行: | ||||
|    ```shell | ||||
|    chmod u+x one-api | ||||
|    ./one-api --port 3000 --log-dir ./logs | ||||
|    ``` | ||||
| 3. [http://localhost:3000/](http://localhost:3000/) にアクセスし、ログインする。初期アカウントのユーザー名は `root`、パスワードは `123456` である。 | ||||
|  | ||||
| より詳細なデプロイのチュートリアルについては、[このページ](https://iamazing.cn/page/how-to-deploy-a-website) を参照してください。 | ||||
|  | ||||
| ### マルチマシンデプロイ | ||||
| 1. すべてのサーバに同じ `SESSION_SECRET` を設定する。 | ||||
| 2. `SQL_DSN` を設定し、SQLite の代わりに MySQL を使用する。すべてのサーバは同じデータベースに接続する。 | ||||
| 3. マスターノード以外のノードの `NODE_TYPE` を `slave` に設定する。 | ||||
| 4. データベースから定期的に設定を同期するサーバーには `SYNC_FREQUENCY` を設定する。 | ||||
| 5. マスター以外のノードでは、オプションで `FRONTEND_BASE_URL` を設定して、ページ要求をマスターサーバーにリダイレクトすることができます。 | ||||
| 6. マスター以外のノードには Redis を個別にインストールし、`REDIS_CONN_STRING` を設定して、キャッシュの有効期限が切れていないときにデータベースにゼロレイテンシーでアクセスできるようにする。 | ||||
| 7. メインサーバーでもデータベースへのアクセスが高レイテンシになる場合は、Redis を有効にし、`SYNC_FREQUENCY` を設定してデータベースから定期的に設定を同期する必要がある。 | ||||
|  | ||||
| Please refer to the [environment variables](#environment-variables) section for details on using environment variables. | ||||
|  | ||||
| ### コントロールパネル(例: Baota)への展開 | ||||
| 詳しい手順は [#175](https://github.com/songquanpeng/one-api/issues/175) を参照してください。 | ||||
|  | ||||
| 配置後に空白のページが表示される場合は、[#97](https://github.com/songquanpeng/one-api/issues/97) を参照してください。 | ||||
|  | ||||
| ### サードパーティプラットフォームへのデプロイ | ||||
| <details> | ||||
| <summary><strong>Sealos へのデプロイ</strong></summary> | ||||
| <div> | ||||
|  | ||||
| > Sealos は、高い同時実行性、ダイナミックなスケーリング、数百万人のユーザーに対する安定した運用をサポートしています。 | ||||
|  | ||||
| > 下のボタンをクリックすると、ワンクリックで展開できます。👇 | ||||
|  | ||||
| [](https://cloud.sealos.io/?openapp=system-fastdeploy?templateName=one-api) | ||||
|  | ||||
|  | ||||
| </div> | ||||
| </details> | ||||
|  | ||||
| <details> | ||||
| <summary><strong>Zeabur へのデプロイ</strong></summary> | ||||
| <div> | ||||
|  | ||||
| > Zeabur のサーバーは海外にあるため、ネットワークの問題は自動的に解決されます。 | ||||
|  | ||||
| 1. まず、コードをフォークする。 | ||||
| 2. [Zeabur](https://zeabur.com?referralCode=songquanpeng) にアクセスしてログインし、コンソールに入る。 | ||||
| 3. 新しいプロジェクトを作成します。Service -> Add ServiceでMarketplace を選択し、MySQL を選択する。接続パラメータ(ユーザー名、パスワード、アドレス、ポート)をメモします。 | ||||
| 4. 接続パラメータをコピーし、```create database `one-api` ``` を実行してデータベースを作成する。 | ||||
| 5. その後、Service -> Add Service で Git を選択し(最初の使用には認証が必要です)、フォークしたリポジトリを選択します。 | ||||
| 6. 自動デプロイが開始されますが、一旦キャンセルしてください。Variable タブで `PORT` に `3000` を追加し、`SQL_DSN` に `<username>:<password>@tcp(<addr>:<port>)/one-api` を追加します。変更を保存する。SQL_DSN` が設定されていないと、データが永続化されず、再デプロイ後にデータが失われるので注意すること。 | ||||
| 7. 再デプロイを選択します。 | ||||
| 8. Domains タブで、"my-one-api" のような適切なドメイン名の接頭辞を選択する。最終的なドメイン名は "my-one-api.zeabur.app" となります。独自のドメイン名を CNAME することもできます。 | ||||
| 9. デプロイが完了するのを待ち、生成されたドメイン名をクリックして One API にアクセスします。 | ||||
|  | ||||
| </div> | ||||
| </details> | ||||
|  | ||||
| ## コンフィグ | ||||
| システムは箱から出してすぐに使えます。 | ||||
|  | ||||
| 環境変数やコマンドラインパラメータを設定することで、システムを構成することができます。 | ||||
|  | ||||
| システム起動後、`root` ユーザーとしてログインし、さらにシステムを設定します。 | ||||
|  | ||||
| ## 使用方法 | ||||
| `Channels` ページで API Key を追加し、`Tokens` ページでアクセストークンを追加する。 | ||||
|  | ||||
| アクセストークンを使って One API にアクセスすることができる。使い方は [OpenAI API](https://platform.openai.com/docs/api-reference/introduction) と同じです。 | ||||
|  | ||||
| OpenAI API が使用されている場所では、API Base に One API のデプロイアドレスを設定することを忘れないでください(例: `https://openai.justsong.cn`)。API Key は One API で生成されたトークンでなければなりません。 | ||||
|  | ||||
| 具体的な API Base のフォーマットは、使用しているクライアントに依存することに注意してください。 | ||||
|  | ||||
| ```mermaid | ||||
| graph LR | ||||
|     A(ユーザ) | ||||
|     A --->|リクエスト| B(One API) | ||||
|     B -->|中継リクエスト| C(OpenAI) | ||||
|     B -->|中継リクエスト| D(Azure) | ||||
|     B -->|中継リクエスト| E(その他のダウンストリームチャンネル) | ||||
| ``` | ||||
|  | ||||
| 現在のリクエストにどのチャネルを使うかを指定するには、トークンの後に チャネル ID を追加します: 例えば、`Authorization: Bearer ONE_API_KEY-CHANNEL_ID` のようにします。 | ||||
| チャンネル ID を指定するためには、トークンは管理者によって作成される必要があることに注意してください。 | ||||
|  | ||||
| もしチャネル ID が指定されない場合、ロードバランシングによってリクエストが複数のチャネルに振り分けられます。 | ||||
|  | ||||
| ### 環境変数 | ||||
| 1. `REDIS_CONN_STRING`: 設定すると、リクエストレート制限のためのストレージとして、メモリの代わりに Redis が使われる。 | ||||
|     + 例: `REDIS_CONN_STRING=redis://default:redispw@localhost:49153` | ||||
| 2. `SESSION_SECRET`: 設定すると、固定セッションキーが使用され、システムの再起動後もログインユーザーのクッキーが有効であることが保証されます。 | ||||
|     + 例: `SESSION_SECRET=random_string` | ||||
| 3. `SQL_DSN`: 設定すると、SQLite の代わりに指定したデータベースが使用されます。MySQL バージョン 8.0 を使用してください。 | ||||
|     + 例: `SQL_DSN=root:123456@tcp(localhost:3306)/oneapi` | ||||
| 4. `FRONTEND_BASE_URL`: 設定されると、バックエンドアドレスではなく、指定されたフロントエンドアドレスが使われる。 | ||||
|     + 例: `FRONTEND_BASE_URL=https://openai.justsong.cn` | ||||
| 5. `SYNC_FREQUENCY`: 設定された場合、システムは定期的にデータベースからコンフィグを秒単位で同期する。設定されていない場合、同期は行われません。 | ||||
|     + 例: `SYNC_FREQUENCY=60` | ||||
| 6. `NODE_TYPE`: 設定すると、ノードのタイプを指定する。有効な値は `master` と `slave` である。設定されていない場合、デフォルトは `master`。 | ||||
|     + 例: `NODE_TYPE=slave` | ||||
| 7. `CHANNEL_UPDATE_FREQUENCY`: 設定すると、チャンネル残高を分単位で定期的に更新する。設定されていない場合、更新は行われません。 | ||||
|     + 例: `CHANNEL_UPDATE_FREQUENCY=1440` | ||||
| 8. `CHANNEL_TEST_FREQUENCY`: 設定すると、チャンネルを定期的にテストする。設定されていない場合、テストは行われません。 | ||||
|     + 例: `CHANNEL_TEST_FREQUENCY=1440` | ||||
| 9. `POLLING_INTERVAL`: チャネル残高の更新とチャネルの可用性をテストするときのリクエスト間の時間間隔 (秒)。デフォルトは間隔なし。 | ||||
|     + 例: `POLLING_INTERVAL=5` | ||||
|  | ||||
| ### コマンドラインパラメータ | ||||
| 1. `--port <port_number>`: サーバがリッスンするポート番号を指定。デフォルトは `3000` です。 | ||||
|     + 例: `--port 3000` | ||||
| 2. `--log-dir <log_dir>`: ログディレクトリを指定。設定しない場合、ログは保存されません。 | ||||
|     + 例: `--log-dir ./logs` | ||||
| 3. `--version`: システムのバージョン番号を表示して終了する。 | ||||
| 4. `--help`: コマンドの使用法ヘルプとパラメータの説明を表示。 | ||||
|  | ||||
| ## スクリーンショット | ||||
|  | ||||
|  | ||||
|  | ||||
| ## FAQ | ||||
| 1. ノルマとは何か?どのように計算されますか?One API にはノルマ計算の問題はありますか? | ||||
|     + ノルマ = グループ倍率 * モデル倍率 * (プロンプトトークンの数 + 完了トークンの数 * 完了倍率) | ||||
|     + 完了倍率は、公式の定義と一致するように、GPT3.5 では 1.33、GPT4 では 2 に固定されています。 | ||||
|     + ストリームモードでない場合、公式 API は消費したトークンの総数を返す。ただし、プロンプトとコンプリートの消費倍率は異なるので注意してください。 | ||||
| 2. アカウント残高は十分なのに、"insufficient quota" と表示されるのはなぜですか? | ||||
|     + トークンのクォータが十分かどうかご確認ください。トークンクォータはアカウント残高とは別のものです。 | ||||
|     + トークンクォータは最大使用量を設定するためのもので、ユーザーが自由に設定できます。 | ||||
| 3. チャンネルを使おうとすると "No available channels" と表示されます。どうすればいいですか? | ||||
|     + ユーザーとチャンネルグループの設定を確認してください。 | ||||
|     + チャンネルモデルの設定も確認してください。 | ||||
| 4. チャンネルテストがエラーを報告する: "invalid character '<' looking for beginning of value" | ||||
|     + このエラーは、返された値が有効な JSON ではなく、HTML ページである場合に発生する。 | ||||
|     + ほとんどの場合、デプロイサイトのIPかプロキシのノードが CloudFlare によってブロックされています。 | ||||
| 5. ChatGPT Next Web でエラーが発生しました: "Failed to fetch" | ||||
|     + デプロイ時に `BASE_URL` を設定しないでください。 | ||||
|     + インターフェイスアドレスと API Key が正しいか再確認してください。 | ||||
|  | ||||
| ## 関連プロジェクト | ||||
| [FastGPT](https://github.com/labring/FastGPT): LLM に基づく知識質問応答システム | ||||
|  | ||||
| ## 注 | ||||
| 本プロジェクトはオープンソースプロジェクトです。OpenAI の[利用規約](https://openai.com/policies/terms-of-use)および**適用される法令**を遵守してご利用ください。違法な目的での利用はご遠慮ください。 | ||||
|  | ||||
| このプロジェクトは MIT ライセンスで公開されています。これに基づき、ページの最下部に帰属表示と本プロジェクトへのリンクを含める必要があります。 | ||||
|  | ||||
| このプロジェクトを基にした派生プロジェクトについても同様です。 | ||||
|  | ||||
| 帰属表示を含めたくない場合は、事前に許可を得なければなりません。 | ||||
|  | ||||
| MIT ライセンスによると、このプロジェクトを利用するリスクと責任は利用者が負うべきであり、このオープンソースプロジェクトの開発者は責任を負いません。 | ||||
							
								
								
									
										51
									
								
								README.md
									
									
									
									
									
								
							
							
						
						
									
										51
									
								
								README.md
									
									
									
									
									
								
							| @@ -1,5 +1,5 @@ | ||||
| <p align="right"> | ||||
|    <strong>中文</strong> | <a href="./README.en.md">English</a> | ||||
|    <strong>中文</strong> | <a href="./README.en.md">English</a> | <a href="./README.ja.md">日本語</a> | ||||
| </p> | ||||
|  | ||||
|  | ||||
| @@ -51,11 +51,13 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用  | ||||
|   <a href="https://iamazing.cn/page/reward">赞赏支持</a> | ||||
| </p> | ||||
|  | ||||
| > **Note**:本项目为开源项目,使用者必须在遵循 OpenAI 的[使用条款](https://openai.com/policies/terms-of-use)以及**法律法规**的情况下使用,不得用于非法用途。 | ||||
| > **Note** | ||||
| > 本项目为开源项目,使用者必须在遵循 OpenAI 的[使用条款](https://openai.com/policies/terms-of-use)以及**法律法规**的情况下使用,不得用于非法用途。 | ||||
| >  | ||||
| > 根据[《生成式人工智能服务管理暂行办法》](http://www.cac.gov.cn/2023-07/13/c_1690898327029107.htm)的要求,请勿对中国地区公众提供一切未经备案的生成式人工智能服务。 | ||||
|  | ||||
| > **Note**:使用 Docker 拉取的最新镜像可能是 `alpha` 版本,如果追求稳定性请手动指定版本。 | ||||
|  | ||||
| > **Warning**:从 `v0.3` 版本升级到 `v0.4` 版本需要手动迁移数据库,请手动执行[数据库迁移脚本](./bin/migration_v0.3-v0.4.sql)。 | ||||
| > **Warning** | ||||
| > 使用 Docker 拉取的最新镜像可能是 `alpha` 版本,如果追求稳定性请手动指定版本。 | ||||
|  | ||||
| ## 功能 | ||||
| 1. 支持多种大模型: | ||||
| @@ -66,6 +68,7 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用  | ||||
|    + [x] [阿里通义千问系列模型](https://help.aliyun.com/document_detail/2400395.html) | ||||
|    + [x] [讯飞星火认知大模型](https://www.xfyun.cn/doc/spark/Web.html) | ||||
|    + [x] [智谱 ChatGLM 系列模型](https://bigmodel.cn) | ||||
|    + [x] [360 智脑](https://ai.360.cn) | ||||
| 2. 支持配置镜像以及众多第三方代理服务: | ||||
|    + [x] [OpenAI-SB](https://openai-sb.com) | ||||
|    + [x] [API2D](https://api2d.com/r/197971) | ||||
| @@ -102,16 +105,18 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用  | ||||
| ### 基于 Docker 进行部署 | ||||
| 部署命令:`docker run --name one-api -d --restart always -p 3000:3000 -e TZ=Asia/Shanghai -v /home/ubuntu/data/one-api:/data justsong/one-api` | ||||
|  | ||||
| 如果上面的镜像无法拉取,可以尝试使用 GitHub 的 Docker 镜像,将上面的 `justsong/one-api` 替换为 `ghcr.io/songquanpeng/one-api` 即可。 | ||||
|  | ||||
| 如果你的并发量较大,推荐设置 `SQL_DSN`,详见下面[环境变量](#环境变量)一节。 | ||||
|  | ||||
| 更新命令:`docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower -cR` | ||||
|  | ||||
| `-p 3000:3000` 中的第一个 `3000` 是宿主机的端口,可以根据需要进行修改。 | ||||
| 其中,`-p 3000:3000` 中的第一个 `3000` 是宿主机的端口,可以根据需要进行修改。 | ||||
|  | ||||
| 数据将会保存在宿主机的 `/home/ubuntu/data/one-api` 目录,请确保该目录存在且具有写入权限,或者更改为合适的目录。 | ||||
|  | ||||
| 如果启动失败,请添加 `--privileged=true`,具体参考 #482。 | ||||
|  | ||||
| 如果上面的镜像无法拉取,可以尝试使用 GitHub 的 Docker 镜像,将上面的 `justsong/one-api` 替换为 `ghcr.io/songquanpeng/one-api` 即可。 | ||||
|  | ||||
| 如果你的并发量较大,**务必**设置 `SQL_DSN`,详见下面[环境变量](#环境变量)一节。 | ||||
|  | ||||
| 更新命令:`docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower -cR` | ||||
|  | ||||
| Nginx 的参考配置: | ||||
| ``` | ||||
| server{ | ||||
| @@ -213,7 +218,7 @@ docker run --name chatgpt-web -d -p 3002:3002 -e OPENAI_API_BASE_URL=https://ope | ||||
|  | ||||
| > Sealos 的服务器在国外,不需要额外处理网络问题,支持高并发 & 动态伸缩。 | ||||
|  | ||||
| 点击以下按钮一键部署: | ||||
| 点击以下按钮一键部署(部署后访问出现 404 请等待 3~5 分钟): | ||||
|  | ||||
| [](https://cloud.sealos.io/?openapp=system-fastdeploy?templateName=one-api) | ||||
|  | ||||
| @@ -227,7 +232,7 @@ docker run --name chatgpt-web -d -p 3002:3002 -e OPENAI_API_BASE_URL=https://ope | ||||
| > Zeabur 的服务器在国外,自动解决了网络的问题,同时免费的额度也足够个人使用。 | ||||
|  | ||||
| 1. 首先 fork 一份代码。 | ||||
| 2. 进入 [Zeabur](https://zeabur.com/),登录,进入控制台。 | ||||
| 2. 进入 [Zeabur](https://zeabur.com?referralCode=songquanpeng),登录,进入控制台。 | ||||
| 3. 新建一个 Project,在 Service -> Add Service 选择 Marketplace,选择 MySQL,并记下连接参数(用户名、密码、地址、端口)。 | ||||
| 4. 复制链接参数,运行 ```create database `one-api` ``` 创建数据库。 | ||||
| 5. 然后在 Service -> Add Service,选择 Git(第一次使用需要先授权),选择你 fork 的仓库。 | ||||
| @@ -272,15 +277,23 @@ graph LR | ||||
| 不加的话将会使用负载均衡的方式使用多个渠道。 | ||||
|  | ||||
| ### 环境变量 | ||||
| 1. `REDIS_CONN_STRING`:设置之后将使用 Redis 作为请求频率限制的存储,而非使用内存存储。 | ||||
| 1. `REDIS_CONN_STRING`:设置之后将使用 Redis 作为缓存使用。 | ||||
|    + 例子:`REDIS_CONN_STRING=redis://default:redispw@localhost:49153` | ||||
|    + 如果数据库访问延迟很低,没有必要启用 Redis,启用后反而会出现数据滞后的问题。 | ||||
| 2. `SESSION_SECRET`:设置之后将使用固定的会话密钥,这样系统重新启动后已登录用户的 cookie 将依旧有效。 | ||||
|    + 例子:`SESSION_SECRET=random_string` | ||||
| 3. `SQL_DSN`:设置之后将使用指定数据库而非 SQLite,请使用 MySQL 8.0 版本。 | ||||
|    + 例子:`SQL_DSN=root:123456@tcp(localhost:3306)/oneapi` | ||||
| 3. `SQL_DSN`:设置之后将使用指定数据库而非 SQLite,请使用 MySQL 或 PostgreSQL。 | ||||
|    + 例子: | ||||
|      + MySQL:`SQL_DSN=root:123456@tcp(localhost:3306)/oneapi` | ||||
|      + PostgreSQL:`SQL_DSN=postgres://postgres:123456@localhost:5432/oneapi`(适配中,欢迎反馈) | ||||
|    + 注意需要提前建立数据库 `oneapi`,无需手动建表,程序将自动建表。 | ||||
|    + 如果使用本地数据库:部署命令可添加 `--network="host"` 以使得容器内的程序可以访问到宿主机上的 MySQL。 | ||||
|    + 如果使用云数据库:如果云服务器需要验证身份,需要在连接参数中添加 `?tls=skip-verify`。 | ||||
|    + 请根据你的数据库配置修改下列参数(或者保持默认值): | ||||
|      + `SQL_MAX_IDLE_CONNS`:最大空闲连接数,默认为 `100`。 | ||||
|      + `SQL_MAX_OPEN_CONNS`:最大打开连接数,默认为 `1000`。 | ||||
|        + 如果报错 `Error 1040: Too many connections`,请适当减小该值。 | ||||
|      + `SQL_CONN_MAX_LIFETIME`:连接的最大生命周期,默认为 `60`,单位分钟。 | ||||
| 4. `FRONTEND_BASE_URL`:设置之后将重定向页面请求到指定的地址,仅限从服务器设置。 | ||||
|    + 例子:`FRONTEND_BASE_URL=https://openai.justsong.cn` | ||||
| 5. `SYNC_FREQUENCY`:设置之后将定期与数据库同步配置,单位为秒,未设置则不进行同步。 | ||||
| @@ -329,11 +342,13 @@ https://openai.justsong.cn | ||||
| 5. ChatGPT Next Web 报错:`Failed to fetch` | ||||
|    + 部署的时候不要设置 `BASE_URL`。 | ||||
|    + 检查你的接口地址和 API Key 有没有填对。 | ||||
|    + 检查是否启用了 HTTPS,浏览器会拦截 HTTPS 域名下的 HTTP 请求。 | ||||
| 6. 报错:`当前分组负载已饱和,请稍后再试` | ||||
|    + 上游通道 429 了。 | ||||
|  | ||||
| ## 相关项目 | ||||
| [FastGPT](https://github.com/c121914yu/FastGPT): 三分钟搭建 AI 知识库 | ||||
| * [FastGPT](https://github.com/labring/FastGPT): 基于 LLM 大语言模型的知识库问答系统 | ||||
| * [ChatGPT Next Web](https://github.com/Yidadaa/ChatGPT-Next-Web):  一键拥有你自己的跨平台 ChatGPT 应用 | ||||
|  | ||||
| ## 注意 | ||||
|  | ||||
|   | ||||
| @@ -55,6 +55,8 @@ var EmailDomainWhitelist = []string{ | ||||
| 	"foxmail.com", | ||||
| } | ||||
|  | ||||
| var DebugEnabled = os.Getenv("DEBUG") == "true" | ||||
|  | ||||
| var LogConsumeEnabled = true | ||||
|  | ||||
| var SMTPServer = "" | ||||
| @@ -152,25 +154,28 @@ const ( | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	ChannelTypeUnknown   = 0 | ||||
| 	ChannelTypeOpenAI    = 1 | ||||
| 	ChannelTypeAPI2D     = 2 | ||||
| 	ChannelTypeAzure     = 3 | ||||
| 	ChannelTypeCloseAI   = 4 | ||||
| 	ChannelTypeOpenAISB  = 5 | ||||
| 	ChannelTypeOpenAIMax = 6 | ||||
| 	ChannelTypeOhMyGPT   = 7 | ||||
| 	ChannelTypeCustom    = 8 | ||||
| 	ChannelTypeAILS      = 9 | ||||
| 	ChannelTypeAIProxy   = 10 | ||||
| 	ChannelTypePaLM      = 11 | ||||
| 	ChannelTypeAPI2GPT   = 12 | ||||
| 	ChannelTypeAIGC2D    = 13 | ||||
| 	ChannelTypeAnthropic = 14 | ||||
| 	ChannelTypeBaidu     = 15 | ||||
| 	ChannelTypeZhipu     = 16 | ||||
| 	ChannelTypeAli       = 17 | ||||
| 	ChannelTypeXunfei    = 18 | ||||
| 	ChannelTypeUnknown        = 0 | ||||
| 	ChannelTypeOpenAI         = 1 | ||||
| 	ChannelTypeAPI2D          = 2 | ||||
| 	ChannelTypeAzure          = 3 | ||||
| 	ChannelTypeCloseAI        = 4 | ||||
| 	ChannelTypeOpenAISB       = 5 | ||||
| 	ChannelTypeOpenAIMax      = 6 | ||||
| 	ChannelTypeOhMyGPT        = 7 | ||||
| 	ChannelTypeCustom         = 8 | ||||
| 	ChannelTypeAILS           = 9 | ||||
| 	ChannelTypeAIProxy        = 10 | ||||
| 	ChannelTypePaLM           = 11 | ||||
| 	ChannelTypeAPI2GPT        = 12 | ||||
| 	ChannelTypeAIGC2D         = 13 | ||||
| 	ChannelTypeAnthropic      = 14 | ||||
| 	ChannelTypeBaidu          = 15 | ||||
| 	ChannelTypeZhipu          = 16 | ||||
| 	ChannelTypeAli            = 17 | ||||
| 	ChannelTypeXunfei         = 18 | ||||
| 	ChannelType360            = 19 | ||||
| 	ChannelTypeOpenRouter     = 20 | ||||
| 	ChannelTypeAIProxyLibrary = 21 | ||||
| ) | ||||
|  | ||||
| var ChannelBaseURLs = []string{ | ||||
| @@ -193,4 +198,7 @@ var ChannelBaseURLs = []string{ | ||||
| 	"https://open.bigmodel.cn",       // 16 | ||||
| 	"https://dashscope.aliyuncs.com", // 17 | ||||
| 	"",                               // 18 | ||||
| 	"https://ai.360.cn",              // 19 | ||||
| 	"https://openrouter.ai/api",      // 20 | ||||
| 	"https://api.aiproxy.io",         // 21 | ||||
| } | ||||
|   | ||||
| @@ -1,6 +1,9 @@ | ||||
| package common | ||||
|  | ||||
| import "encoding/json" | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // ModelRatio | ||||
| // https://platform.openai.com/docs/models/model-endpoint-compatibility | ||||
| @@ -10,46 +13,51 @@ import "encoding/json" | ||||
| // 1 === $0.002 / 1K tokens | ||||
| // 1 === ¥0.014 / 1k tokens | ||||
| var ModelRatio = map[string]float64{ | ||||
| 	"gpt-4":                   15, | ||||
| 	"gpt-4-0314":              15, | ||||
| 	"gpt-4-0613":              15, | ||||
| 	"gpt-4-32k":               30, | ||||
| 	"gpt-4-32k-0314":          30, | ||||
| 	"gpt-4-32k-0613":          30, | ||||
| 	"gpt-3.5-turbo":           0.75, // $0.0015 / 1K tokens | ||||
| 	"gpt-3.5-turbo-0301":      0.75, | ||||
| 	"gpt-3.5-turbo-0613":      0.75, | ||||
| 	"gpt-3.5-turbo-16k":       1.5, // $0.003 / 1K tokens | ||||
| 	"gpt-3.5-turbo-16k-0613":  1.5, | ||||
| 	"text-ada-001":            0.2, | ||||
| 	"text-babbage-001":        0.25, | ||||
| 	"text-curie-001":          1, | ||||
| 	"text-davinci-002":        10, | ||||
| 	"text-davinci-003":        10, | ||||
| 	"text-davinci-edit-001":   10, | ||||
| 	"code-davinci-edit-001":   10, | ||||
| 	"whisper-1":               10, | ||||
| 	"davinci":                 10, | ||||
| 	"curie":                   10, | ||||
| 	"babbage":                 10, | ||||
| 	"ada":                     10, | ||||
| 	"text-embedding-ada-002":  0.05, | ||||
| 	"text-search-ada-doc-001": 10, | ||||
| 	"text-moderation-stable":  0.1, | ||||
| 	"text-moderation-latest":  0.1, | ||||
| 	"dall-e":                  8, | ||||
| 	"claude-instant-1":        0.75, | ||||
| 	"claude-2":                30, | ||||
| 	"ERNIE-Bot":               0.8572, // ¥0.012 / 1k tokens | ||||
| 	"ERNIE-Bot-turbo":         0.5715, // ¥0.008 / 1k tokens | ||||
| 	"Embedding-V1":            0.1429, // ¥0.002 / 1k tokens | ||||
| 	"PaLM-2":                  1, | ||||
| 	"chatglm_pro":             0.7143, // ¥0.01 / 1k tokens | ||||
| 	"chatglm_std":             0.3572, // ¥0.005 / 1k tokens | ||||
| 	"chatglm_lite":            0.1429, // ¥0.002 / 1k tokens | ||||
| 	"qwen-v1":                 0.8572, // TBD: https://help.aliyun.com/document_detail/2399482.html?spm=a2c4g.2399482.0.0.1ad347feilAgag | ||||
| 	"qwen-plus-v1":            0.5715, // Same as above | ||||
| 	"SparkDesk":               0.8572, // TBD | ||||
| 	"gpt-4":                     15, | ||||
| 	"gpt-4-0314":                15, | ||||
| 	"gpt-4-0613":                15, | ||||
| 	"gpt-4-32k":                 30, | ||||
| 	"gpt-4-32k-0314":            30, | ||||
| 	"gpt-4-32k-0613":            30, | ||||
| 	"gpt-3.5-turbo":             0.75, // $0.0015 / 1K tokens | ||||
| 	"gpt-3.5-turbo-0301":        0.75, | ||||
| 	"gpt-3.5-turbo-0613":        0.75, | ||||
| 	"gpt-3.5-turbo-16k":         1.5, // $0.003 / 1K tokens | ||||
| 	"gpt-3.5-turbo-16k-0613":    1.5, | ||||
| 	"text-ada-001":              0.2, | ||||
| 	"text-babbage-001":          0.25, | ||||
| 	"text-curie-001":            1, | ||||
| 	"text-davinci-002":          10, | ||||
| 	"text-davinci-003":          10, | ||||
| 	"text-davinci-edit-001":     10, | ||||
| 	"code-davinci-edit-001":     10, | ||||
| 	"whisper-1":                 15, // $0.006 / minute -> $0.006 / 150 words -> $0.006 / 200 tokens -> $0.03 / 1k tokens | ||||
| 	"davinci":                   10, | ||||
| 	"curie":                     10, | ||||
| 	"babbage":                   10, | ||||
| 	"ada":                       10, | ||||
| 	"text-embedding-ada-002":    0.05, | ||||
| 	"text-search-ada-doc-001":   10, | ||||
| 	"text-moderation-stable":    0.1, | ||||
| 	"text-moderation-latest":    0.1, | ||||
| 	"dall-e":                    8, | ||||
| 	"claude-instant-1":          0.815,  // $1.63 / 1M tokens | ||||
| 	"claude-2":                  5.51,   // $11.02 / 1M tokens | ||||
| 	"ERNIE-Bot":                 0.8572, // ¥0.012 / 1k tokens | ||||
| 	"ERNIE-Bot-turbo":           0.5715, // ¥0.008 / 1k tokens | ||||
| 	"Embedding-V1":              0.1429, // ¥0.002 / 1k tokens | ||||
| 	"PaLM-2":                    1, | ||||
| 	"chatglm_pro":               0.7143, // ¥0.01 / 1k tokens | ||||
| 	"chatglm_std":               0.3572, // ¥0.005 / 1k tokens | ||||
| 	"chatglm_lite":              0.1429, // ¥0.002 / 1k tokens | ||||
| 	"qwen-v1":                   0.8572, // TBD: https://help.aliyun.com/document_detail/2399482.html?spm=a2c4g.2399482.0.0.1ad347feilAgag | ||||
| 	"qwen-plus-v1":              0.5715, // Same as above | ||||
| 	"SparkDesk":                 0.8572, // TBD | ||||
| 	"360GPT_S2_V9":              0.8572, // ¥0.012 / 1k tokens | ||||
| 	"embedding-bert-512-v1":     0.0715, // ¥0.001 / 1k tokens | ||||
| 	"embedding_s1_v1":           0.0715, // ¥0.001 / 1k tokens | ||||
| 	"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens | ||||
| 	"360GPT_S2_V9.4":            0.8572, // ¥0.012 / 1k tokens | ||||
| } | ||||
|  | ||||
| func ModelRatio2JSONString() string { | ||||
| @@ -73,3 +81,19 @@ func GetModelRatio(name string) float64 { | ||||
| 	} | ||||
| 	return ratio | ||||
| } | ||||
|  | ||||
// GetCompletionRatio returns the billing multiplier applied to completion
// tokens (relative to prompt tokens) for the given model name.
// Models with no special rule are billed at a 1:1 ratio.
func GetCompletionRatio(name string) float64 {
	switch {
	case strings.HasPrefix(name, "gpt-3.5"):
		return 1.333333
	case strings.HasPrefix(name, "gpt-4"):
		return 2
	case strings.HasPrefix(name, "claude-instant-1"):
		return 3.38
	case strings.HasPrefix(name, "claude-2"):
		return 2.965517
	default:
		return 1
	}
}
|   | ||||
| @@ -61,3 +61,8 @@ func RedisDel(key string) error { | ||||
| 	ctx := context.Background() | ||||
| 	return RDB.Del(ctx, key).Err() | ||||
| } | ||||
|  | ||||
| func RedisDecrease(key string, value int64) error { | ||||
| 	ctx := context.Background() | ||||
| 	return RDB.DecrBy(ctx, key, value).Err() | ||||
| } | ||||
|   | ||||
| @@ -7,6 +7,7 @@ import ( | ||||
| 	"log" | ||||
| 	"math/rand" | ||||
| 	"net" | ||||
| 	"os" | ||||
| 	"os/exec" | ||||
| 	"runtime" | ||||
| 	"strconv" | ||||
| @@ -177,3 +178,15 @@ func Max(a int, b int) int { | ||||
| 		return b | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func GetOrDefault(env string, defaultValue int) int { | ||||
| 	if env == "" || os.Getenv(env) == "" { | ||||
| 		return defaultValue | ||||
| 	} | ||||
| 	num, err := strconv.Atoi(os.Getenv(env)) | ||||
| 	if err != nil { | ||||
| 		SysError(fmt.Sprintf("failed to parse %s: %s, using default value: %d", env, err.Error(), defaultValue)) | ||||
| 		return defaultValue | ||||
| 	} | ||||
| 	return num | ||||
| } | ||||
|   | ||||
| @@ -24,6 +24,10 @@ func testChannel(channel *model.Channel, request ChatRequest) (error, *OpenAIErr | ||||
| 		fallthrough | ||||
| 	case common.ChannelTypeZhipu: | ||||
| 		fallthrough | ||||
| 	case common.ChannelTypeAli: | ||||
| 		fallthrough | ||||
| 	case common.ChannelType360: | ||||
| 		fallthrough | ||||
| 	case common.ChannelTypeXunfei: | ||||
| 		return errors.New("该渠道类型当前版本不支持测试,请手动测试"), nil | ||||
| 	case common.ChannelTypeAzure: | ||||
| @@ -174,7 +178,7 @@ func testAllChannels(notify bool) error { | ||||
| 				err = errors.New(fmt.Sprintf("响应时间 %.2fs 超过阈值 %.2fs", float64(milliseconds)/1000.0, float64(disableThreshold)/1000.0)) | ||||
| 				disableChannel(channel.Id, channel.Name, err.Error()) | ||||
| 			} | ||||
| 			if shouldDisableChannel(openaiErr) { | ||||
| 			if shouldDisableChannel(openaiErr, -1) { | ||||
| 				disableChannel(channel.Id, channel.Name, err.Error()) | ||||
| 			} | ||||
| 			channel.UpdateResponseTime(milliseconds) | ||||
|   | ||||
| @@ -85,7 +85,7 @@ func AddChannel(c *gin.Context) { | ||||
| 	} | ||||
| 	channel.CreatedTime = common.GetTimestamp() | ||||
| 	keys := strings.Split(channel.Key, "\n") | ||||
| 	channels := make([]model.Channel, 0) | ||||
| 	channels := make([]model.Channel, 0, len(keys)) | ||||
| 	for _, key := range keys { | ||||
| 		if key == "" { | ||||
| 			continue | ||||
|   | ||||
| @@ -63,6 +63,15 @@ func init() { | ||||
| 			Root:       "dall-e", | ||||
| 			Parent:     nil, | ||||
| 		}, | ||||
| 		{ | ||||
| 			Id:         "whisper-1", | ||||
| 			Object:     "model", | ||||
| 			Created:    1677649963, | ||||
| 			OwnedBy:    "openai", | ||||
| 			Permission: permission, | ||||
| 			Root:       "whisper-1", | ||||
| 			Parent:     nil, | ||||
| 		}, | ||||
| 		{ | ||||
| 			Id:         "gpt-3.5-turbo", | ||||
| 			Object:     "model", | ||||
| @@ -360,6 +369,51 @@ func init() { | ||||
| 			Root:       "SparkDesk", | ||||
| 			Parent:     nil, | ||||
| 		}, | ||||
| 		{ | ||||
| 			Id:         "360GPT_S2_V9", | ||||
| 			Object:     "model", | ||||
| 			Created:    1677649963, | ||||
| 			OwnedBy:    "360", | ||||
| 			Permission: permission, | ||||
| 			Root:       "360GPT_S2_V9", | ||||
| 			Parent:     nil, | ||||
| 		}, | ||||
| 		{ | ||||
| 			Id:         "embedding-bert-512-v1", | ||||
| 			Object:     "model", | ||||
| 			Created:    1677649963, | ||||
| 			OwnedBy:    "360", | ||||
| 			Permission: permission, | ||||
| 			Root:       "embedding-bert-512-v1", | ||||
| 			Parent:     nil, | ||||
| 		}, | ||||
| 		{ | ||||
| 			Id:         "embedding_s1_v1", | ||||
| 			Object:     "model", | ||||
| 			Created:    1677649963, | ||||
| 			OwnedBy:    "360", | ||||
| 			Permission: permission, | ||||
| 			Root:       "embedding_s1_v1", | ||||
| 			Parent:     nil, | ||||
| 		}, | ||||
| 		{ | ||||
| 			Id:         "semantic_similarity_s1_v1", | ||||
| 			Object:     "model", | ||||
| 			Created:    1677649963, | ||||
| 			OwnedBy:    "360", | ||||
| 			Permission: permission, | ||||
| 			Root:       "semantic_similarity_s1_v1", | ||||
| 			Parent:     nil, | ||||
| 		}, | ||||
| 		{ | ||||
| 			Id:         "360GPT_S2_V9.4", | ||||
| 			Object:     "model", | ||||
| 			Created:    1677649963, | ||||
| 			OwnedBy:    "360", | ||||
| 			Permission: permission, | ||||
| 			Root:       "360GPT_S2_V9.4", | ||||
| 			Parent:     nil, | ||||
| 		}, | ||||
| 	} | ||||
| 	openAIModelsMap = make(map[string]OpenAIModels) | ||||
| 	for _, model := range openAIModels { | ||||
|   | ||||
							
								
								
									
										220
									
								
								controller/relay-aiproxy.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										220
									
								
								controller/relay-aiproxy.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,220 @@ | ||||
| package controller | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"github.com/gin-gonic/gin" | ||||
| 	"io" | ||||
| 	"net/http" | ||||
| 	"one-api/common" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // https://docs.aiproxy.io/dev/library#使用已经定制好的知识库进行对话问答 | ||||
|  | ||||
// AIProxyLibraryRequest is the upstream request body for AIProxy's
// knowledge-library chat endpoint.
type AIProxyLibraryRequest struct {
	Model     string `json:"model"`
	Query     string `json:"query"` // the question sent to the library
	LibraryId string `json:"libraryId"`
	Stream    bool   `json:"stream"`
}

// AIProxyLibraryError is the error envelope embedded in AIProxy responses;
// ErrCode == 0 means success.
type AIProxyLibraryError struct {
	ErrCode int    `json:"errCode"`
	Message string `json:"message"`
}

// AIProxyLibraryDocument is one reference document cited by an answer.
type AIProxyLibraryDocument struct {
	Title string `json:"title"`
	URL   string `json:"url"`
}

// AIProxyLibraryResponse is the non-streaming answer returned by AIProxy,
// including any cited documents and the embedded error envelope.
type AIProxyLibraryResponse struct {
	Success   bool                     `json:"success"`
	Answer    string                   `json:"answer"`
	Documents []AIProxyLibraryDocument `json:"documents"`
	AIProxyLibraryError
}

// AIProxyLibraryStreamResponse is one event of AIProxy's streaming answer.
type AIProxyLibraryStreamResponse struct {
	Content   string                   `json:"content"`
	Finish    bool                     `json:"finish"`
	Model     string                   `json:"model"`
	Documents []AIProxyLibraryDocument `json:"documents"`
}
|  | ||||
| func requestOpenAI2AIProxyLibrary(request GeneralOpenAIRequest) *AIProxyLibraryRequest { | ||||
| 	query := "" | ||||
| 	if len(request.Messages) != 0 { | ||||
| 		query = request.Messages[len(request.Messages)-1].Content | ||||
| 	} | ||||
| 	return &AIProxyLibraryRequest{ | ||||
| 		Model:  request.Model, | ||||
| 		Stream: request.Stream, | ||||
| 		Query:  query, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func aiProxyDocuments2Markdown(documents []AIProxyLibraryDocument) string { | ||||
| 	if len(documents) == 0 { | ||||
| 		return "" | ||||
| 	} | ||||
| 	content := "\n\n参考文档:\n" | ||||
| 	for i, document := range documents { | ||||
| 		content += fmt.Sprintf("%d. [%s](%s)\n", i+1, document.Title, document.URL) | ||||
| 	} | ||||
| 	return content | ||||
| } | ||||
|  | ||||
| func responseAIProxyLibrary2OpenAI(response *AIProxyLibraryResponse) *OpenAITextResponse { | ||||
| 	content := response.Answer + aiProxyDocuments2Markdown(response.Documents) | ||||
| 	choice := OpenAITextResponseChoice{ | ||||
| 		Index: 0, | ||||
| 		Message: Message{ | ||||
| 			Role:    "assistant", | ||||
| 			Content: content, | ||||
| 		}, | ||||
| 		FinishReason: "stop", | ||||
| 	} | ||||
| 	fullTextResponse := OpenAITextResponse{ | ||||
| 		Id:      common.GetUUID(), | ||||
| 		Object:  "chat.completion", | ||||
| 		Created: common.GetTimestamp(), | ||||
| 		Choices: []OpenAITextResponseChoice{choice}, | ||||
| 	} | ||||
| 	return &fullTextResponse | ||||
| } | ||||
|  | ||||
| func documentsAIProxyLibrary(documents []AIProxyLibraryDocument) *ChatCompletionsStreamResponse { | ||||
| 	var choice ChatCompletionsStreamResponseChoice | ||||
| 	choice.Delta.Content = aiProxyDocuments2Markdown(documents) | ||||
| 	choice.FinishReason = &stopFinishReason | ||||
| 	return &ChatCompletionsStreamResponse{ | ||||
| 		Id:      common.GetUUID(), | ||||
| 		Object:  "chat.completion.chunk", | ||||
| 		Created: common.GetTimestamp(), | ||||
| 		Model:   "", | ||||
| 		Choices: []ChatCompletionsStreamResponseChoice{choice}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func streamResponseAIProxyLibrary2OpenAI(response *AIProxyLibraryStreamResponse) *ChatCompletionsStreamResponse { | ||||
| 	var choice ChatCompletionsStreamResponseChoice | ||||
| 	choice.Delta.Content = response.Content | ||||
| 	return &ChatCompletionsStreamResponse{ | ||||
| 		Id:      common.GetUUID(), | ||||
| 		Object:  "chat.completion.chunk", | ||||
| 		Created: common.GetTimestamp(), | ||||
| 		Model:   response.Model, | ||||
| 		Choices: []ChatCompletionsStreamResponseChoice{choice}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
// aiProxyLibraryStreamHandler relays an AIProxy SSE stream to the client as
// OpenAI-style chat-completion chunks. Documents seen in any event are
// remembered and emitted as one final Markdown chunk before "data: [DONE]".
// NOTE(review): usage is returned zero-valued here — token accounting for
// this channel presumably happens in the caller; confirm.
func aiProxyLibraryStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) {
	var usage Usage
	scanner := bufio.NewScanner(resp.Body)
	// Custom split: one token per "\n"-terminated line (SSE event line).
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if atEOF && len(data) == 0 {
			return 0, nil, nil
		}
		if i := strings.Index(string(data), "\n"); i >= 0 {
			return i + 1, data[0:i], nil
		}
		if atEOF {
			return len(data), data, nil
		}
		return 0, nil, nil
	})
	dataChan := make(chan string)
	stopChan := make(chan bool)
	// Reader goroutine: forward the payload of each "data:" line; signal
	// stopChan when the upstream stream ends.
	go func() {
		for scanner.Scan() {
			data := scanner.Text()
			if len(data) < 5 { // ignore blank line or wrong format
				continue
			}
			if data[:5] != "data:" {
				continue
			}
			data = data[5:]
			dataChan <- data
		}
		stopChan <- true
	}()
	setEventStreamHeaders(c)
	var documents []AIProxyLibraryDocument
	c.Stream(func(w io.Writer) bool {
		select {
		case data := <-dataChan:
			var AIProxyLibraryResponse AIProxyLibraryStreamResponse
			err := json.Unmarshal([]byte(data), &AIProxyLibraryResponse)
			if err != nil {
				// Malformed event: log and keep the stream alive.
				common.SysError("error unmarshalling stream response: " + err.Error())
				return true
			}
			// Keep the latest non-empty document list for the closing chunk.
			if len(AIProxyLibraryResponse.Documents) != 0 {
				documents = AIProxyLibraryResponse.Documents
			}
			response := streamResponseAIProxyLibrary2OpenAI(&AIProxyLibraryResponse)
			jsonResponse, err := json.Marshal(response)
			if err != nil {
				common.SysError("error marshalling stream response: " + err.Error())
				return true
			}
			c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
			return true
		case <-stopChan:
			// Upstream finished: emit the buffered documents, then [DONE].
			response := documentsAIProxyLibrary(documents)
			jsonResponse, err := json.Marshal(response)
			if err != nil {
				common.SysError("error marshalling stream response: " + err.Error())
				return true
			}
			c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
			c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
			return false
		}
	})
	err := resp.Body.Close()
	if err != nil {
		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
	}
	return nil, &usage
}
|  | ||||
// aiProxyLibraryHandler handles the non-streaming AIProxy library response:
// it reads the upstream body, maps AIProxy errors (ErrCode != 0) to an
// OpenAI-style error, and otherwise writes the converted OpenAI response to
// the client, returning its usage for billing.
func aiProxyLibraryHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) {
	var AIProxyLibraryResponse AIProxyLibraryResponse
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
	}
	err = resp.Body.Close()
	if err != nil {
		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
	}
	err = json.Unmarshal(responseBody, &AIProxyLibraryResponse)
	if err != nil {
		return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
	}
	// Upstream signalled an application-level error.
	if AIProxyLibraryResponse.ErrCode != 0 {
		return &OpenAIErrorWithStatusCode{
			OpenAIError: OpenAIError{
				Message: AIProxyLibraryResponse.Message,
				Type:    strconv.Itoa(AIProxyLibraryResponse.ErrCode),
				Code:    AIProxyLibraryResponse.ErrCode,
			},
			StatusCode: resp.StatusCode,
		}, nil
	}
	fullTextResponse := responseAIProxyLibrary2OpenAI(&AIProxyLibraryResponse)
	jsonResponse, err := json.Marshal(fullTextResponse)
	if err != nil {
		return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
	}
	c.Writer.Header().Set("Content-Type", "application/json")
	c.Writer.WriteHeader(resp.StatusCode)
	// NOTE(review): the write error is assigned but never checked before the
	// return below — confirm this is intentional.
	_, err = c.Writer.Write(jsonResponse)
	return nil, &fullTextResponse.Usage
}
| @@ -121,7 +121,10 @@ func responseAli2OpenAI(response *AliChatResponse) *OpenAITextResponse { | ||||
| func streamResponseAli2OpenAI(aliResponse *AliChatResponse) *ChatCompletionsStreamResponse { | ||||
| 	var choice ChatCompletionsStreamResponseChoice | ||||
| 	choice.Delta.Content = aliResponse.Output.Text | ||||
| 	choice.FinishReason = aliResponse.Output.FinishReason | ||||
| 	if aliResponse.Output.FinishReason != "null" { | ||||
| 		finishReason := aliResponse.Output.FinishReason | ||||
| 		choice.FinishReason = &finishReason | ||||
| 	} | ||||
| 	response := ChatCompletionsStreamResponse{ | ||||
| 		Id:      aliResponse.RequestId, | ||||
| 		Object:  "chat.completion.chunk", | ||||
| @@ -163,11 +166,7 @@ func aliStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStat | ||||
| 		} | ||||
| 		stopChan <- true | ||||
| 	}() | ||||
| 	c.Writer.Header().Set("Content-Type", "text/event-stream") | ||||
| 	c.Writer.Header().Set("Cache-Control", "no-cache") | ||||
| 	c.Writer.Header().Set("Connection", "keep-alive") | ||||
| 	c.Writer.Header().Set("Transfer-Encoding", "chunked") | ||||
| 	c.Writer.Header().Set("X-Accel-Buffering", "no") | ||||
| 	setEventStreamHeaders(c) | ||||
| 	lastResponseText := "" | ||||
| 	c.Stream(func(w io.Writer) bool { | ||||
| 		select { | ||||
| @@ -178,9 +177,11 @@ func aliStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStat | ||||
| 				common.SysError("error unmarshalling stream response: " + err.Error()) | ||||
| 				return true | ||||
| 			} | ||||
| 			usage.PromptTokens += aliResponse.Usage.InputTokens | ||||
| 			usage.CompletionTokens += aliResponse.Usage.OutputTokens | ||||
| 			usage.TotalTokens += aliResponse.Usage.InputTokens + aliResponse.Usage.OutputTokens | ||||
| 			if aliResponse.Usage.OutputTokens != 0 { | ||||
| 				usage.PromptTokens = aliResponse.Usage.InputTokens | ||||
| 				usage.CompletionTokens = aliResponse.Usage.OutputTokens | ||||
| 				usage.TotalTokens = aliResponse.Usage.InputTokens + aliResponse.Usage.OutputTokens | ||||
| 			} | ||||
| 			response := streamResponseAli2OpenAI(&aliResponse) | ||||
| 			response.Choices[0].Delta.Content = strings.TrimPrefix(response.Choices[0].Delta.Content, lastResponseText) | ||||
| 			lastResponseText = aliResponse.Output.Text | ||||
|   | ||||
							
								
								
									
										147
									
								
								controller/relay-audio.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										147
									
								
								controller/relay-audio.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,147 @@ | ||||
| package controller | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"net/http" | ||||
| 	"one-api/common" | ||||
| 	"one-api/model" | ||||
|  | ||||
| 	"github.com/gin-gonic/gin" | ||||
| ) | ||||
|  | ||||
// relayAudioHelper proxies an audio (transcription) request to the upstream
// channel, pre-consumes quota, forwards the request body verbatim, and —
// after the response is read — asynchronously settles the final quota based
// on the token count of the transcribed text.
func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
	// Billing is always based on the whisper-1 model for this endpoint.
	audioModel := "whisper-1"

	tokenId := c.GetInt("token_id")
	channelType := c.GetInt("channel")
	userId := c.GetInt("id")
	group := c.GetString("group")

	// Estimate a pre-consumed quota from the model and group ratios.
	preConsumedTokens := common.PreConsumedQuota
	modelRatio := common.GetModelRatio(audioModel)
	groupRatio := common.GetGroupRatio(group)
	ratio := modelRatio * groupRatio
	preConsumedQuota := int(float64(preConsumedTokens) * ratio)
	userQuota, err := model.CacheGetUserQuota(userId)
	if err != nil {
		return errorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
	}
	// NOTE(review): the cached user quota is decreased here even when the
	// pre-consumption below is skipped (preConsumedQuota set to 0) — confirm
	// this asymmetry is intended.
	err = model.CacheDecreaseUserQuota(userId, preConsumedQuota)
	if err != nil {
		return errorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
	}
	if userQuota > 100*preConsumedQuota {
		// in this case, we do not pre-consume quota
		// because the user has enough quota
		preConsumedQuota = 0
	}
	if preConsumedQuota > 0 {
		err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
		if err != nil {
			return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
		}
	}

	// map model name
	modelMapping := c.GetString("model_mapping")
	if modelMapping != "" {
		modelMap := make(map[string]string)
		err := json.Unmarshal([]byte(modelMapping), &modelMap)
		if err != nil {
			return errorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
		}
		if modelMap[audioModel] != "" {
			audioModel = modelMap[audioModel]
		}
	}

	// Build the upstream URL: channel default base URL, optionally overridden
	// per channel, plus the original request path and query.
	baseURL := common.ChannelBaseURLs[channelType]
	requestURL := c.Request.URL.String()

	if c.GetString("base_url") != "" {
		baseURL = c.GetString("base_url")
	}

	fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)
	// Stream the incoming multipart body through to upstream unchanged.
	requestBody := c.Request.Body

	req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
	if err != nil {
		return errorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}
	req.Header.Set("Authorization", c.Request.Header.Get("Authorization"))
	req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
	req.Header.Set("Accept", c.Request.Header.Get("Accept"))

	resp, err := httpClient.Do(req)
	if err != nil {
		return errorWrapper(err, "do_request_failed", http.StatusInternalServerError)
	}

	err = req.Body.Close()
	if err != nil {
		return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
	}
	err = c.Request.Body.Close()
	if err != nil {
		return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
	}
	var audioResponse AudioResponse

	// Deferred async settlement: once the handler returns, bill the actual
	// token count of the transcription, refund/charge the delta against the
	// pre-consumed amount, and record the consumption logs.
	defer func() {
		go func() {
			quota := countTokenText(audioResponse.Text, audioModel)
			quotaDelta := quota - preConsumedQuota
			err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
			if err != nil {
				common.SysError("error consuming token remain quota: " + err.Error())
			}
			err = model.CacheUpdateUserQuota(userId)
			if err != nil {
				common.SysError("error update user quota cache: " + err.Error())
			}
			if quota != 0 {
				tokenName := c.GetString("token_name")
				logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
				model.RecordConsumeLog(userId, 0, 0, audioModel, tokenName, quota, logContent)
				model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
				channelId := c.GetInt("channel_id")
				model.UpdateChannelUsedQuota(channelId, quota)
			}
		}()
	}()

	// Read the whole upstream body so the transcription text can be parsed
	// for billing, then restore it for relaying to the client.
	responseBody, err := io.ReadAll(resp.Body)

	if err != nil {
		return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
	}
	err = resp.Body.Close()
	if err != nil {
		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
	}
	err = json.Unmarshal(responseBody, &audioResponse)
	if err != nil {
		return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
	}

	resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))

	// Copy upstream headers (first value only) and relay the body verbatim.
	for k, v := range resp.Header {
		c.Writer.Header().Set(k, v[0])
	}
	c.Writer.WriteHeader(resp.StatusCode)

	_, err = io.Copy(c.Writer, resp.Body)
	if err != nil {
		return errorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError)
	}
	err = resp.Body.Close()
	if err != nil {
		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
	}
	return nil
}
| @@ -3,22 +3,22 @@ package controller | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"encoding/json" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"github.com/gin-gonic/gin" | ||||
| 	"io" | ||||
| 	"net/http" | ||||
| 	"one-api/common" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
| // https://cloud.baidu.com/doc/WENXINWORKSHOP/s/flfmc9do2 | ||||
|  | ||||
| type BaiduTokenResponse struct { | ||||
| 	RefreshToken  string `json:"refresh_token"` | ||||
| 	ExpiresIn     int    `json:"expires_in"` | ||||
| 	SessionKey    string `json:"session_key"` | ||||
| 	AccessToken   string `json:"access_token"` | ||||
| 	Scope         string `json:"scope"` | ||||
| 	SessionSecret string `json:"session_secret"` | ||||
| 	ExpiresIn   int    `json:"expires_in"` | ||||
| 	AccessToken string `json:"access_token"` | ||||
| } | ||||
|  | ||||
| type BaiduMessage struct { | ||||
| @@ -73,6 +73,16 @@ type BaiduEmbeddingResponse struct { | ||||
| 	BaiduError | ||||
| } | ||||
|  | ||||
// BaiduAccessToken is the response of Baidu's OAuth token endpoint, plus a
// locally computed absolute expiry (ExpiresAt is never serialized).
type BaiduAccessToken struct {
	AccessToken      string    `json:"access_token"`
	Error            string    `json:"error,omitempty"`
	ErrorDescription string    `json:"error_description,omitempty"`
	ExpiresIn        int64     `json:"expires_in,omitempty"`
	ExpiresAt        time.Time `json:"-"` // computed as now + ExpiresIn seconds
}

// baiduTokenStore caches access tokens keyed by the channel API key.
var baiduTokenStore sync.Map
|  | ||||
| func requestOpenAI2Baidu(request GeneralOpenAIRequest) *BaiduChatRequest { | ||||
| 	messages := make([]BaiduMessage, 0, len(request.Messages)) | ||||
| 	for _, message := range request.Messages { | ||||
| @@ -120,7 +130,9 @@ func responseBaidu2OpenAI(response *BaiduChatResponse) *OpenAITextResponse { | ||||
| func streamResponseBaidu2OpenAI(baiduResponse *BaiduChatStreamResponse) *ChatCompletionsStreamResponse { | ||||
| 	var choice ChatCompletionsStreamResponseChoice | ||||
| 	choice.Delta.Content = baiduResponse.Result | ||||
| 	choice.FinishReason = "stop" | ||||
| 	if baiduResponse.IsEnd { | ||||
| 		choice.FinishReason = &stopFinishReason | ||||
| 	} | ||||
| 	response := ChatCompletionsStreamResponse{ | ||||
| 		Id:      baiduResponse.Id, | ||||
| 		Object:  "chat.completion.chunk", | ||||
| @@ -138,8 +150,12 @@ func embeddingRequestOpenAI2Baidu(request GeneralOpenAIRequest) *BaiduEmbeddingR | ||||
| 	switch request.Input.(type) { | ||||
| 	case string: | ||||
| 		baiduEmbeddingRequest.Input = []string{request.Input.(string)} | ||||
| 	case []string: | ||||
| 		baiduEmbeddingRequest.Input = request.Input.([]string) | ||||
| 	case []any: | ||||
| 		for _, item := range request.Input.([]any) { | ||||
| 			if str, ok := item.(string); ok { | ||||
| 				baiduEmbeddingRequest.Input = append(baiduEmbeddingRequest.Input, str) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return &baiduEmbeddingRequest | ||||
| } | ||||
| @@ -189,11 +205,7 @@ func baiduStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithSt | ||||
| 		} | ||||
| 		stopChan <- true | ||||
| 	}() | ||||
| 	c.Writer.Header().Set("Content-Type", "text/event-stream") | ||||
| 	c.Writer.Header().Set("Cache-Control", "no-cache") | ||||
| 	c.Writer.Header().Set("Connection", "keep-alive") | ||||
| 	c.Writer.Header().Set("Transfer-Encoding", "chunked") | ||||
| 	c.Writer.Header().Set("X-Accel-Buffering", "no") | ||||
| 	setEventStreamHeaders(c) | ||||
| 	c.Stream(func(w io.Writer) bool { | ||||
| 		select { | ||||
| 		case data := <-dataChan: | ||||
| @@ -203,9 +215,11 @@ func baiduStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithSt | ||||
| 				common.SysError("error unmarshalling stream response: " + err.Error()) | ||||
| 				return true | ||||
| 			} | ||||
| 			usage.PromptTokens += baiduResponse.Usage.PromptTokens | ||||
| 			usage.CompletionTokens += baiduResponse.Usage.CompletionTokens | ||||
| 			usage.TotalTokens += baiduResponse.Usage.TotalTokens | ||||
| 			if baiduResponse.Usage.TotalTokens != 0 { | ||||
| 				usage.TotalTokens = baiduResponse.Usage.TotalTokens | ||||
| 				usage.PromptTokens = baiduResponse.Usage.PromptTokens | ||||
| 				usage.CompletionTokens = baiduResponse.Usage.TotalTokens - baiduResponse.Usage.PromptTokens | ||||
| 			} | ||||
| 			response := streamResponseBaidu2OpenAI(&baiduResponse) | ||||
| 			jsonResponse, err := json.Marshal(response) | ||||
| 			if err != nil { | ||||
| @@ -297,3 +311,60 @@ func baiduEmbeddingHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWit | ||||
| 	_, err = c.Writer.Write(jsonResponse) | ||||
| 	return nil, &fullTextResponse.Usage | ||||
| } | ||||
|  | ||||
| func getBaiduAccessToken(apiKey string) (string, error) { | ||||
| 	if val, ok := baiduTokenStore.Load(apiKey); ok { | ||||
| 		var accessToken BaiduAccessToken | ||||
| 		if accessToken, ok = val.(BaiduAccessToken); ok { | ||||
| 			// soon this will expire | ||||
| 			if time.Now().Add(time.Hour).After(accessToken.ExpiresAt) { | ||||
| 				go func() { | ||||
| 					_, _ = getBaiduAccessTokenHelper(apiKey) | ||||
| 				}() | ||||
| 			} | ||||
| 			return accessToken.AccessToken, nil | ||||
| 		} | ||||
| 	} | ||||
| 	accessToken, err := getBaiduAccessTokenHelper(apiKey) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	if accessToken == nil { | ||||
| 		return "", errors.New("getBaiduAccessToken return a nil token") | ||||
| 	} | ||||
| 	return (*accessToken).AccessToken, nil | ||||
| } | ||||
|  | ||||
| func getBaiduAccessTokenHelper(apiKey string) (*BaiduAccessToken, error) { | ||||
| 	parts := strings.Split(apiKey, "|") | ||||
| 	if len(parts) != 2 { | ||||
| 		return nil, errors.New("invalid baidu apikey") | ||||
| 	} | ||||
| 	req, err := http.NewRequest("POST", fmt.Sprintf("https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=%s&client_secret=%s", | ||||
| 		parts[0], parts[1]), nil) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	req.Header.Add("Content-Type", "application/json") | ||||
| 	req.Header.Add("Accept", "application/json") | ||||
| 	res, err := impatientHTTPClient.Do(req) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer res.Body.Close() | ||||
|  | ||||
| 	var accessToken BaiduAccessToken | ||||
| 	err = json.NewDecoder(res.Body).Decode(&accessToken) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	if accessToken.Error != "" { | ||||
| 		return nil, errors.New(accessToken.Error + ": " + accessToken.ErrorDescription) | ||||
| 	} | ||||
| 	if accessToken.AccessToken == "" { | ||||
| 		return nil, errors.New("getBaiduAccessTokenHelper get empty access token") | ||||
| 	} | ||||
| 	accessToken.ExpiresAt = time.Now().Add(time.Duration(accessToken.ExpiresIn) * time.Second) | ||||
| 	baiduTokenStore.Store(apiKey, accessToken) | ||||
| 	return &accessToken, nil | ||||
| } | ||||
|   | ||||
| @@ -81,7 +81,10 @@ func requestOpenAI2Claude(textRequest GeneralOpenAIRequest) *ClaudeRequest { | ||||
| func streamResponseClaude2OpenAI(claudeResponse *ClaudeResponse) *ChatCompletionsStreamResponse { | ||||
| 	var choice ChatCompletionsStreamResponseChoice | ||||
| 	choice.Delta.Content = claudeResponse.Completion | ||||
| 	choice.FinishReason = stopReasonClaude2OpenAI(claudeResponse.StopReason) | ||||
| 	finishReason := stopReasonClaude2OpenAI(claudeResponse.StopReason) | ||||
| 	if finishReason != "null" { | ||||
| 		choice.FinishReason = &finishReason | ||||
| 	} | ||||
| 	var response ChatCompletionsStreamResponse | ||||
| 	response.Object = "chat.completion.chunk" | ||||
| 	response.Model = claudeResponse.Model | ||||
| @@ -138,11 +141,7 @@ func claudeStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithS | ||||
| 		} | ||||
| 		stopChan <- true | ||||
| 	}() | ||||
| 	c.Writer.Header().Set("Content-Type", "text/event-stream") | ||||
| 	c.Writer.Header().Set("Cache-Control", "no-cache") | ||||
| 	c.Writer.Header().Set("Connection", "keep-alive") | ||||
| 	c.Writer.Header().Set("Transfer-Encoding", "chunked") | ||||
| 	c.Writer.Header().Set("X-Accel-Buffering", "no") | ||||
| 	setEventStreamHeaders(c) | ||||
| 	c.Stream(func(w io.Writer) bool { | ||||
| 		select { | ||||
| 		case data := <-dataChan: | ||||
|   | ||||
| @@ -66,11 +66,7 @@ func openaiStreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*O | ||||
| 		} | ||||
| 		stopChan <- true | ||||
| 	}() | ||||
| 	c.Writer.Header().Set("Content-Type", "text/event-stream") | ||||
| 	c.Writer.Header().Set("Cache-Control", "no-cache") | ||||
| 	c.Writer.Header().Set("Connection", "keep-alive") | ||||
| 	c.Writer.Header().Set("Transfer-Encoding", "chunked") | ||||
| 	c.Writer.Header().Set("X-Accel-Buffering", "no") | ||||
| 	setEventStreamHeaders(c) | ||||
| 	c.Stream(func(w io.Writer) bool { | ||||
| 		select { | ||||
| 		case data := <-dataChan: | ||||
|   | ||||
| @@ -94,7 +94,7 @@ func streamResponsePaLM2OpenAI(palmResponse *PaLMChatResponse) *ChatCompletionsS | ||||
| 	if len(palmResponse.Candidates) > 0 { | ||||
| 		choice.Delta.Content = palmResponse.Candidates[0].Content | ||||
| 	} | ||||
| 	choice.FinishReason = "stop" | ||||
| 	choice.FinishReason = &stopFinishReason | ||||
| 	var response ChatCompletionsStreamResponse | ||||
| 	response.Object = "chat.completion.chunk" | ||||
| 	response.Model = "palm2" | ||||
| @@ -143,11 +143,7 @@ func palmStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithSta | ||||
| 		dataChan <- string(jsonResponse) | ||||
| 		stopChan <- true | ||||
| 	}() | ||||
| 	c.Writer.Header().Set("Content-Type", "text/event-stream") | ||||
| 	c.Writer.Header().Set("Cache-Control", "no-cache") | ||||
| 	c.Writer.Header().Set("Connection", "keep-alive") | ||||
| 	c.Writer.Header().Set("Transfer-Encoding", "chunked") | ||||
| 	c.Writer.Header().Set("X-Accel-Buffering", "no") | ||||
| 	setEventStreamHeaders(c) | ||||
| 	c.Stream(func(w io.Writer) bool { | ||||
| 		select { | ||||
| 		case data := <-dataChan: | ||||
|   | ||||
| @@ -5,13 +5,13 @@ import ( | ||||
| 	"encoding/json" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"github.com/gin-gonic/gin" | ||||
| 	"io" | ||||
| 	"net/http" | ||||
| 	"one-api/common" | ||||
| 	"one-api/model" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/gin-gonic/gin" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| @@ -22,12 +22,17 @@ const ( | ||||
| 	APITypeZhipu | ||||
| 	APITypeAli | ||||
| 	APITypeXunfei | ||||
| 	APITypeAIProxyLibrary | ||||
| ) | ||||
|  | ||||
| var httpClient *http.Client | ||||
| var impatientHTTPClient *http.Client | ||||
|  | ||||
| func init() { | ||||
| 	httpClient = &http.Client{} | ||||
| 	impatientHTTPClient = &http.Client{ | ||||
| 		Timeout: 5 * time.Second, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode { | ||||
| @@ -100,6 +105,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode { | ||||
| 		apiType = APITypeAli | ||||
| 	case common.ChannelTypeXunfei: | ||||
| 		apiType = APITypeXunfei | ||||
| 	case common.ChannelTypeAIProxyLibrary: | ||||
| 		apiType = APITypeAIProxyLibrary | ||||
| 	} | ||||
| 	baseURL := common.ChannelBaseURLs[channelType] | ||||
| 	requestURL := c.Request.URL.String() | ||||
| @@ -146,7 +153,11 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode { | ||||
| 		} | ||||
| 		apiKey := c.Request.Header.Get("Authorization") | ||||
| 		apiKey = strings.TrimPrefix(apiKey, "Bearer ") | ||||
| 		fullRequestURL += "?access_token=" + apiKey // TODO: access token expire in 30 days | ||||
| 		var err error | ||||
| 		if apiKey, err = getBaiduAccessToken(apiKey); err != nil { | ||||
| 			return errorWrapper(err, "invalid_baidu_config", http.StatusInternalServerError) | ||||
| 		} | ||||
| 		fullRequestURL += "?access_token=" + apiKey | ||||
| 	case APITypePaLM: | ||||
| 		fullRequestURL = "https://generativelanguage.googleapis.com/v1beta2/models/chat-bison-001:generateMessage" | ||||
| 		if baseURL != "" { | ||||
| @@ -163,6 +174,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode { | ||||
| 		fullRequestURL = fmt.Sprintf("https://open.bigmodel.cn/api/paas/v3/model-api/%s/%s", textRequest.Model, method) | ||||
| 	case APITypeAli: | ||||
| 		fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation" | ||||
| 	case APITypeAIProxyLibrary: | ||||
| 		fullRequestURL = fmt.Sprintf("%s/api/library/ask", baseURL) | ||||
| 	} | ||||
| 	var promptTokens int | ||||
| 	var completionTokens int | ||||
| @@ -186,7 +199,11 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode { | ||||
| 	if err != nil { | ||||
| 		return errorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError) | ||||
| 	} | ||||
| 	if userQuota > 10*preConsumedQuota { | ||||
| 	err = model.CacheDecreaseUserQuota(userId, preConsumedQuota) | ||||
| 	if err != nil { | ||||
| 		return errorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError) | ||||
| 	} | ||||
| 	if userQuota > 100*preConsumedQuota { | ||||
| 		// in this case, we do not pre-consume quota | ||||
| 		// because the user has enough quota | ||||
| 		preConsumedQuota = 0 | ||||
| @@ -251,6 +268,14 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode { | ||||
| 			return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError) | ||||
| 		} | ||||
| 		requestBody = bytes.NewBuffer(jsonStr) | ||||
| 	case APITypeAIProxyLibrary: | ||||
| 		aiProxyLibraryRequest := requestOpenAI2AIProxyLibrary(textRequest) | ||||
| 		aiProxyLibraryRequest.LibraryId = c.GetString("library_id") | ||||
| 		jsonStr, err := json.Marshal(aiProxyLibraryRequest) | ||||
| 		if err != nil { | ||||
| 			return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError) | ||||
| 		} | ||||
| 		requestBody = bytes.NewBuffer(jsonStr) | ||||
| 	} | ||||
|  | ||||
| 	var req *http.Request | ||||
| @@ -270,6 +295,10 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode { | ||||
| 				req.Header.Set("api-key", apiKey) | ||||
| 			} else { | ||||
| 				req.Header.Set("Authorization", c.Request.Header.Get("Authorization")) | ||||
| 				if channelType == common.ChannelTypeOpenRouter { | ||||
| 					req.Header.Set("HTTP-Referer", "https://github.com/songquanpeng/one-api") | ||||
| 					req.Header.Set("X-Title", "One API") | ||||
| 				} | ||||
| 			} | ||||
| 		case APITypeClaude: | ||||
| 			req.Header.Set("x-api-key", apiKey) | ||||
| @@ -286,6 +315,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode { | ||||
| 			if textRequest.Stream { | ||||
| 				req.Header.Set("X-DashScope-SSE", "enable") | ||||
| 			} | ||||
| 		default: | ||||
| 			req.Header.Set("Authorization", "Bearer "+apiKey) | ||||
| 		} | ||||
| 		req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type")) | ||||
| 		req.Header.Set("Accept", c.Request.Header.Get("Accept")) | ||||
| @@ -303,53 +334,54 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode { | ||||
| 			return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError) | ||||
| 		} | ||||
| 		isStream = isStream || strings.HasPrefix(resp.Header.Get("Content-Type"), "text/event-stream") | ||||
|  | ||||
| 		if resp.StatusCode != http.StatusOK { | ||||
| 			return relayErrorHandler(resp) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	var textResponse TextResponse | ||||
| 	tokenName := c.GetString("token_name") | ||||
| 	channelId := c.GetInt("channel_id") | ||||
|  | ||||
| 	defer func() { | ||||
| 		if consumeQuota { | ||||
| 			quota := 0 | ||||
| 			completionRatio := 1.0 | ||||
| 			if strings.HasPrefix(textRequest.Model, "gpt-3.5") { | ||||
| 				completionRatio = 1.333333 | ||||
| 			} | ||||
| 			if strings.HasPrefix(textRequest.Model, "gpt-4") { | ||||
| 				completionRatio = 2 | ||||
| 			} | ||||
| 		// c.Writer.Flush() | ||||
| 		go func() { | ||||
| 			if consumeQuota { | ||||
| 				quota := 0 | ||||
| 				completionRatio := common.GetCompletionRatio(textRequest.Model) | ||||
| 				promptTokens = textResponse.Usage.PromptTokens | ||||
| 				completionTokens = textResponse.Usage.CompletionTokens | ||||
|  | ||||
| 			promptTokens = textResponse.Usage.PromptTokens | ||||
| 			completionTokens = textResponse.Usage.CompletionTokens | ||||
| 				quota = promptTokens + int(float64(completionTokens)*completionRatio) | ||||
| 				quota = int(float64(quota) * ratio) | ||||
| 				if ratio != 0 && quota <= 0 { | ||||
| 					quota = 1 | ||||
| 				} | ||||
| 				totalTokens := promptTokens + completionTokens | ||||
| 				if totalTokens == 0 { | ||||
| 					// in this case, must be some error happened | ||||
| 					// we cannot just return, because we may have to return the pre-consumed quota | ||||
| 					quota = 0 | ||||
| 				} | ||||
| 				quotaDelta := quota - preConsumedQuota | ||||
| 				err := model.PostConsumeTokenQuota(tokenId, quotaDelta) | ||||
| 				if err != nil { | ||||
| 					common.SysError("error consuming token remain quota: " + err.Error()) | ||||
| 				} | ||||
| 				err = model.CacheUpdateUserQuota(userId) | ||||
| 				if err != nil { | ||||
| 					common.SysError("error update user quota cache: " + err.Error()) | ||||
| 				} | ||||
| 				if quota != 0 { | ||||
| 					logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio) | ||||
| 					model.RecordConsumeLog(userId, promptTokens, completionTokens, textRequest.Model, tokenName, quota, logContent) | ||||
| 					model.UpdateUserUsedQuotaAndRequestCount(userId, quota) | ||||
|  | ||||
| 			quota = promptTokens + int(float64(completionTokens)*completionRatio) | ||||
| 			quota = int(float64(quota) * ratio) | ||||
| 			if ratio != 0 && quota <= 0 { | ||||
| 				quota = 1 | ||||
| 					model.UpdateChannelUsedQuota(channelId, quota) | ||||
| 				} | ||||
| 			} | ||||
| 			totalTokens := promptTokens + completionTokens | ||||
| 			if totalTokens == 0 { | ||||
| 				// in this case, must be some error happened | ||||
| 				// we cannot just return, because we may have to return the pre-consumed quota | ||||
| 				quota = 0 | ||||
| 			} | ||||
| 			quotaDelta := quota - preConsumedQuota | ||||
| 			err := model.PostConsumeTokenQuota(tokenId, quotaDelta) | ||||
| 			if err != nil { | ||||
| 				common.SysError("error consuming token remain quota: " + err.Error()) | ||||
| 			} | ||||
| 			err = model.CacheUpdateUserQuota(userId) | ||||
| 			if err != nil { | ||||
| 				common.SysError("error update user quota cache: " + err.Error()) | ||||
| 			} | ||||
| 			if quota != 0 { | ||||
| 				tokenName := c.GetString("token_name") | ||||
| 				logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio) | ||||
| 				model.RecordConsumeLog(userId, promptTokens, completionTokens, textRequest.Model, tokenName, quota, logContent) | ||||
| 				model.UpdateUserUsedQuotaAndRequestCount(userId, quota) | ||||
| 				channelId := c.GetInt("channel_id") | ||||
| 				model.UpdateChannelUsedQuota(channelId, quota) | ||||
| 			} | ||||
| 		} | ||||
| 		}() | ||||
| 	}() | ||||
| 	switch apiType { | ||||
| 	case APITypeOpenAI: | ||||
| @@ -499,6 +531,26 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode { | ||||
| 		} else { | ||||
| 			return errorWrapper(errors.New("xunfei api does not support non-stream mode"), "invalid_api_type", http.StatusBadRequest) | ||||
| 		} | ||||
| 	case APITypeAIProxyLibrary: | ||||
| 		if isStream { | ||||
| 			err, usage := aiProxyLibraryStreamHandler(c, resp) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			if usage != nil { | ||||
| 				textResponse.Usage = *usage | ||||
| 			} | ||||
| 			return nil | ||||
| 		} else { | ||||
| 			err, usage := aiProxyLibraryHandler(c, resp) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			if usage != nil { | ||||
| 				textResponse.Usage = *usage | ||||
| 			} | ||||
| 			return nil | ||||
| 		} | ||||
| 	default: | ||||
| 		return errorWrapper(errors.New("unknown api type"), "unknown_api_type", http.StatusInternalServerError) | ||||
| 	} | ||||
|   | ||||
| @@ -1,13 +1,38 @@ | ||||
| package controller | ||||
|  | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"github.com/gin-gonic/gin" | ||||
| 	"github.com/pkoukk/tiktoken-go" | ||||
| 	"io" | ||||
| 	"net/http" | ||||
| 	"one-api/common" | ||||
| 	"strconv" | ||||
| ) | ||||
|  | ||||
| var stopFinishReason = "stop" | ||||
|  | ||||
| var tokenEncoderMap = map[string]*tiktoken.Tiktoken{} | ||||
|  | ||||
| func InitTokenEncoders() { | ||||
| 	common.SysLog("initializing token encoders") | ||||
| 	fallbackTokenEncoder, err := tiktoken.EncodingForModel("gpt-3.5-turbo") | ||||
| 	if err != nil { | ||||
| 		common.FatalLog(fmt.Sprintf("failed to get fallback token encoder: %s", err.Error())) | ||||
| 	} | ||||
| 	for model, _ := range common.ModelRatio { | ||||
| 		tokenEncoder, err := tiktoken.EncodingForModel(model) | ||||
| 		if err != nil { | ||||
| 			common.SysError(fmt.Sprintf("using fallback encoder for model %s", model)) | ||||
| 			tokenEncoderMap[model] = fallbackTokenEncoder | ||||
| 			continue | ||||
| 		} | ||||
| 		tokenEncoderMap[model] = tokenEncoder | ||||
| 	} | ||||
| 	common.SysLog("token encoders initialized") | ||||
| } | ||||
|  | ||||
| func getTokenEncoder(model string) *tiktoken.Tiktoken { | ||||
| 	if tokenEncoder, ok := tokenEncoderMap[model]; ok { | ||||
| 		return tokenEncoder | ||||
| @@ -92,15 +117,53 @@ func errorWrapper(err error, code string, statusCode int) *OpenAIErrorWithStatus | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func shouldDisableChannel(err *OpenAIError) bool { | ||||
| func shouldDisableChannel(err *OpenAIError, statusCode int) bool { | ||||
| 	if !common.AutomaticDisableChannelEnabled { | ||||
| 		return false | ||||
| 	} | ||||
| 	if err == nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	if statusCode == http.StatusUnauthorized { | ||||
| 		return true | ||||
| 	} | ||||
| 	if err.Type == "insufficient_quota" || err.Code == "invalid_api_key" || err.Code == "account_deactivated" { | ||||
| 		return true | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func setEventStreamHeaders(c *gin.Context) { | ||||
| 	c.Writer.Header().Set("Content-Type", "text/event-stream") | ||||
| 	c.Writer.Header().Set("Cache-Control", "no-cache") | ||||
| 	c.Writer.Header().Set("Connection", "keep-alive") | ||||
| 	c.Writer.Header().Set("Transfer-Encoding", "chunked") | ||||
| 	c.Writer.Header().Set("X-Accel-Buffering", "no") | ||||
| } | ||||
|  | ||||
| func relayErrorHandler(resp *http.Response) (openAIErrorWithStatusCode *OpenAIErrorWithStatusCode) { | ||||
| 	openAIErrorWithStatusCode = &OpenAIErrorWithStatusCode{ | ||||
| 		StatusCode: resp.StatusCode, | ||||
| 		OpenAIError: OpenAIError{ | ||||
| 			Message: fmt.Sprintf("bad response status code %d", resp.StatusCode), | ||||
| 			Type:    "one_api_error", | ||||
| 			Code:    "bad_response_status_code", | ||||
| 			Param:   strconv.Itoa(resp.StatusCode), | ||||
| 		}, | ||||
| 	} | ||||
| 	responseBody, err := io.ReadAll(resp.Body) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	err = resp.Body.Close() | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	var textResponse TextResponse | ||||
| 	err = json.Unmarshal(responseBody, &textResponse) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	openAIErrorWithStatusCode.OpenAIError = textResponse.Error | ||||
| 	return | ||||
| } | ||||
|   | ||||
| @@ -75,7 +75,7 @@ type XunfeiChatResponse struct { | ||||
| 	} `json:"payload"` | ||||
| } | ||||
|  | ||||
| func requestOpenAI2Xunfei(request GeneralOpenAIRequest, xunfeiAppId string) *XunfeiChatRequest { | ||||
| func requestOpenAI2Xunfei(request GeneralOpenAIRequest, xunfeiAppId string, domain string) *XunfeiChatRequest { | ||||
| 	messages := make([]XunfeiMessage, 0, len(request.Messages)) | ||||
| 	for _, message := range request.Messages { | ||||
| 		if message.Role == "system" { | ||||
| @@ -96,7 +96,7 @@ func requestOpenAI2Xunfei(request GeneralOpenAIRequest, xunfeiAppId string) *Xun | ||||
| 	} | ||||
| 	xunfeiRequest := XunfeiChatRequest{} | ||||
| 	xunfeiRequest.Header.AppId = xunfeiAppId | ||||
| 	xunfeiRequest.Parameter.Chat.Domain = "general" | ||||
| 	xunfeiRequest.Parameter.Chat.Domain = domain | ||||
| 	xunfeiRequest.Parameter.Chat.Temperature = request.Temperature | ||||
| 	xunfeiRequest.Parameter.Chat.TopK = request.N | ||||
| 	xunfeiRequest.Parameter.Chat.MaxTokens = request.MaxTokens | ||||
| @@ -138,6 +138,9 @@ func streamResponseXunfei2OpenAI(xunfeiResponse *XunfeiChatResponse) *ChatComple | ||||
| 	} | ||||
| 	var choice ChatCompletionsStreamResponseChoice | ||||
| 	choice.Delta.Content = xunfeiResponse.Payload.Choices.Text[0].Content | ||||
| 	if xunfeiResponse.Payload.Choices.Status == 2 { | ||||
| 		choice.FinishReason = &stopFinishReason | ||||
| 	} | ||||
| 	response := ChatCompletionsStreamResponse{ | ||||
| 		Object:  "chat.completion.chunk", | ||||
| 		Created: common.GetTimestamp(), | ||||
| @@ -175,15 +178,28 @@ func buildXunfeiAuthUrl(hostUrl string, apiKey, apiSecret string) string { | ||||
|  | ||||
| func xunfeiStreamHandler(c *gin.Context, textRequest GeneralOpenAIRequest, appId string, apiSecret string, apiKey string) (*OpenAIErrorWithStatusCode, *Usage) { | ||||
| 	var usage Usage | ||||
| 	query := c.Request.URL.Query() | ||||
| 	apiVersion := query.Get("api-version") | ||||
| 	if apiVersion == "" { | ||||
| 		apiVersion = c.GetString("api_version") | ||||
| 	} | ||||
| 	if apiVersion == "" { | ||||
| 		apiVersion = "v1.1" | ||||
| 		common.SysLog("api_version not found, use default: " + apiVersion) | ||||
| 	} | ||||
| 	domain := "general" | ||||
| 	if apiVersion == "v2.1" { | ||||
| 		domain = "generalv2" | ||||
| 	} | ||||
| 	hostUrl := fmt.Sprintf("wss://spark-api.xf-yun.com/%s/chat", apiVersion) | ||||
| 	d := websocket.Dialer{ | ||||
| 		HandshakeTimeout: 5 * time.Second, | ||||
| 	} | ||||
| 	hostUrl := "wss://aichat.xf-yun.com/v1/chat" | ||||
| 	conn, resp, err := d.Dial(buildXunfeiAuthUrl(hostUrl, apiKey, apiSecret), nil) | ||||
| 	if err != nil || resp.StatusCode != 101 { | ||||
| 		return errorWrapper(err, "dial_failed", http.StatusInternalServerError), nil | ||||
| 	} | ||||
| 	data := requestOpenAI2Xunfei(textRequest, appId) | ||||
| 	data := requestOpenAI2Xunfei(textRequest, appId, domain) | ||||
| 	err = conn.WriteJSON(data) | ||||
| 	if err != nil { | ||||
| 		return errorWrapper(err, "write_json_failed", http.StatusInternalServerError), nil | ||||
| @@ -214,11 +230,7 @@ func xunfeiStreamHandler(c *gin.Context, textRequest GeneralOpenAIRequest, appId | ||||
| 		} | ||||
| 		stopChan <- true | ||||
| 	}() | ||||
| 	c.Writer.Header().Set("Content-Type", "text/event-stream") | ||||
| 	c.Writer.Header().Set("Cache-Control", "no-cache") | ||||
| 	c.Writer.Header().Set("Connection", "keep-alive") | ||||
| 	c.Writer.Header().Set("Transfer-Encoding", "chunked") | ||||
| 	c.Writer.Header().Set("X-Accel-Buffering", "no") | ||||
| 	setEventStreamHeaders(c) | ||||
| 	c.Stream(func(w io.Writer) bool { | ||||
| 		select { | ||||
| 		case xunfeiResponse := <-dataChan: | ||||
|   | ||||
| @@ -163,7 +163,6 @@ func responseZhipu2OpenAI(response *ZhipuResponse) *OpenAITextResponse { | ||||
| func streamResponseZhipu2OpenAI(zhipuResponse string) *ChatCompletionsStreamResponse { | ||||
| 	var choice ChatCompletionsStreamResponseChoice | ||||
| 	choice.Delta.Content = zhipuResponse | ||||
| 	choice.FinishReason = "" | ||||
| 	response := ChatCompletionsStreamResponse{ | ||||
| 		Object:  "chat.completion.chunk", | ||||
| 		Created: common.GetTimestamp(), | ||||
| @@ -176,7 +175,7 @@ func streamResponseZhipu2OpenAI(zhipuResponse string) *ChatCompletionsStreamResp | ||||
| func streamMetaResponseZhipu2OpenAI(zhipuResponse *ZhipuStreamMetaResponse) (*ChatCompletionsStreamResponse, *Usage) { | ||||
| 	var choice ChatCompletionsStreamResponseChoice | ||||
| 	choice.Delta.Content = "" | ||||
| 	choice.FinishReason = "stop" | ||||
| 	choice.FinishReason = &stopFinishReason | ||||
| 	response := ChatCompletionsStreamResponse{ | ||||
| 		Id:      zhipuResponse.RequestId, | ||||
| 		Object:  "chat.completion.chunk", | ||||
| @@ -225,11 +224,7 @@ func zhipuStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithSt | ||||
| 		} | ||||
| 		stopChan <- true | ||||
| 	}() | ||||
| 	c.Writer.Header().Set("Content-Type", "text/event-stream") | ||||
| 	c.Writer.Header().Set("Cache-Control", "no-cache") | ||||
| 	c.Writer.Header().Set("Connection", "keep-alive") | ||||
| 	c.Writer.Header().Set("Transfer-Encoding", "chunked") | ||||
| 	c.Writer.Header().Set("X-Accel-Buffering", "no") | ||||
| 	setEventStreamHeaders(c) | ||||
| 	c.Stream(func(w io.Writer) bool { | ||||
| 		select { | ||||
| 		case data := <-dataChan: | ||||
|   | ||||
| @@ -24,6 +24,7 @@ const ( | ||||
| 	RelayModeModerations | ||||
| 	RelayModeImagesGenerations | ||||
| 	RelayModeEdits | ||||
| 	RelayModeAudio | ||||
| ) | ||||
|  | ||||
| // https://platform.openai.com/docs/api-reference/chat | ||||
| @@ -40,6 +41,7 @@ type GeneralOpenAIRequest struct { | ||||
| 	Input       any       `json:"input,omitempty"` | ||||
| 	Instruction string    `json:"instruction,omitempty"` | ||||
| 	Size        string    `json:"size,omitempty"` | ||||
| 	Functions   any       `json:"functions,omitempty"` | ||||
| } | ||||
|  | ||||
| type ChatRequest struct { | ||||
| @@ -62,6 +64,10 @@ type ImageRequest struct { | ||||
| 	Size   string `json:"size"` | ||||
| } | ||||
|  | ||||
| type AudioResponse struct { | ||||
| 	Text string `json:"text,omitempty"` | ||||
| } | ||||
|  | ||||
| type Usage struct { | ||||
| 	PromptTokens     int `json:"prompt_tokens"` | ||||
| 	CompletionTokens int `json:"completion_tokens"` | ||||
| @@ -124,7 +130,7 @@ type ChatCompletionsStreamResponseChoice struct { | ||||
| 	Delta struct { | ||||
| 		Content string `json:"content"` | ||||
| 	} `json:"delta"` | ||||
| 	FinishReason string `json:"finish_reason,omitempty"` | ||||
| 	FinishReason *string `json:"finish_reason"` | ||||
| } | ||||
|  | ||||
| type ChatCompletionsStreamResponse struct { | ||||
| @@ -158,11 +164,15 @@ func Relay(c *gin.Context) { | ||||
| 		relayMode = RelayModeImagesGenerations | ||||
| 	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/edits") { | ||||
| 		relayMode = RelayModeEdits | ||||
| 	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio") { | ||||
| 		relayMode = RelayModeAudio | ||||
| 	} | ||||
| 	var err *OpenAIErrorWithStatusCode | ||||
| 	switch relayMode { | ||||
| 	case RelayModeImagesGenerations: | ||||
| 		err = relayImageHelper(c, relayMode) | ||||
| 	case RelayModeAudio: | ||||
| 		err = relayAudioHelper(c, relayMode) | ||||
| 	default: | ||||
| 		err = relayTextHelper(c, relayMode) | ||||
| 	} | ||||
| @@ -176,7 +186,7 @@ func Relay(c *gin.Context) { | ||||
| 			c.Redirect(http.StatusTemporaryRedirect, fmt.Sprintf("%s?retry=%d", c.Request.URL.Path, retryTimes-1)) | ||||
| 		} else { | ||||
| 			if err.StatusCode == http.StatusTooManyRequests { | ||||
| 				err.OpenAIError.Message = "当前分组负载已饱和,请稍后再试,或升级账户以提升服务质量。" | ||||
| 				err.OpenAIError.Message = "当前分组上游负载已饱和,请稍后再试" | ||||
| 			} | ||||
| 			c.JSON(err.StatusCode, gin.H{ | ||||
| 				"error": err.OpenAIError, | ||||
| @@ -185,7 +195,7 @@ func Relay(c *gin.Context) { | ||||
| 		channelId := c.GetInt("channel_id") | ||||
| 		common.SysError(fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message)) | ||||
| 		// https://platform.openai.com/docs/guides/error-codes/api-errors | ||||
| 		if shouldDisableChannel(&err.OpenAIError) { | ||||
| 		if shouldDisableChannel(&err.OpenAIError, err.StatusCode) { | ||||
| 			channelId := c.GetInt("channel_id") | ||||
| 			channelName := c.GetString("channel_name") | ||||
| 			disableChannel(channelId, channelName, err.Message) | ||||
| @@ -207,10 +217,10 @@ func RelayNotImplemented(c *gin.Context) { | ||||
|  | ||||
| func RelayNotFound(c *gin.Context) { | ||||
| 	err := OpenAIError{ | ||||
| 		Message: fmt.Sprintf("API not found: %s:%s", c.Request.Method, c.Request.URL.Path), | ||||
| 		Type:    "one_api_error", | ||||
| 		Message: fmt.Sprintf("Invalid URL (%s %s)", c.Request.Method, c.Request.URL.Path), | ||||
| 		Type:    "invalid_request_error", | ||||
| 		Param:   "", | ||||
| 		Code:    "api_not_found", | ||||
| 		Code:    "", | ||||
| 	} | ||||
| 	c.JSON(http.StatusNotFound, gin.H{ | ||||
| 		"error": err, | ||||
|   | ||||
| @@ -109,10 +109,10 @@ func AddToken(c *gin.Context) { | ||||
| 		}) | ||||
| 		return | ||||
| 	} | ||||
| 	if len(token.Name) == 0 || len(token.Name) > 20 { | ||||
| 	if len(token.Name) > 30 { | ||||
| 		c.JSON(http.StatusOK, gin.H{ | ||||
| 			"success": false, | ||||
| 			"message": "令牌名称长度必须在1-20之间", | ||||
| 			"message": "令牌名称过长", | ||||
| 		}) | ||||
| 		return | ||||
| 	} | ||||
| @@ -171,6 +171,13 @@ func UpdateToken(c *gin.Context) { | ||||
| 		}) | ||||
| 		return | ||||
| 	} | ||||
| 	if len(token.Name) > 30 { | ||||
| 		c.JSON(http.StatusOK, gin.H{ | ||||
| 			"success": false, | ||||
| 			"message": "令牌名称过长", | ||||
| 		}) | ||||
| 		return | ||||
| 	} | ||||
| 	cleanToken, err := model.GetTokenByIds(token.Id, userId) | ||||
| 	if err != nil { | ||||
| 		c.JSON(http.StatusOK, gin.H{ | ||||
|   | ||||
							
								
								
									
										10
									
								
								go.mod
									
									
									
									
									
								
							
							
						
						
									
										10
									
								
								go.mod
									
									
									
									
									
								
							| @@ -14,11 +14,11 @@ require ( | ||||
| 	github.com/golang-jwt/jwt v3.2.2+incompatible | ||||
| 	github.com/google/uuid v1.3.0 | ||||
| 	github.com/gorilla/websocket v1.5.0 | ||||
| 	github.com/pkoukk/tiktoken-go v0.1.1 | ||||
| 	github.com/pkoukk/tiktoken-go v0.1.5 | ||||
| 	golang.org/x/crypto v0.9.0 | ||||
| 	gorm.io/driver/mysql v1.4.3 | ||||
| 	gorm.io/driver/sqlite v1.4.3 | ||||
| 	gorm.io/gorm v1.24.0 | ||||
| 	gorm.io/gorm v1.25.0 | ||||
| ) | ||||
|  | ||||
| require ( | ||||
| @@ -26,7 +26,7 @@ require ( | ||||
| 	github.com/cespare/xxhash/v2 v2.1.2 // indirect | ||||
| 	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect | ||||
| 	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect | ||||
| 	github.com/dlclark/regexp2 v1.8.1 // indirect | ||||
| 	github.com/dlclark/regexp2 v1.10.0 // indirect | ||||
| 	github.com/gabriel-vasile/mimetype v1.4.2 // indirect | ||||
| 	github.com/gin-contrib/sse v0.1.0 // indirect | ||||
| 	github.com/go-playground/locales v0.14.1 // indirect | ||||
| @@ -36,6 +36,9 @@ require ( | ||||
| 	github.com/gorilla/context v1.1.1 // indirect | ||||
| 	github.com/gorilla/securecookie v1.1.1 // indirect | ||||
| 	github.com/gorilla/sessions v1.2.1 // indirect | ||||
| 	github.com/jackc/pgpassfile v1.0.0 // indirect | ||||
| 	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect | ||||
| 	github.com/jackc/pgx/v5 v5.3.1 // indirect | ||||
| 	github.com/jinzhu/inflection v1.0.0 // indirect | ||||
| 	github.com/jinzhu/now v1.1.5 // indirect | ||||
| 	github.com/json-iterator/go v1.1.12 // indirect | ||||
| @@ -54,4 +57,5 @@ require ( | ||||
| 	golang.org/x/text v0.9.0 // indirect | ||||
| 	google.golang.org/protobuf v1.30.0 // indirect | ||||
| 	gopkg.in/yaml.v3 v3.0.1 // indirect | ||||
| 	gorm.io/driver/postgres v1.5.2 // indirect | ||||
| ) | ||||
|   | ||||
							
								
								
									
										18
									
								
								go.sum
									
									
									
									
									
								
							
							
						
						
									
										18
									
								
								go.sum
									
									
									
									
									
								
							| @@ -12,8 +12,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c | ||||
| github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= | ||||
| github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= | ||||
| github.com/dlclark/regexp2 v1.8.1 h1:6Lcdwya6GjPUNsBct8Lg/yRPwMhABj269AAzdGSiR+0= | ||||
| github.com/dlclark/regexp2 v1.8.1/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= | ||||
| github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= | ||||
| github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= | ||||
| github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= | ||||
| github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= | ||||
| github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= | ||||
| @@ -69,6 +69,12 @@ github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7Fsg | ||||
| github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= | ||||
| github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= | ||||
| github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= | ||||
| github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= | ||||
| github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= | ||||
| github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= | ||||
| github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= | ||||
| github.com/jackc/pgx/v5 v5.3.1 h1:Fcr8QJ1ZeLi5zsPZqQeUZhNhxfkkKBOgJuYkJHoBOtU= | ||||
| github.com/jackc/pgx/v5 v5.3.1/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8= | ||||
| github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= | ||||
| github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= | ||||
| github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= | ||||
| @@ -112,8 +118,8 @@ github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZO | ||||
| github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= | ||||
| github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= | ||||
| github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= | ||||
| github.com/pkoukk/tiktoken-go v0.1.1 h1:jtkYlIECjyM9OW1w4rjPmTohK4arORP9V25y6TM6nXo= | ||||
| github.com/pkoukk/tiktoken-go v0.1.1/go.mod h1:boMWvk9pQCOTx11pgu0DrIdrAKgQzzJKUP6vLXaz7Rw= | ||||
| github.com/pkoukk/tiktoken-go v0.1.5 h1:hAlT4dCf6Uk50x8E7HQrddhH3EWMKUN+LArExQQsQx4= | ||||
| github.com/pkoukk/tiktoken-go v0.1.5/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg= | ||||
| github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= | ||||
| github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | ||||
| github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= | ||||
| @@ -187,9 +193,13 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= | ||||
| gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | ||||
| gorm.io/driver/mysql v1.4.3 h1:/JhWJhO2v17d8hjApTltKNADm7K7YI2ogkR7avJUL3k= | ||||
| gorm.io/driver/mysql v1.4.3/go.mod h1:sSIebwZAVPiT+27jK9HIwvsqOGKx3YMPmrA3mBJR10c= | ||||
| gorm.io/driver/postgres v1.5.2 h1:ytTDxxEv+MplXOfFe3Lzm7SjG09fcdb3Z/c056DTBx0= | ||||
| gorm.io/driver/postgres v1.5.2/go.mod h1:fmpX0m2I1PKuR7mKZiEluwrP3hbs+ps7JIGMUBpCgl8= | ||||
| gorm.io/driver/sqlite v1.4.3 h1:HBBcZSDnWi5BW3B3rwvVTc510KGkBkexlOg0QrmLUuU= | ||||
| gorm.io/driver/sqlite v1.4.3/go.mod h1:0Aq3iPO+v9ZKbcdiz8gLWRw5VOPcBOPUQJFLq5e2ecI= | ||||
| gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= | ||||
| gorm.io/gorm v1.24.0 h1:j/CoiSm6xpRpmzbFJsQHYj+I8bGYWLXVHeYEyyKlF74= | ||||
| gorm.io/gorm v1.24.0/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= | ||||
| gorm.io/gorm v1.25.0 h1:+KtYtb2roDz14EQe4bla8CbQlmb9dN3VejSai3lprfU= | ||||
| gorm.io/gorm v1.25.0/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= | ||||
| rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= | ||||
|   | ||||
							
								
								
									
										13
									
								
								i18n/en.json
									
									
									
									
									
								
							
							
						
						
									
										13
									
								
								i18n/en.json
									
									
									
									
									
								
							| @@ -39,8 +39,8 @@ | ||||
|   "兑换码个数必须大于0": "The number of redemption codes must be greater than 0", | ||||
|   "一次兑换码批量生成的个数不能大于 100": "The number of redemption codes generated in a batch cannot be greater than 100", | ||||
|   "通过令牌「%s」使用模型 %s 消耗 %s(模型倍率 %.2f,分组倍率 %.2f)": "Using model %s with token %s consumes %s (model rate %.2f, group rate %.2f)", | ||||
|   "当前分组负载已饱和,请稍后再试,或升级账户以提升服务质量。": "The current group load is saturated, please try again later, or upgrade your account to improve service quality.", | ||||
|   "令牌名称长度必须在1-20之间": "The length of the token name must be between 1-20", | ||||
|   "当前分组上游负载已饱和,请稍后再试": "The current group load is saturated, please try again later", | ||||
|   "令牌名称过长": "Token name is too long", | ||||
|   "令牌已过期,无法启用,请先修改令牌过期时间,或者设置为永不过期": "The token has expired and cannot be enabled. Please modify the expiration time of the token, or set it to never expire.", | ||||
|   "令牌可用额度已用尽,无法启用,请先修改令牌剩余额度,或者设置为无限额度": "The available quota of the token has been used up and cannot be enabled. Please modify the remaining quota of the token, or set it to unlimited quota", | ||||
|   "管理员关闭了密码登录": "The administrator has turned off password login", | ||||
| @@ -229,7 +229,7 @@ | ||||
|   "已是最新版本": "Is the latest version", | ||||
|   "检查更新": "Check for updates", | ||||
|   "公告": "Announcement", | ||||
|   "在此输入新的公告内容": "Enter new announcement content here", | ||||
|   "在此输入新的公告内容,支持 Markdown & HTML 代码": "Enter the new announcement content here, supports Markdown & HTML code", | ||||
|   "保存公告": "Save Announcement", | ||||
|   "个性化设置": "Personalization Settings", | ||||
|   "系统名称": "System Name", | ||||
| @@ -518,5 +518,10 @@ | ||||
|   ",图片演示。": "related image demo.", | ||||
|   "令牌创建成功,请在列表页面点击复制获取令牌!": "Token created successfully, please click copy on the list page to get the token!", | ||||
|   "代理": "Proxy", | ||||
|   "此项可选,用于通过代理站来进行 API 调用,请输入代理站地址,格式为:https://domain.com": "This is optional, used to make API calls through the proxy site, please enter the proxy site address, the format is: https://domain.com" | ||||
|   "此项可选,用于通过代理站来进行 API 调用,请输入代理站地址,格式为:https://domain.com": "This is optional, used to make API calls through the proxy site, please enter the proxy site address, the format is: https://domain.com", | ||||
|   "取消密码登录将导致所有未绑定其他登录方式的用户(包括管理员)无法通过密码登录,确认取消?": "Canceling password login will cause all users (including administrators) who have not bound other login methods to be unable to log in via password, confirm cancel?", | ||||
|   "按照如下格式输入:": "Enter in the following format:", | ||||
|   "模型版本": "Model version", | ||||
|   "请输入星火大模型版本,注意是接口地址中的版本号,例如:v2.1": "Please enter the version of the Starfire model, note that it is the version number in the interface address, for example: v2.1", | ||||
|   "点击查看": "click to view" | ||||
| } | ||||
|   | ||||
							
								
								
									
										4
									
								
								main.go
									
									
									
									
									
								
							
							
						
						
									
										4
									
								
								main.go
									
									
									
									
									
								
							| @@ -26,6 +26,9 @@ func main() { | ||||
| 	if os.Getenv("GIN_MODE") != "debug" { | ||||
| 		gin.SetMode(gin.ReleaseMode) | ||||
| 	} | ||||
| 	if common.DebugEnabled { | ||||
| 		common.SysLog("running in debug mode") | ||||
| 	} | ||||
| 	// Initialize SQL Database | ||||
| 	err := model.InitDB() | ||||
| 	if err != nil { | ||||
| @@ -74,6 +77,7 @@ func main() { | ||||
| 		} | ||||
| 		go controller.AutomaticallyTestChannels(frequency) | ||||
| 	} | ||||
| 	controller.InitTokenEncoders() | ||||
|  | ||||
| 	// Initialize HTTP server | ||||
| 	server := gin.Default() | ||||
|   | ||||
| @@ -58,7 +58,10 @@ func Distribute() func(c *gin.Context) { | ||||
| 		} else { | ||||
| 			// Select a channel for the user | ||||
| 			var modelRequest ModelRequest | ||||
| 			err := common.UnmarshalBodyReusable(c, &modelRequest) | ||||
| 			var err error | ||||
| 			if !strings.HasPrefix(c.Request.URL.Path, "/v1/audio") { | ||||
| 				err = common.UnmarshalBodyReusable(c, &modelRequest) | ||||
| 			} | ||||
| 			if err != nil { | ||||
| 				c.JSON(http.StatusBadRequest, gin.H{ | ||||
| 					"error": gin.H{ | ||||
| @@ -84,6 +87,11 @@ func Distribute() func(c *gin.Context) { | ||||
| 					modelRequest.Model = "dall-e" | ||||
| 				} | ||||
| 			} | ||||
| 			if strings.HasPrefix(c.Request.URL.Path, "/v1/audio") { | ||||
| 				if modelRequest.Model == "" { | ||||
| 					modelRequest.Model = "whisper-1" | ||||
| 				} | ||||
| 			} | ||||
| 			channel, err = model.CacheGetRandomSatisfiedChannel(userGroup, modelRequest.Model) | ||||
| 			if err != nil { | ||||
| 				message := fmt.Sprintf("当前分组 %s 下对于模型 %s 无可用渠道", userGroup, modelRequest.Model) | ||||
| @@ -107,8 +115,13 @@ func Distribute() func(c *gin.Context) { | ||||
| 		c.Set("model_mapping", channel.ModelMapping) | ||||
| 		c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key)) | ||||
| 		c.Set("base_url", channel.BaseURL) | ||||
| 		if channel.Type == common.ChannelTypeAzure { | ||||
| 		switch channel.Type { | ||||
| 		case common.ChannelTypeAzure: | ||||
| 			c.Set("api_version", channel.Other) | ||||
| 		case common.ChannelTypeXunfei: | ||||
| 			c.Set("api_version", channel.Other) | ||||
| 		case common.ChannelTypeAIProxyLibrary: | ||||
| 			c.Set("library_id", channel.Other) | ||||
| 		} | ||||
| 		c.Next() | ||||
| 	} | ||||
|   | ||||
| @@ -95,6 +95,14 @@ func CacheUpdateUserQuota(id int) error { | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| func CacheDecreaseUserQuota(id int, quota int) error { | ||||
| 	if !common.RedisEnabled { | ||||
| 		return nil | ||||
| 	} | ||||
| 	err := common.RedisDecrease(fmt.Sprintf("user_quota:%d", id), int64(quota)) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| func CacheIsUserEnabled(userId int) bool { | ||||
| 	if !common.RedisEnabled { | ||||
| 		return IsUserEnabled(userId) | ||||
|   | ||||
| @@ -2,10 +2,13 @@ package model | ||||
|  | ||||
| import ( | ||||
| 	"gorm.io/driver/mysql" | ||||
| 	"gorm.io/driver/postgres" | ||||
| 	"gorm.io/driver/sqlite" | ||||
| 	"gorm.io/gorm" | ||||
| 	"one-api/common" | ||||
| 	"os" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
| var DB *gorm.DB | ||||
| @@ -33,34 +36,52 @@ func createRootAccountIfNeed() error { | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func CountTable(tableName string) (num int64) { | ||||
| 	DB.Table(tableName).Count(&num) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func InitDB() (err error) { | ||||
| 	var db *gorm.DB | ||||
| func chooseDB() (*gorm.DB, error) { | ||||
| 	if os.Getenv("SQL_DSN") != "" { | ||||
| 		dsn := os.Getenv("SQL_DSN") | ||||
| 		if strings.HasPrefix(dsn, "postgres://") { | ||||
| 			// Use PostgreSQL | ||||
| 			common.SysLog("using PostgreSQL as database") | ||||
| 			return gorm.Open(postgres.New(postgres.Config{ | ||||
| 				DSN:                  dsn, | ||||
| 				PreferSimpleProtocol: true, // disables implicit prepared statement usage | ||||
| 			}), &gorm.Config{ | ||||
| 				PrepareStmt: true, // precompile SQL | ||||
| 			}) | ||||
| 		} | ||||
| 		// Use MySQL | ||||
| 		common.SysLog("using MySQL as database") | ||||
| 		db, err = gorm.Open(mysql.Open(os.Getenv("SQL_DSN")), &gorm.Config{ | ||||
| 			PrepareStmt: true, // precompile SQL | ||||
| 		}) | ||||
| 	} else { | ||||
| 		// Use SQLite | ||||
| 		common.SysLog("SQL_DSN not set, using SQLite as database") | ||||
| 		common.UsingSQLite = true | ||||
| 		db, err = gorm.Open(sqlite.Open(common.SQLitePath), &gorm.Config{ | ||||
| 		return gorm.Open(mysql.Open(dsn), &gorm.Config{ | ||||
| 			PrepareStmt: true, // precompile SQL | ||||
| 		}) | ||||
| 	} | ||||
| 	common.SysLog("database connected") | ||||
| 	// Use SQLite | ||||
| 	common.SysLog("SQL_DSN not set, using SQLite as database") | ||||
| 	common.UsingSQLite = true | ||||
| 	return gorm.Open(sqlite.Open(common.SQLitePath), &gorm.Config{ | ||||
| 		PrepareStmt: true, // precompile SQL | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func InitDB() (err error) { | ||||
| 	db, err := chooseDB() | ||||
| 	if err == nil { | ||||
| 		if common.DebugEnabled { | ||||
| 			db = db.Debug() | ||||
| 		} | ||||
| 		DB = db | ||||
| 		sqlDB, err := DB.DB() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		sqlDB.SetMaxIdleConns(common.GetOrDefault("SQL_MAX_IDLE_CONNS", 100)) | ||||
| 		sqlDB.SetMaxOpenConns(common.GetOrDefault("SQL_MAX_OPEN_CONNS", 1000)) | ||||
| 		sqlDB.SetConnMaxLifetime(time.Second * time.Duration(common.GetOrDefault("SQL_MAX_LIFETIME", 60))) | ||||
|  | ||||
| 		if !common.IsMasterNode { | ||||
| 			return nil | ||||
| 		} | ||||
| 		err := db.AutoMigrate(&Channel{}) | ||||
| 		err = db.AutoMigrate(&Channel{}) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|   | ||||
| @@ -26,8 +26,8 @@ func SetRelayRouter(router *gin.Engine) { | ||||
| 		relayV1Router.POST("/images/variations", controller.RelayNotImplemented) | ||||
| 		relayV1Router.POST("/embeddings", controller.Relay) | ||||
| 		relayV1Router.POST("/engines/:model/embeddings", controller.Relay) | ||||
| 		relayV1Router.POST("/audio/transcriptions", controller.RelayNotImplemented) | ||||
| 		relayV1Router.POST("/audio/translations", controller.RelayNotImplemented) | ||||
| 		relayV1Router.POST("/audio/transcriptions", controller.Relay) | ||||
| 		relayV1Router.POST("/audio/translations", controller.Relay) | ||||
| 		relayV1Router.GET("/files", controller.RelayNotImplemented) | ||||
| 		relayV1Router.POST("/files", controller.RelayNotImplemented) | ||||
| 		relayV1Router.DELETE("/files/:id", controller.RelayNotImplemented) | ||||
|   | ||||
| @@ -18,7 +18,7 @@ func SetWebRouter(router *gin.Engine, buildFS embed.FS, indexPage []byte) { | ||||
| 	router.Use(middleware.Cache()) | ||||
| 	router.Use(static.Serve("/", common.EmbedFolder(buildFS, "web/build"))) | ||||
| 	router.NoRoute(func(c *gin.Context) { | ||||
| 		if strings.HasPrefix(c.Request.RequestURI, "/v1") { | ||||
| 		if strings.HasPrefix(c.Request.RequestURI, "/v1") || strings.HasPrefix(c.Request.RequestURI, "/api") { | ||||
| 			controller.RelayNotFound(c) | ||||
| 			return | ||||
| 		} | ||||
|   | ||||
| @@ -368,7 +368,7 @@ const ChannelsTable = () => { | ||||
|                       }} style={{ cursor: 'pointer' }}> | ||||
|                       {renderBalance(channel.type, channel.balance)} | ||||
|                     </span>} | ||||
|                       content="点击更新" | ||||
|                       content='点击更新' | ||||
|                       basic | ||||
|                     /> | ||||
|                   </Table.Cell> | ||||
|   | ||||
| @@ -43,6 +43,7 @@ function renderType(type) { | ||||
|  | ||||
| const LogsTable = () => { | ||||
|   const [logs, setLogs] = useState([]); | ||||
|   const [showStat, setShowStat] = useState(false); | ||||
|   const [loading, setLoading] = useState(true); | ||||
|   const [activePage, setActivePage] = useState(1); | ||||
|   const [searchKeyword, setSearchKeyword] = useState(''); | ||||
| @@ -92,6 +93,17 @@ const LogsTable = () => { | ||||
|     } | ||||
|   }; | ||||
|  | ||||
|   const handleEyeClick = async () => { | ||||
|     if (!showStat) { | ||||
|       if (isAdminUser) { | ||||
|         await getLogStat(); | ||||
|       } else { | ||||
|         await getLogSelfStat(); | ||||
|       } | ||||
|     } | ||||
|     setShowStat(!showStat); | ||||
|   }; | ||||
|  | ||||
|   const loadLogs = async (startIdx) => { | ||||
|     let url = ''; | ||||
|     let localStartTimestamp = Date.parse(start_timestamp) / 1000; | ||||
| @@ -129,13 +141,8 @@ const LogsTable = () => { | ||||
|  | ||||
|   const refresh = async () => { | ||||
|     setLoading(true); | ||||
|     setActivePage(1) | ||||
|     setActivePage(1); | ||||
|     await loadLogs(0); | ||||
|     if (isAdminUser) { | ||||
|       getLogStat().then(); | ||||
|     } else { | ||||
|       getLogSelfStat().then(); | ||||
|     } | ||||
|   }; | ||||
|  | ||||
|   useEffect(() => { | ||||
| @@ -169,7 +176,7 @@ const LogsTable = () => { | ||||
|     if (logs.length === 0) return; | ||||
|     setLoading(true); | ||||
|     let sortedLogs = [...logs]; | ||||
|     if (typeof sortedLogs[0][key] === 'string'){ | ||||
|     if (typeof sortedLogs[0][key] === 'string') { | ||||
|       sortedLogs.sort((a, b) => { | ||||
|         return ('' + a[key]).localeCompare(b[key]); | ||||
|       }); | ||||
| @@ -190,7 +197,12 @@ const LogsTable = () => { | ||||
|   return ( | ||||
|     <> | ||||
|       <Segment> | ||||
|         <Header as='h3'>使用明细(总消耗额度:{renderQuota(stat.quota)})</Header> | ||||
|         <Header as='h3'> | ||||
|           使用明细(总消耗额度: | ||||
|           {showStat && renderQuota(stat.quota)} | ||||
|           {!showStat && <span onClick={handleEyeClick} style={{ cursor: 'pointer', color: 'gray' }}>点击查看</span>} | ||||
|           ) | ||||
|         </Header> | ||||
|         <Form> | ||||
|           <Form.Group> | ||||
|             { | ||||
| @@ -312,7 +324,7 @@ const LogsTable = () => { | ||||
|               .map((log, idx) => { | ||||
|                 if (log.deleted) return <></>; | ||||
|                 return ( | ||||
|                   <Table.Row key={log.created_at}> | ||||
|                   <Table.Row key={log.id}> | ||||
|                     <Table.Cell>{renderTimestamp(log.created_at)}</Table.Cell> | ||||
|                     { | ||||
|                       isAdminUser && ( | ||||
|   | ||||
| @@ -112,7 +112,7 @@ const OtherSetting = () => { | ||||
|           <Form.Group widths='equal'> | ||||
|             <Form.TextArea | ||||
|               label='公告' | ||||
|               placeholder='在此输入新的公告内容' | ||||
|               placeholder='在此输入新的公告内容,支持 Markdown & HTML 代码' | ||||
|               value={inputs.Notice} | ||||
|               name='Notice' | ||||
|               onChange={handleInputChange} | ||||
|   | ||||
| @@ -1,5 +1,5 @@ | ||||
| import React, { useEffect, useState } from 'react'; | ||||
| import { Button, Form, Label, Message, Pagination, Table } from 'semantic-ui-react'; | ||||
| import { Button, Form, Label, Popup, Pagination, Table } from 'semantic-ui-react'; | ||||
| import { Link } from 'react-router-dom'; | ||||
| import { API, copy, showError, showInfo, showSuccess, showWarning, timestamp2string } from '../helpers'; | ||||
|  | ||||
| @@ -240,15 +240,25 @@ const RedemptionsTable = () => { | ||||
|                       > | ||||
|                         复制 | ||||
|                       </Button> | ||||
|                       <Button | ||||
|                         size={'small'} | ||||
|                         negative | ||||
|                         onClick={() => { | ||||
|                           manageRedemption(redemption.id, 'delete', idx); | ||||
|                         }} | ||||
|                       <Popup | ||||
|                         trigger={ | ||||
|                           <Button size='small' negative> | ||||
|                             删除 | ||||
|                           </Button> | ||||
|                         } | ||||
|                         on='click' | ||||
|                         flowing | ||||
|                         hoverable | ||||
|                       > | ||||
|                         删除 | ||||
|                       </Button> | ||||
|                         <Button | ||||
|                           negative | ||||
|                           onClick={() => { | ||||
|                             manageRedemption(redemption.id, 'delete', idx); | ||||
|                           }} | ||||
|                         > | ||||
|                           确认删除 | ||||
|                         </Button> | ||||
|                       </Popup> | ||||
|                       <Button | ||||
|                         size={'small'} | ||||
|                         disabled={redemption.status === 3}  // used | ||||
|   | ||||
| @@ -1,5 +1,5 @@ | ||||
| import React, { useEffect, useState } from 'react'; | ||||
| import { Button, Divider, Form, Grid, Header, Input, Message } from 'semantic-ui-react'; | ||||
| import { Button, Divider, Form, Grid, Header, Modal, Message } from 'semantic-ui-react'; | ||||
| import { API, removeTrailingSlash, showError } from '../helpers'; | ||||
|  | ||||
| const SystemSetting = () => { | ||||
| @@ -33,6 +33,7 @@ const SystemSetting = () => { | ||||
|   let [loading, setLoading] = useState(false); | ||||
|   const [EmailDomainWhitelist, setEmailDomainWhitelist] = useState([]); | ||||
|   const [restrictedDomainInput, setRestrictedDomainInput] = useState(''); | ||||
|   const [showPasswordWarningModal, setShowPasswordWarningModal] = useState(false); | ||||
|  | ||||
|   const getOptions = async () => { | ||||
|     const res = await API.get('/api/option/'); | ||||
| @@ -95,6 +96,11 @@ const SystemSetting = () => { | ||||
|   }; | ||||
|  | ||||
|   const handleInputChange = async (e, { name, value }) => { | ||||
|     if (name === 'PasswordLoginEnabled' && inputs[name] === 'true') { | ||||
|       // block disabling password login | ||||
|       setShowPasswordWarningModal(true); | ||||
|       return; | ||||
|     } | ||||
|     if ( | ||||
|       name === 'Notice' || | ||||
|       name.startsWith('SMTP') || | ||||
| @@ -243,6 +249,32 @@ const SystemSetting = () => { | ||||
|               name='PasswordLoginEnabled' | ||||
|               onChange={handleInputChange} | ||||
|             /> | ||||
|             { | ||||
|               showPasswordWarningModal && | ||||
|               <Modal | ||||
|                 open={showPasswordWarningModal} | ||||
|                 onClose={() => setShowPasswordWarningModal(false)} | ||||
|                 size={'tiny'} | ||||
|                 style={{ maxWidth: '450px' }} | ||||
|               > | ||||
|                 <Modal.Header>警告</Modal.Header> | ||||
|                 <Modal.Content> | ||||
|                   <p>取消密码登录将导致所有未绑定其他登录方式的用户(包括管理员)无法通过密码登录,确认取消?</p> | ||||
|                 </Modal.Content> | ||||
|                 <Modal.Actions> | ||||
|                   <Button onClick={() => setShowPasswordWarningModal(false)}>取消</Button> | ||||
|                   <Button | ||||
|                     color='yellow' | ||||
|                     onClick={async () => { | ||||
|                       setShowPasswordWarningModal(false); | ||||
|                       await updateOption('PasswordLoginEnabled', 'false'); | ||||
|                     }} | ||||
|                   > | ||||
|                     确定 | ||||
|                   </Button> | ||||
|                 </Modal.Actions> | ||||
|               </Modal> | ||||
|             } | ||||
|             <Form.Checkbox | ||||
|               checked={inputs.PasswordRegisterEnabled === 'true'} | ||||
|               label='允许通过密码进行注册' | ||||
|   | ||||
| @@ -7,7 +7,10 @@ export const CHANNEL_OPTIONS = [ | ||||
|   { key: 17, text: '阿里通义千问', value: 17, color: 'orange' }, | ||||
|   { key: 18, text: '讯飞星火认知', value: 18, color: 'blue' }, | ||||
|   { key: 16, text: '智谱 ChatGLM', value: 16, color: 'violet' }, | ||||
|   { key: 19, text: '360 智脑', value: 19, color: 'blue' }, | ||||
|   { key: 8, text: '自定义渠道', value: 8, color: 'pink' }, | ||||
|   { key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple' }, | ||||
|   { key: 20, text: '代理:OpenRouter', value: 20, color: 'black' }, | ||||
|   { key: 2, text: '代理:API2D', value: 2, color: 'blue' }, | ||||
|   { key: 5, text: '代理:OpenAI-SB', value: 5, color: 'brown' }, | ||||
|   { key: 7, text: '代理:OhMyGPT', value: 7, color: 'purple' }, | ||||
|   | ||||
| @@ -1,6 +1,11 @@ | ||||
| import { toast } from 'react-toastify'; | ||||
| import { toastConstants } from '../constants'; | ||||
| import React from 'react'; | ||||
|  | ||||
| const HTMLToastContent = ({ htmlContent }) => { | ||||
|   return <div dangerouslySetInnerHTML={{ __html: htmlContent }} />; | ||||
| }; | ||||
| export default HTMLToastContent; | ||||
| export function isAdmin() { | ||||
|   let user = localStorage.getItem('user'); | ||||
|   if (!user) return false; | ||||
| @@ -107,8 +112,12 @@ export function showInfo(message) { | ||||
|   toast.info(message, showInfoOptions); | ||||
| } | ||||
|  | ||||
| export function showNotice(message) { | ||||
|   toast.info(message, showNoticeOptions); | ||||
| export function showNotice(message, isHTML = false) { | ||||
|   if (isHTML) { | ||||
|     toast(<HTMLToastContent htmlContent={message} />, showNoticeOptions); | ||||
|   } else { | ||||
|     toast.info(message, showNoticeOptions); | ||||
|   } | ||||
| } | ||||
|  | ||||
| export function openPage(url) { | ||||
|   | ||||
| @@ -1,6 +1,6 @@ | ||||
| import React, { useEffect, useState } from 'react'; | ||||
| import { Button, Form, Header, Input, Message, Segment } from 'semantic-ui-react'; | ||||
| import { useParams } from 'react-router-dom'; | ||||
| import { useNavigate, useParams } from 'react-router-dom'; | ||||
| import { API, showError, showInfo, showSuccess, verifyJSON } from '../../helpers'; | ||||
| import { CHANNEL_OPTIONS } from '../../constants'; | ||||
|  | ||||
| @@ -12,9 +12,14 @@ const MODEL_MAPPING_EXAMPLE = { | ||||
|  | ||||
| const EditChannel = () => { | ||||
|   const params = useParams(); | ||||
|   const navigate = useNavigate(); | ||||
|   const channelId = params.id; | ||||
|   const isEdit = channelId !== undefined; | ||||
|   const [loading, setLoading] = useState(isEdit); | ||||
|   const handleCancel = () => { | ||||
|     navigate('/channel'); | ||||
|   }; | ||||
|  | ||||
|   const originInputs = { | ||||
|     name: '', | ||||
|     type: 1, | ||||
| @@ -56,6 +61,9 @@ const EditChannel = () => { | ||||
|         case 18: | ||||
|           localModels = ['SparkDesk']; | ||||
|           break; | ||||
|         case 19: | ||||
|           localModels = ['360GPT_S2_V9', 'embedding-bert-512-v1', 'embedding_s1_v1', 'semantic_similarity_s1_v1', '360GPT_S2_V9.4']; | ||||
|           break; | ||||
|       } | ||||
|       setInputs((inputs) => ({ ...inputs, models: localModels })); | ||||
|     } | ||||
| @@ -158,6 +166,9 @@ const EditChannel = () => { | ||||
|     if (localInputs.type === 3 && localInputs.other === '') { | ||||
|       localInputs.other = '2023-06-01-preview'; | ||||
|     } | ||||
|     if (localInputs.type === 18 && localInputs.other === '') { | ||||
|       localInputs.other = 'v2.1'; | ||||
|     } | ||||
|     if (localInputs.model_mapping === '') { | ||||
|       localInputs.model_mapping = '{}'; | ||||
|     } | ||||
| @@ -270,6 +281,34 @@ const EditChannel = () => { | ||||
|               options={groupOptions} | ||||
|             /> | ||||
|           </Form.Field> | ||||
|           { | ||||
|             inputs.type === 18 && ( | ||||
|               <Form.Field> | ||||
|                 <Form.Input | ||||
|                   label='模型版本' | ||||
|                   name='other' | ||||
|                   placeholder={'请输入星火大模型版本,注意是接口地址中的版本号,例如:v2.1'} | ||||
|                   onChange={handleInputChange} | ||||
|                   value={inputs.other} | ||||
|                   autoComplete='new-password' | ||||
|                 /> | ||||
|               </Form.Field> | ||||
|             ) | ||||
|           } | ||||
|           { | ||||
|             inputs.type === 21 && ( | ||||
|               <Form.Field> | ||||
|                 <Form.Input | ||||
|                   label='知识库 ID' | ||||
|                   name='other' | ||||
|                   placeholder={'请输入知识库 ID,例如:123456'} | ||||
|                   onChange={handleInputChange} | ||||
|                   value={inputs.other} | ||||
|                   autoComplete='new-password' | ||||
|                 /> | ||||
|               </Form.Field> | ||||
|             ) | ||||
|           } | ||||
|           <Form.Field> | ||||
|             <Form.Dropdown | ||||
|               label='模型' | ||||
| @@ -350,7 +389,7 @@ const EditChannel = () => { | ||||
|                 label='密钥' | ||||
|                 name='key' | ||||
|                 required | ||||
|                 placeholder={inputs.type === 15 ? '请输入 access token,当前版本暂不支持自动刷新,请每 30 天更新一次' : (inputs.type === 18 ? '按照如下格式输入:APPID|APISecret|APIKey' : '请输入渠道对应的鉴权密钥')} | ||||
|                 placeholder={inputs.type === 15 ? '按照如下格式输入:APIKey|SecretKey' : (inputs.type === 18 ? '按照如下格式输入:APPID|APISecret|APIKey' : '请输入渠道对应的鉴权密钥')} | ||||
|                 onChange={handleInputChange} | ||||
|                 value={inputs.key} | ||||
|                 autoComplete='new-password' | ||||
| @@ -381,6 +420,7 @@ const EditChannel = () => { | ||||
|               </Form.Field> | ||||
|             ) | ||||
|           } | ||||
|           <Button onClick={handleCancel}>取消</Button> | ||||
|           <Button type={isEdit ? 'button' : 'submit'} positive onClick={submit}>提交</Button> | ||||
|         </Form> | ||||
|       </Segment> | ||||
|   | ||||
| @@ -14,10 +14,11 @@ const Home = () => { | ||||
|     const { success, message, data } = res.data; | ||||
|     if (success) { | ||||
|       let oldNotice = localStorage.getItem('notice'); | ||||
|       if (data !== oldNotice && data !== '') { | ||||
|         showNotice(data); | ||||
|         localStorage.setItem('notice', data); | ||||
|       } | ||||
|         if (data !== oldNotice && data !== '') { | ||||
|             const htmlNotice = marked(data); | ||||
|             showNotice(htmlNotice, true); | ||||
|             localStorage.setItem('notice', data); | ||||
|         } | ||||
|     } else { | ||||
|       showError(message); | ||||
|     } | ||||
| @@ -64,7 +65,7 @@ const Home = () => { | ||||
|                     <Card.Meta>系统信息总览</Card.Meta> | ||||
|                     <Card.Description> | ||||
|                       <p>名称:{statusState?.status?.system_name}</p> | ||||
|                       <p>版本:{statusState?.status?.version}</p> | ||||
|                       <p>版本:{statusState?.status?.version ? statusState?.status?.version : "unknown"}</p> | ||||
|                       <p> | ||||
|                         源码: | ||||
|                         <a | ||||
|   | ||||
| @@ -1,11 +1,12 @@ | ||||
| import React, { useEffect, useState } from 'react'; | ||||
| import { Button, Form, Header, Segment } from 'semantic-ui-react'; | ||||
| import { useParams } from 'react-router-dom'; | ||||
| import { useParams, useNavigate } from 'react-router-dom'; | ||||
| import { API, downloadTextAsFile, showError, showSuccess } from '../../helpers'; | ||||
| import { renderQuota, renderQuotaWithPrompt } from '../../helpers/render'; | ||||
|  | ||||
| const EditRedemption = () => { | ||||
|   const params = useParams(); | ||||
|   const navigate = useNavigate(); | ||||
|   const redemptionId = params.id; | ||||
|   const isEdit = redemptionId !== undefined; | ||||
|   const [loading, setLoading] = useState(isEdit); | ||||
| @@ -17,6 +18,10 @@ const EditRedemption = () => { | ||||
|   const [inputs, setInputs] = useState(originInputs); | ||||
|   const { name, quota, count } = inputs; | ||||
|  | ||||
|   const handleCancel = () => { | ||||
|     navigate('/redemption'); | ||||
|   }; | ||||
|    | ||||
|   const handleInputChange = (e, { name, value }) => { | ||||
|     setInputs((inputs) => ({ ...inputs, [name]: value })); | ||||
|   }; | ||||
| @@ -113,6 +118,7 @@ const EditRedemption = () => { | ||||
|             </> | ||||
|           } | ||||
|           <Button positive onClick={submit}>提交</Button> | ||||
|           <Button onClick={handleCancel}>取消</Button> | ||||
|         </Form> | ||||
|       </Segment> | ||||
|     </> | ||||
|   | ||||
		Reference in New Issue
	
	Block a user