Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-10-23 01:43:42 +08:00)

Compare commits: v0.5.10-1 ... v0.6.2-alp (96 commits)
Commits (SHA1):

cf16f44970, bf2e26a48f, 4fb22ad4ce, 95cfb8e8c9, c6ace985c2, 10a926b8f3, 2df877a352, 9d8967f7d3, b35f3523d3, 82e916b5ff,
de18d6fe16, 1d0b7fb5ae, f9490bb72e, 76467285e8, df1fd9aa81, 614c2e0442, eac6a0b9aa, b747cdbc6f, 6b27d6659a, dc5b781191,
c880b4a9a3, 565ea58e68, f141a37a9e, 5b78886ad3, 87c7c4f0e6, 4c4a873890, 0664bdfda1, 32387d9c20, bd888f2eb7, cece77e533,
2a5468e23c, d0e415893b, 6cf5ce9a7a, f598b9df87, 532c50d212, 2acc2f5017, 604ac56305, 9383b638a6, 28d512a675, de9a58ca0b,
1aa374ccfb, d548a01c59, 2cd1a78203, b9d3cb0c45, ea407f0054, 26e2e646cb, 4f214c48c6, 2d760d4a01, e2ed0399f0, eed9f5fdf0,
f2c51a494c, 8a4d6f3327, cf4e33cb12, 5d60305570, d062bc60e4, 39c1882970, 9c42c7dfd9, 903aaeded0, bdd4be562d, 37afb313b5,
c9ebcab8b8, 86261cc656, 8491785c9d, e848a3f7fa, 318adf5985, 965d7fc3d2, aa3f605894, 7b8eff1f22, e80cd508ba, d37f836d53,
e0b2d1ae47, 797ead686b, 0d22cf9ead, 48989d4a0b, 6227eee5bc, cbf8f07747, 4a96031ce6, 92886093ae, 0c022f17cb, 83f95935de,
aa03c89133, 505817ca17, cb5a3df616, 7772064d87, c50c609565, 498dea2dbb, c725cc8842, af8908db54, d8029550f7, f44fbe3fe7,
1c8922153d, f3c07e1451, 40ceb29e54, 0699ecd0af, ee9e746520, a763681c2e
.air.toml | 46 (deleted)

@@ -1,46 +0,0 @@
-root = "."
-testdata_dir = "testdata"
-tmp_dir = "tmp"
-
-[build]
-args_bin = []
-bin = "./tmp/main"
-cmd = "go build -o ./tmp/main ."
-delay = 1000
-exclude_dir = ["assets", "tmp", "vendor", "testdata", "web"]
-exclude_file = []
-exclude_regex = ["_test.go"]
-exclude_unchanged = false
-follow_symlink = false
-full_bin = ""
-include_dir = []
-include_ext = ["go", "tpl", "tmpl", "html"]
-include_file = []
-kill_delay = "0s"
-log = "build-errors.log"
-poll = false
-poll_interval = 0
-post_cmd = []
-pre_cmd = []
-rerun = false
-rerun_delay = 500
-send_interrupt = false
-stop_on_error = false
-
-[color]
-app = ""
-build = "yellow"
-main = "magenta"
-runner = "green"
-watcher = "cyan"
-
-[log]
-main_only = false
-time = false
-
-[misc]
-clean_on_exit = false
-
-[screen]
-clear_on_rebuild = false
-keep_scroll = true
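The removed `.air.toml` configured the `air` live-reload tool for local development: rebuild to `./tmp/main` whenever `.go`/`.tpl`/`.tmpl`/`.html` files change, excluding `web/`, `tmp/`, and test files. A minimal usage sketch, assuming the `air` binary is installed locally (the tool itself is not part of this repository):

```shell
# Live-reload development loop driven by the (now removed) config file.
air -c .air.toml
```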
.github/workflows/docker-image-amd64-en.yml | 49 (new file, vendored)

@@ -0,0 +1,49 @@
+name: Publish Docker image (amd64, English)
+
+on:
+  push:
+    tags:
+      - '*'
+  workflow_dispatch:
+    inputs:
+      name:
+        description: 'reason'
+        required: false
+jobs:
+  push_to_registries:
+    name: Push Docker image to multiple registries
+    runs-on: ubuntu-latest
+    permissions:
+      packages: write
+      contents: read
+    steps:
+      - name: Check out the repo
+        uses: actions/checkout@v3
+
+      - name: Save version info
+        run: |
+          git describe --tags > VERSION
+
+      - name: Translate
+        run: |
+          python ./i18n/translate.py --repository_path . --json_file_path ./i18n/en.json
+      - name: Log in to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          images: |
+            justsong/one-api-en
+
+      - name: Build and push Docker images
+        uses: docker/build-push-action@v3
+        with:
+          context: .
+          push: true
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
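This workflow produces the English image (`justsong/one-api-en`) by writing the tag to `VERSION` and running the i18n translation script before the image build. A rough local equivalent of those pre-build steps is sketched below; the `one-api-en:local` tag is a placeholder, not a published image, and a local Python interpreter is assumed:

```shell
# Reproduce the workflow's pre-build steps locally, then build the English image.
git describe --tags > VERSION
python ./i18n/translate.py --repository_path . --json_file_path ./i18n/en.json
docker build -t one-api-en:local .
```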
.github/workflows/docker-image-amd64.yml | 54 (new file, vendored)

@@ -0,0 +1,54 @@
+name: Publish Docker image (amd64)
+
+on:
+  push:
+    tags:
+      - '*'
+  workflow_dispatch:
+    inputs:
+      name:
+        description: 'reason'
+        required: false
+jobs:
+  push_to_registries:
+    name: Push Docker image to multiple registries
+    runs-on: ubuntu-latest
+    permissions:
+      packages: write
+      contents: read
+    steps:
+      - name: Check out the repo
+        uses: actions/checkout@v3
+
+      - name: Save version info
+        run: |
+          git describe --tags > VERSION
+
+      - name: Log in to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Log in to the Container registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          images: |
+            justsong/one-api
+            ghcr.io/${{ github.repository }}
+
+      - name: Build and push Docker images
+        uses: docker/build-push-action@v3
+        with:
+          context: .
+          push: true
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
.github/workflows/docker-image-arm64.yml | 62 (new file, vendored)

@@ -0,0 +1,62 @@
+name: Publish Docker image (arm64)
+
+on:
+  push:
+    tags:
+      - '*'
+      - '!*-alpha*'
+  workflow_dispatch:
+    inputs:
+      name:
+        description: 'reason'
+        required: false
+jobs:
+  push_to_registries:
+    name: Push Docker image to multiple registries
+    runs-on: ubuntu-latest
+    permissions:
+      packages: write
+      contents: read
+    steps:
+      - name: Check out the repo
+        uses: actions/checkout@v3
+
+      - name: Save version info
+        run: |
+          git describe --tags > VERSION
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Log in to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Log in to the Container registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          images: |
+            justsong/one-api
+            ghcr.io/${{ github.repository }}
+
+      - name: Build and push Docker images
+        uses: docker/build-push-action@v3
+        with:
+          context: .
+          platforms: linux/amd64,linux/arm64
+          push: true
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
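Compared with the amd64 workflows, this one adds QEMU and Buildx setup and passes `platforms: linux/amd64,linux/arm64`, so a single run publishes a multi-architecture image. A rough local equivalent of the build step, assuming a Buildx builder is already configured (the `one-api:multiarch-test` tag is a placeholder and `--push` is deliberately omitted):

```shell
# Cross-platform build roughly matching the workflow's build-push step.
git describe --tags > VERSION
docker buildx build --platform linux/amd64,linux/arm64 -t one-api:multiarch-test .
```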
.github/workflows/docker-image.yml | 62 (vendored, deleted)

@@ -1,62 +0,0 @@
-name: one-api docker image
-
-on:
-  push:
-    branches:
-      - main
-    tags:
-      - "v*"
-
-env:
-  # github.repository as <account>/<repo>
-  IMAGE_NAME: martialbe/one-api
-
-jobs:
-  build-and-push:
-    runs-on: ubuntu-latest
-    permissions:
-      packages: write
-      contents: read
-    steps:
-      - name: Check out the repo
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-
-      - name: Login to GHCR
-        uses: docker/login-action@v2
-        with:
-          registry: ghcr.io
-          username: ${{ github.repository_owner }}
-          password: ${{ secrets.GT_Token }}
-
-      - name: Docker meta
-        id: meta
-        uses: docker/metadata-action@v4
-        with:
-          # list of Docker images to use as base name for tags
-          images: ghcr.io/${{ env.IMAGE_NAME }}
-          # generate Docker tags based on the following events/attributes
-          tags: |
-            type=raw,value=dev,enable=${{ github.ref == 'refs/heads/main' }}
-            type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
-            type=pep440,pattern={{raw}},enable=${{ startsWith(github.ref, 'refs/tags/') }}
-
-      - name: Build and push
-        uses: docker/build-push-action@v4
-        with:
-          context: .
-          platforms: linux/amd64
-          build-args: |
-            COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
-          push: true
-          tags: ${{ steps.meta.outputs.tags }}
-          labels: ${{ steps.meta.outputs.labels }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
.github/workflows/linux-release.yml | 19 (vendored)

@@ -5,8 +5,13 @@ permissions:
 on:
   push:
     tags:
-      - "*"
-      - "!*-alpha*"
+      - '*'
+      - '!*-alpha*'
+  workflow_dispatch:
+    inputs:
+      name:
+        description: 'reason'
+        required: false
 jobs:
   release:
     runs-on: ubuntu-latest
@@ -23,17 +28,17 @@ jobs:
           CI: ""
         run: |
           cd web
           npm install
-          REACT_APP_VERSION=$(git describe --tags) npm run build
+          git describe --tags > VERSION
+          REACT_APP_VERSION=$(git describe --tags) chmod u+x ./build.sh && ./build.sh
           cd ..
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: ">=1.18.0"
+          go-version: '>=1.18.0'
       - name: Build Backend (amd64)
         run: |
           go mod download
-          go build -ldflags "-s -w -X 'one-api/common.Version=$(git describe --tags)' -extldflags '-static'" -o one-api
+          go build -ldflags "-s -w -X 'github.com/songquanpeng/one-api/common.Version=$(git describe --tags)' -extldflags '-static'" -o one-api

       - name: Build Backend (arm64)
         run: |
@@ -51,4 +56,4 @@ jobs:
           draft: true
           generate_release_notes: true
         env:
-          GITHUB_TOKEN: ${{ secrets.GT_Token }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
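The release workflows now write the tag to `VERSION`, build the web UI through `./build.sh`, and inject the version through the new Go module path with `-ldflags -X`. A minimal local sketch of the backend build, assuming Go ≥ 1.18 and the repository checked out at a tag:

```shell
# Embed the current tag into common.Version, matching the workflow's build step.
git describe --tags > VERSION
go mod download
go build -ldflags "-s -w -X 'github.com/songquanpeng/one-api/common.Version=$(git describe --tags)' -extldflags '-static'" -o one-api
```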
.github/workflows/macos-release.yml | 19 (vendored)

@@ -5,8 +5,13 @@ permissions:
 on:
   push:
     tags:
-      - "*"
-      - "!*-alpha*"
+      - '*'
+      - '!*-alpha*'
+  workflow_dispatch:
+    inputs:
+      name:
+        description: 'reason'
+        required: false
 jobs:
   release:
     runs-on: macos-latest
@@ -23,17 +28,17 @@ jobs:
           CI: ""
         run: |
           cd web
           npm install
-          REACT_APP_VERSION=$(git describe --tags) npm run build
+          git describe --tags > VERSION
+          REACT_APP_VERSION=$(git describe --tags) chmod u+x ./build.sh && ./build.sh
           cd ..
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: ">=1.18.0"
+          go-version: '>=1.18.0'
       - name: Build Backend
         run: |
           go mod download
-          go build -ldflags "-X 'one-api/common.Version=$(git describe --tags)'" -o one-api-macos
+          go build -ldflags "-X 'github.com/songquanpeng/one-api/common.Version=$(git describe --tags)'" -o one-api-macos
       - name: Release
         uses: softprops/action-gh-release@v1
         if: startsWith(github.ref, 'refs/tags/')
@@ -42,4 +47,4 @@ jobs:
           draft: true
           generate_release_notes: true
         env:
-          GITHUB_TOKEN: ${{ secrets.GT_Token }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/windows-release.yml | 19 (vendored)

@@ -5,8 +5,13 @@ permissions:
 on:
   push:
     tags:
-      - "*"
-      - "!*-alpha*"
+      - '*'
+      - '!*-alpha*'
+  workflow_dispatch:
+    inputs:
+      name:
+        description: 'reason'
+        required: false
 jobs:
   release:
     runs-on: windows-latest
@@ -25,18 +30,18 @@ jobs:
         env:
           CI: ""
         run: |
-          cd web
+          cd web/default
           npm install
           REACT_APP_VERSION=$(git describe --tags) npm run build
-          cd ..
+          cd ../..
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: ">=1.18.0"
+          go-version: '>=1.18.0'
       - name: Build Backend
         run: |
           go mod download
-          go build -ldflags "-s -w -X 'one-api/common.Version=$(git describe --tags)'" -o one-api.exe
+          go build -ldflags "-s -w -X 'github.com/songquanpeng/one-api/common.Version=$(git describe --tags)'" -o one-api.exe
       - name: Release
         uses: softprops/action-gh-release@v1
         if: startsWith(github.ref, 'refs/tags/')
@@ -45,4 +50,4 @@ jobs:
           draft: true
           generate_release_notes: true
         env:
-          GITHUB_TOKEN: ${{ secrets.GT_Token }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.gitignore | 3 (vendored)

@@ -7,5 +7,4 @@ build
 *.db-journal
 logs
 data
-tmp/
-.env
+/web/node_modules
Dockerfile | 19

@@ -1,10 +1,15 @@
 FROM node:16 as builder

-WORKDIR /build
-COPY web/package.json .
-RUN npm install
-COPY ./web .
+WORKDIR /web
 COPY ./VERSION .
+COPY ./web .
+
+WORKDIR /web/default
+RUN npm install
 RUN DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$(cat VERSION) npm run build
+
+WORKDIR /web/berry
+RUN npm install
+RUN DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$(cat VERSION) npm run build

 FROM golang AS builder2
@@ -17,8 +22,8 @@ WORKDIR /build
 ADD go.mod go.sum ./
 RUN go mod download
 COPY . .
-COPY --from=builder /build/build ./web/build
-RUN go build -ldflags "-s -w -X 'one-api/common.Version=$(cat VERSION)' -extldflags '-static'" -o one-api
+COPY --from=builder /web/build ./web/build
+RUN go build -ldflags "-s -w -X 'github.com/songquanpeng/one-api/common.Version=$(cat VERSION)' -extldflags '-static'" -o one-api

 FROM alpine

@@ -30,4 +35,4 @@ RUN apk update \
 COPY --from=builder2 /build/one-api /
 EXPOSE 3000
 WORKDIR /data
-ENTRYPOINT ["/one-api"]
+ENTRYPOINT ["/one-api"]
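The Dockerfile now builds both the `default` and `berry` front-end themes from `web/` and expects a `VERSION` file in the build context before the image build starts. A local sketch of building and running the image (the `one-api:local` tag is a placeholder, not a published image; the run flags mirror the README's documented deployment command):

```shell
# VERSION must exist before `docker build`, because the Dockerfile copies it into the web build stage.
git describe --tags > VERSION
docker build -t one-api:local .
docker run --name one-api -d --restart always -p 3000:3000 -v "$(pwd)/data:/data" one-api:local
```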
README.en.md | 142

@@ -3,45 +3,35 @@
</p>
<p align="center">
<a href="https://github.com/songquanpeng/one-api"><img src="https://raw.githubusercontent.com/songquanpeng/one-api/main/web/public/logo.png" width="150" height="150" alt="one-api logo"></a>
<a href="https://github.com/songquanpeng/one-api"><img src="https://raw.githubusercontent.com/songquanpeng/one-api/main/web/default/public/logo.png" width="150" height="150" alt="one-api logo"></a>
</p>
<div align="center">
# One API
_This project is a derivative of [one-api](https://github.com/songquanpeng/one-api), where the main focus has been on modularizing the module code from the original project and modifying the frontend interface. This project also adheres to the MIT License._
<p align="center">
<a href="https://raw.githubusercontent.com/MartialBE/one-api/main/LICENSE">
<img src="https://img.shields.io/github/license/MartialBE/one-api?color=brightgreen" alt="license">
</a>
<a href="https://github.com/MartialBE/one-api/releases/latest">
<img src="https://img.shields.io/github/v/release/MartialBE/one-api?color=brightgreen&include_prereleases" alt="release">
</a>
<a href="https://github.com/users/MartialBE/packages/container/package/one-api">
<img src="https://img.shields.io/badge/docker-ghcr.io-blue" alt="docker">
</a>
<a href="https://goreportcard.com/report/github.com/MartialBE/one-api">
<img src="https://goreportcard.com/badge/github.com/MartialBE/one-api" alt="GoReportCard">
</a>
</p>
**Please do not mix with the original version, as the different channel ID may cause data disorder.**
## Screenshots


_The following is the original project description:_
---
_✨ Access all LLM through the standard OpenAI API format, easy to deploy & use ✨_
</div>
<p align="center">
<a href="https://raw.githubusercontent.com/songquanpeng/one-api/main/LICENSE">
<img src="https://img.shields.io/github/license/songquanpeng/one-api?color=brightgreen" alt="license">
</a>
<a href="https://github.com/songquanpeng/one-api/releases/latest">
<img src="https://img.shields.io/github/v/release/songquanpeng/one-api?color=brightgreen&include_prereleases" alt="release">
</a>
<a href="https://hub.docker.com/repository/docker/justsong/one-api">
<img src="https://img.shields.io/docker/pulls/justsong/one-api?color=brightgreen" alt="docker pull">
</a>
<a href="https://github.com/songquanpeng/one-api/releases/latest">
<img src="https://img.shields.io/github/downloads/songquanpeng/one-api/total?color=brightgreen&include_prereleases" alt="release">
</a>
<a href="https://goreportcard.com/report/github.com/songquanpeng/one-api">
<img src="https://goreportcard.com/badge/github.com/songquanpeng/one-api" alt="GoReportCard">
</a>
</p>
<p align="center">
<a href="#deployment">Deployment Tutorial</a>
·

@@ -67,14 +57,13 @@ _✨ Access all LLM through the standard OpenAI API format, easy to deploy & use
> **Note**: The latest image pulled from Docker may be an `alpha` release. Specify the version manually if you require stability.
## Features
1. Support for multiple large models:
- [x] [OpenAI ChatGPT Series Models](https://platform.openai.com/docs/guides/gpt/chat-completions-api) (Supports [Azure OpenAI API](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference))
- [x] [Anthropic Claude Series Models](https://anthropic.com)
- [x] [Google PaLM2 and Gemini Series Models](https://developers.generativeai.google)
- [x] [Baidu Wenxin Yiyuan Series Models](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
- [x] [Alibaba Tongyi Qianwen Series Models](https://help.aliyun.com/document_detail/2400395.html)
- [x] [Zhipu ChatGLM Series Models](https://bigmodel.cn)
+ [x] [OpenAI ChatGPT Series Models](https://platform.openai.com/docs/guides/gpt/chat-completions-api) (Supports [Azure OpenAI API](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference))
+ [x] [Anthropic Claude Series Models](https://anthropic.com)
+ [x] [Google PaLM2 and Gemini Series Models](https://developers.generativeai.google)
+ [x] [Baidu Wenxin Yiyuan Series Models](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
+ [x] [Alibaba Tongyi Qianwen Series Models](https://help.aliyun.com/document_detail/2400395.html)
+ [x] [Zhipu ChatGLM Series Models](https://bigmodel.cn)
2. Supports access to multiple channels through **load balancing**.
3. Supports **stream mode** that enables typewriter-like effect through stream transmission.
4. Supports **multi-machine deployment**. [See here](#multi-machine-deployment) for more details.

@@ -93,15 +82,13 @@ _✨ Access all LLM through the standard OpenAI API format, easy to deploy & use
15. Supports management API access through system access tokens.
16. Supports Cloudflare Turnstile user verification.
17. Supports user management and multiple user login/registration methods:
- Email login/registration and password reset via email.
- [GitHub OAuth](https://github.com/settings/applications/new).
- WeChat Official Account authorization (requires additional deployment of [WeChat Server](https://github.com/songquanpeng/wechat-server)).
+ Email login/registration and password reset via email.
+ [GitHub OAuth](https://github.com/settings/applications/new).
+ WeChat Official Account authorization (requires additional deployment of [WeChat Server](https://github.com/songquanpeng/wechat-server)).
18. Immediate support and encapsulation of other major model APIs as they become available.
## Deployment
### Docker Deployment
Deployment command: `docker run --name one-api -d --restart always -p 3000:3000 -e TZ=Asia/Shanghai -v /home/ubuntu/data/one-api:/data justsong/one-api-en`
Update command: `docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower -cR`

@@ -111,11 +98,10 @@ The first `3000` in `-p 3000:3000` is the port of the host, which can be modifie
Data will be saved in the `/home/ubuntu/data/one-api` directory on the host. Ensure that the directory exists and has write permissions, or change it to a suitable directory.
Nginx reference configuration:
```
server{
server_name openai.justsong.cn; # Modify your domain name accordingly
location / {
client_max_body_size 64m;
proxy_http_version 1.1;

@@ -129,7 +115,6 @@ server{
```
Next, configure HTTPS with Let's Encrypt certbot:
```bash
# Install certbot on Ubuntu:
sudo snap install --classic certbot

@@ -144,23 +129,20 @@ sudo service nginx restart
The initial account username is `root` and password is `123456`.
### Manual Deployment
1. Download the executable file from [GitHub Releases](https://github.com/songquanpeng/one-api/releases/latest) or compile from source:
```shell
git clone https://github.com/songquanpeng/one-api.git
# Build the frontend
cd one-api/web
cd one-api/web/default
npm install
npm run build
# Build the backend
cd ..
cd ../..
go mod download
go build -ldflags "-s -w" -o one-api
```
2. Run:
```shell
chmod u+x one-api

@@ -171,7 +153,6 @@ The initial account username is `root` and password is `123456`.
For more detailed deployment tutorials, please refer to [this page](https://iamazing.cn/page/how-to-deploy-a-website).
### Multi-machine Deployment
1. Set the same `SESSION_SECRET` for all servers.
2. Set `SQL_DSN` and use MySQL instead of SQLite. All servers should connect to the same database.
3. Set the `NODE_TYPE` for all non-master nodes to `slave`.

@@ -183,13 +164,11 @@ For more detailed deployment tutorials, please refer to [this page](https://iama
Please refer to the [environment variables](#environment-variables) section for details on using environment variables.
### Deployment on Control Panels (e.g., Baota)
Refer to [#175](https://github.com/songquanpeng/one-api/issues/175) for detailed instructions.
If you encounter a blank page after deployment, refer to [#97](https://github.com/songquanpeng/one-api/issues/97) for possible solutions.
### Deployment on Third-Party Platforms
<details>
<summary><strong>Deploy on Sealos</strong></summary>
<div>

@@ -200,6 +179,7 @@ If you encounter a blank page after deployment, refer to [#97](https://github.co
[](https://cloud.sealos.io/?openapp=system-fastdeploy?templateName=one-api)
</div>
</details>

@@ -214,7 +194,7 @@ If you encounter a blank page after deployment, refer to [#97](https://github.co
1. First, fork the code.
2. Go to [Zeabur](https://zeabur.com?referralCode=songquanpeng), log in, and enter the console.
3. Create a new project. In Service -> Add Service, select Marketplace, and choose MySQL. Note down the connection parameters (username, password, address, and port).
4. Copy the connection parameters and run `` create database `one-api` `` to create the database.
4. Copy the connection parameters and run ```create database `one-api` ``` to create the database.
5. Then, in Service -> Add Service, select Git (authorization is required for the first use) and choose your forked repository.
6. Automatic deployment will start, but please cancel it for now. Go to the Variable tab, add a `PORT` with a value of `3000`, and then add a `SQL_DSN` with a value of `<username>:<password>@tcp(<addr>:<port>)/one-api`. Save the changes. Please note that if `SQL_DSN` is not set, data will not be persisted, and the data will be lost after redeployment.
7. Select Redeploy.

@@ -225,7 +205,6 @@ If you encounter a blank page after deployment, refer to [#97](https://github.co
</details>
## Configuration
The system is ready to use out of the box.
You can configure it by setting environment variables or command line parameters.

@@ -233,7 +212,6 @@ You can configure it by setting environment variables or command line parameters
After the system starts, log in as the `root` user to further configure the system.
## Usage
Add your API Key on the `Channels` page, and then add an access token on the `Tokens` page.
You can then use your access token to access One API. The usage is consistent with the [OpenAI API](https://platform.openai.com/docs/api-reference/introduction).

@@ -257,65 +235,59 @@ Note that the token needs to be created by an administrator to specify the chann
If the channel ID is not provided, load balancing will be used to distribute the requests to multiple channels.
### Environment Variables
1. `REDIS_CONN_STRING`: When set, Redis will be used as the storage for request rate limiting instead of memory.
- Example: `REDIS_CONN_STRING=redis://default:redispw@localhost:49153`
+ Example: `REDIS_CONN_STRING=redis://default:redispw@localhost:49153`
2. `SESSION_SECRET`: When set, a fixed session key will be used to ensure that cookies of logged-in users are still valid after the system restarts.
- Example: `SESSION_SECRET=random_string`
+ Example: `SESSION_SECRET=random_string`
3. `SQL_DSN`: When set, the specified database will be used instead of SQLite. Please use MySQL version 8.0.
- Example: `SQL_DSN=root:123456@tcp(localhost:3306)/oneapi`
+ Example: `SQL_DSN=root:123456@tcp(localhost:3306)/oneapi`
4. `FRONTEND_BASE_URL`: When set, the specified frontend address will be used instead of the backend address.
- Example: `FRONTEND_BASE_URL=https://openai.justsong.cn`
+ Example: `FRONTEND_BASE_URL=https://openai.justsong.cn`
5. `SYNC_FREQUENCY`: When set, the system will periodically sync configurations from the database, with the unit in seconds. If not set, no sync will happen.
- Example: `SYNC_FREQUENCY=60`
+ Example: `SYNC_FREQUENCY=60`
6. `NODE_TYPE`: When set, specifies the node type. Valid values are `master` and `slave`. If not set, it defaults to `master`.
- Example: `NODE_TYPE=slave`
+ Example: `NODE_TYPE=slave`
7. `CHANNEL_UPDATE_FREQUENCY`: When set, it periodically updates the channel balances, with the unit in minutes. If not set, no update will happen.
- Example: `CHANNEL_UPDATE_FREQUENCY=1440`
+ Example: `CHANNEL_UPDATE_FREQUENCY=1440`
8. `CHANNEL_TEST_FREQUENCY`: When set, it periodically tests the channels, with the unit in minutes. If not set, no test will happen.
- Example: `CHANNEL_TEST_FREQUENCY=1440`
+ Example: `CHANNEL_TEST_FREQUENCY=1440`
9. `POLLING_INTERVAL`: The time interval (in seconds) between requests when updating channel balances and testing channel availability. Default is no interval.
- Example: `POLLING_INTERVAL=5`
+ Example: `POLLING_INTERVAL=5`
### Command Line Parameters
1. `--port <port_number>`: Specifies the port number on which the server listens. Defaults to `3000`.
- Example: `--port 3000`
+ Example: `--port 3000`
2. `--log-dir <log_dir>`: Specifies the log directory. If not set, the logs will not be saved.
- Example: `--log-dir ./logs`
+ Example: `--log-dir ./logs`
3. `--version`: Prints the system version number and exits.
4. `--help`: Displays the command usage help and parameter descriptions.
## Screenshots


## FAQ
1. What is quota? How is it calculated? Does One API have quota calculation issues?
- Quota = Group multiplier _ Model multiplier _ (number of prompt tokens + number of completion tokens \* completion multiplier)
- The completion multiplier is fixed at 1.33 for GPT3.5 and 2 for GPT4, consistent with the official definition.
- If it is not a stream mode, the official API will return the total number of tokens consumed. However, please note that the consumption multipliers for prompts and completions are different.
+ Quota = Group multiplier * Model multiplier * (number of prompt tokens + number of completion tokens * completion multiplier)
+ The completion multiplier is fixed at 1.33 for GPT3.5 and 2 for GPT4, consistent with the official definition.
+ If it is not a stream mode, the official API will return the total number of tokens consumed. However, please note that the consumption multipliers for prompts and completions are different.
2. Why does it prompt "insufficient quota" even though my account balance is sufficient?
- Please check if your token quota is sufficient. It is separate from the account balance.
- The token quota is used to set the maximum usage and can be freely set by the user.
+ Please check if your token quota is sufficient. It is separate from the account balance.
+ The token quota is used to set the maximum usage and can be freely set by the user.
3. It says "No available channels" when trying to use a channel. What should I do?
- Please check the user and channel group settings.
- Also check the channel model settings.
+ Please check the user and channel group settings.
+ Also check the channel model settings.
4. Channel testing reports an error: "invalid character '<' looking for beginning of value"
- This error occurs when the returned value is not valid JSON but an HTML page.
- Most likely, the IP of your deployment site or the node of the proxy has been blocked by CloudFlare.
+ This error occurs when the returned value is not valid JSON but an HTML page.
+ Most likely, the IP of your deployment site or the node of the proxy has been blocked by CloudFlare.
5. ChatGPT Next Web reports an error: "Failed to fetch"
- Do not set `BASE_URL` during deployment.
- Double-check that your interface address and API Key are correct.
+ Do not set `BASE_URL` during deployment.
+ Double-check that your interface address and API Key are correct.
## Related Projects
[FastGPT](https://github.com/labring/FastGPT): Knowledge question answering system based on the LLM
## Note
This project is an open-source project. Please use it in compliance with OpenAI's [Terms of Use](https://openai.com/policies/terms-of-use) and **applicable laws and regulations**. It must not be used for illegal purposes.
This project is released under the MIT license. Based on this, attribution and a link to this project must be included at the bottom of the page.
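The usage section above states that tokens created on the `Tokens` page are used exactly like OpenAI API keys. A minimal request sketch against a self-hosted instance (`<HOST>`, `<PORT>`, the `sk-...` token, and the model name are placeholders, not values from this repository):

```shell
# Call the OpenAI-compatible endpoint exposed by One API with a token from the `Tokens` page.
curl "https://<HOST>:<PORT>/v1/chat/completions" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer sk-xxxxxx" \
  -d '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello"}]}'
```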
README.ja.md | 140
@@ -3,45 +3,35 @@
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/songquanpeng/one-api"><img src="https://raw.githubusercontent.com/songquanpeng/one-api/main/web/public/logo.png" width="150" height="150" alt="one-api logo"></a>
|
||||
<a href="https://github.com/songquanpeng/one-api"><img src="https://raw.githubusercontent.com/songquanpeng/one-api/main/web/default/public/logo.png" width="150" height="150" alt="one-api logo"></a>
|
||||
</p>
|
||||
|
||||
<div align="center">
|
||||
|
||||
# One API
|
||||
|
||||
_このプロジェクトは、[one-api](https://github.com/songquanpeng/one-api)をベースにしており、元のプロジェクトのモジュールコードを分離し、モジュール化し、フロントエンドのインターフェースを変更しました。このプロジェクトも MIT ライセンスに従っています。_
|
||||
|
||||
<p align="center">
|
||||
<a href="https://raw.githubusercontent.com/MartialBE/one-api/main/LICENSE">
|
||||
<img src="https://img.shields.io/github/license/MartialBE/one-api?color=brightgreen" alt="license">
|
||||
</a>
|
||||
<a href="https://github.com/MartialBE/one-api/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/MartialBE/one-api?color=brightgreen&include_prereleases" alt="release">
|
||||
</a>
|
||||
<a href="https://github.com/users/MartialBE/packages/container/package/one-api">
|
||||
<img src="https://img.shields.io/badge/docker-ghcr.io-blue" alt="docker">
|
||||
</a>
|
||||
<a href="https://goreportcard.com/report/github.com/MartialBE/one-api">
|
||||
<img src="https://goreportcard.com/badge/github.com/MartialBE/one-api" alt="GoReportCard">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
**オリジナルバージョンと混合しないでください。チャンネル ID が異なるため、データの混乱を引き起こす可能性があります**
|
||||
|
||||
## スクリーンショット
|
||||
|
||||

|
||||

|
||||
|
||||
_以下は元の項目の説明です:_
|
||||
|
||||
---
|
||||
|
||||
_✨ 標準的な OpenAI API フォーマットを通じてすべての LLM にアクセスでき、導入と利用が容易です ✨_
|
||||
|
||||
</div>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://raw.githubusercontent.com/songquanpeng/one-api/main/LICENSE">
|
||||
<img src="https://img.shields.io/github/license/songquanpeng/one-api?color=brightgreen" alt="license">
|
||||
</a>
|
||||
<a href="https://github.com/songquanpeng/one-api/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/songquanpeng/one-api?color=brightgreen&include_prereleases" alt="release">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/repository/docker/justsong/one-api">
|
||||
<img src="https://img.shields.io/docker/pulls/justsong/one-api?color=brightgreen" alt="docker pull">
|
||||
</a>
|
||||
<a href="https://github.com/songquanpeng/one-api/releases/latest">
|
||||
<img src="https://img.shields.io/github/downloads/songquanpeng/one-api/total?color=brightgreen&include_prereleases" alt="release">
|
||||
</a>
|
||||
<a href="https://goreportcard.com/report/github.com/songquanpeng/one-api">
|
||||
<img src="https://goreportcard.com/badge/github.com/songquanpeng/one-api" alt="GoReportCard">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="#deployment">デプロイチュートリアル</a>
|
||||
·
|
||||
@@ -67,14 +57,13 @@ _✨ 標準的な OpenAI API フォーマットを通じてすべての LLM に
|
||||
> **注**: Docker からプルされた最新のイメージは、`alpha` リリースかもしれません。安定性が必要な場合は、手動でバージョンを指定してください。
|
||||
|
||||
## 特徴
|
||||
|
||||
1. 複数の大型モデルをサポート:
|
||||
- [x] [OpenAI ChatGPT シリーズモデル](https://platform.openai.com/docs/guides/gpt/chat-completions-api) ([Azure OpenAI API](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference) をサポート)
|
||||
- [x] [Anthropic Claude シリーズモデル](https://anthropic.com)
|
||||
- [x] [Google PaLM2/Gemini シリーズモデル](https://developers.generativeai.google)
|
||||
- [x] [Baidu Wenxin Yiyuan シリーズモデル](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
|
||||
- [x] [Alibaba Tongyi Qianwen シリーズモデル](https://help.aliyun.com/document_detail/2400395.html)
|
||||
- [x] [Zhipu ChatGLM シリーズモデル](https://bigmodel.cn)
|
||||
+ [x] [OpenAI ChatGPT シリーズモデル](https://platform.openai.com/docs/guides/gpt/chat-completions-api) ([Azure OpenAI API](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference) をサポート)
|
||||
+ [x] [Anthropic Claude シリーズモデル](https://anthropic.com)
|
||||
+ [x] [Google PaLM2/Gemini シリーズモデル](https://developers.generativeai.google)
|
||||
+ [x] [Baidu Wenxin Yiyuan シリーズモデル](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
|
||||
+ [x] [Alibaba Tongyi Qianwen シリーズモデル](https://help.aliyun.com/document_detail/2400395.html)
|
||||
+ [x] [Zhipu ChatGLM シリーズモデル](https://bigmodel.cn)
|
||||
2. **ロードバランシング**による複数チャンネルへのアクセスをサポート。
|
||||
3. ストリーム伝送によるタイプライター的効果を可能にする**ストリームモード**に対応。
|
||||
4. **マルチマシンデプロイ**に対応。[詳細はこちら](#multi-machine-deployment)を参照。
|
||||
@@ -93,15 +82,13 @@ _✨ 標準的な OpenAI API フォーマットを通じてすべての LLM に
|
||||
15. システム・アクセストークンによる管理 API アクセスをサポートする。
|
||||
16. Cloudflare Turnstile によるユーザー認証に対応。
|
||||
17. ユーザー管理と複数のユーザーログイン/登録方法をサポート:
|
||||
- 電子メールによるログイン/登録とパスワードリセット。
|
||||
- [GitHub OAuth](https://github.com/settings/applications/new)。
|
||||
- WeChat 公式アカウントの認証([WeChat Server](https://github.com/songquanpeng/wechat-server)の追加導入が必要)。
|
||||
+ 電子メールによるログイン/登録とパスワードリセット。
|
||||
+ [GitHub OAuth](https://github.com/settings/applications/new)。
|
||||
+ WeChat 公式アカウントの認証([WeChat Server](https://github.com/songquanpeng/wechat-server)の追加導入が必要)。
|
||||
18. 他の主要なモデル API が利用可能になった場合、即座にサポートし、カプセル化する。
|
||||
|
||||
## デプロイメント
|
||||
|
||||
### Docker デプロイメント
|
||||
|
||||
デプロイコマンド: `docker run --name one-api -d --restart always -p 3000:3000 -e TZ=Asia/Shanghai -v /home/ubuntu/data/one-api:/data justsong/one-api-en`。
|
||||
|
||||
コマンドを更新する: `docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrr/watchtower -cR`。
|
||||
@@ -110,8 +97,7 @@ _✨ 標準的な OpenAI API フォーマットを通じてすべての LLM に
|
||||
|
||||
データはホストの `/home/ubuntu/data/one-api` ディレクトリに保存される。このディレクトリが存在し、書き込み権限があることを確認する、もしくは適切なディレクトリに変更してください。
|
||||
|
||||
Nginx リファレンス設定:
|
||||
|
||||
Nginxリファレンス設定:
|
||||
```
|
||||
server{
|
||||
server_name openai.justsong.cn; # ドメイン名は適宜変更
|
||||
@@ -130,7 +116,6 @@ server{
|
||||
```
|
||||
|
||||
次に、Let's Encrypt certbot を使って HTTPS を設定します:
|
||||
|
||||
```bash
|
||||
# Ubuntu に certbot をインストール:
|
||||
sudo snap install --classic certbot
|
||||
@@ -145,23 +130,20 @@ sudo service nginx restart
|
||||
初期アカウントのユーザー名は `root` で、パスワードは `123456` です。
|
||||
|
||||
### マニュアルデプロイ
|
||||
|
||||
1. [GitHub Releases](https://github.com/songquanpeng/one-api/releases/latest) から実行ファイルをダウンロードする、もしくはソースからコンパイルする:
|
||||
|
||||
```shell
|
||||
git clone https://github.com/songquanpeng/one-api.git
|
||||
|
||||
# フロントエンドのビルド
|
||||
cd one-api/web
|
||||
cd one-api/web/default
|
||||
npm install
|
||||
npm run build
|
||||
|
||||
# バックエンドのビルド
|
||||
cd ..
|
||||
cd ../..
|
||||
go mod download
|
||||
go build -ldflags "-s -w" -o one-api
|
||||
```
|
||||
|
||||
2. 実行:
|
||||
```shell
|
||||
chmod u+x one-api
|
||||
@@ -172,7 +154,6 @@ sudo service nginx restart
|
||||
より詳細なデプロイのチュートリアルについては、[このページ](https://iamazing.cn/page/how-to-deploy-a-website) を参照してください。
|
||||
|
||||
### マルチマシンデプロイ
|
||||
|
||||
1. すべてのサーバに同じ `SESSION_SECRET` を設定する。
|
||||
2. `SQL_DSN` を設定し、SQLite の代わりに MySQL を使用する。すべてのサーバは同じデータベースに接続する。
|
||||
3. マスターノード以外のノードの `NODE_TYPE` を `slave` に設定する。
|
||||
@@ -184,13 +165,11 @@ sudo service nginx restart
|
||||
Please refer to the [environment variables](#environment-variables) section for details on using environment variables.
|
||||
|
||||
### コントロールパネル(例: Baota)への展開
|
||||
|
||||
詳しい手順は [#175](https://github.com/songquanpeng/one-api/issues/175) を参照してください。
|
||||
|
||||
配置後に空白のページが表示される場合は、[#97](https://github.com/songquanpeng/one-api/issues/97) を参照してください。
|
||||
|
||||
### サードパーティプラットフォームへのデプロイ
|
||||
|
||||
<details>
|
||||
<summary><strong>Sealos へのデプロイ</strong></summary>
|
||||
<div>
|
||||
@@ -201,6 +180,7 @@ Please refer to the [environment variables](#environment-variables) section for
|
||||
|
||||
[](https://cloud.sealos.io/?openapp=system-fastdeploy?templateName=one-api)
|
||||
|
||||
|
||||
</div>
|
||||
</details>
|
||||
|
||||
@@ -214,8 +194,8 @@ Please refer to the [environment variables](#environment-variables) section for
|
||||
|
||||
1. まず、コードをフォークする。
|
||||
2. [Zeabur](https://zeabur.com?referralCode=songquanpeng) にアクセスしてログインし、コンソールに入る。
|
||||
3. 新しいプロジェクトを作成します。Service -> Add Service で Marketplace を選択し、MySQL を選択する。接続パラメータ(ユーザー名、パスワード、アドレス、ポート)をメモします。
|
||||
4. 接続パラメータをコピーし、`` create database `one-api` `` を実行してデータベースを作成する。
|
||||
3. 新しいプロジェクトを作成します。Service -> Add ServiceでMarketplace を選択し、MySQL を選択する。接続パラメータ(ユーザー名、パスワード、アドレス、ポート)をメモします。
|
||||
4. 接続パラメータをコピーし、```create database `one-api` ``` を実行してデータベースを作成する。
|
||||
5. その後、Service -> Add Service で Git を選択し(最初の使用には認証が必要です)、フォークしたリポジトリを選択します。
|
||||
6. 自動デプロイが開始されますが、一旦キャンセルしてください。Variable タブで `PORT` に `3000` を追加し、`SQL_DSN` に `<username>:<password>@tcp(<addr>:<port>)/one-api` を追加します。変更を保存する。SQL_DSN` が設定されていないと、データが永続化されず、再デプロイ後にデータが失われるので注意すること。
|
||||
7. 再デプロイを選択します。
|
||||
@@ -226,7 +206,6 @@ Please refer to the [environment variables](#environment-variables) section for
|
||||
</details>
|
||||
|
||||
## コンフィグ
|
||||
|
||||
システムは箱から出してすぐに使えます。
|
||||
|
||||
環境変数やコマンドラインパラメータを設定することで、システムを構成することができます。
|
||||
@@ -234,7 +213,6 @@ Please refer to the [environment variables](#environment-variables) section for
|
||||
システム起動後、`root` ユーザーとしてログインし、さらにシステムを設定します。
|
||||
|
||||
## 使用方法
|
||||
|
||||
`Channels` ページで API Key を追加し、`Tokens` ページでアクセストークンを追加する。
|
||||
|
||||
アクセストークンを使って One API にアクセスすることができる。使い方は [OpenAI API](https://platform.openai.com/docs/api-reference/introduction) と同じです。
|
||||
@@ -258,65 +236,59 @@ graph LR
|
||||
もしチャネル ID が指定されない場合、ロードバランシングによってリクエストが複数のチャネルに振り分けられます。
|
||||
|
||||
### 環境変数
|
||||
|
||||
1. `REDIS_CONN_STRING`: 設定すると、リクエストレート制限のためのストレージとして、メモリの代わりに Redis が使われる。
|
||||
- 例: `REDIS_CONN_STRING=redis://default:redispw@localhost:49153`
|
||||
+ 例: `REDIS_CONN_STRING=redis://default:redispw@localhost:49153`
|
||||
2. `SESSION_SECRET`: 設定すると、固定セッションキーが使用され、システムの再起動後もログインユーザーのクッキーが有効であることが保証されます。
|
||||
- 例: `SESSION_SECRET=random_string`
|
||||
+ 例: `SESSION_SECRET=random_string`
|
||||
3. `SQL_DSN`: 設定すると、SQLite の代わりに指定したデータベースが使用されます。MySQL バージョン 8.0 を使用してください。
|
||||
- 例: `SQL_DSN=root:123456@tcp(localhost:3306)/oneapi`
|
||||
+ 例: `SQL_DSN=root:123456@tcp(localhost:3306)/oneapi`
|
||||
4. `FRONTEND_BASE_URL`: 設定されると、バックエンドアドレスではなく、指定されたフロントエンドアドレスが使われる。
|
||||
- 例: `FRONTEND_BASE_URL=https://openai.justsong.cn`
|
||||
+ 例: `FRONTEND_BASE_URL=https://openai.justsong.cn`
|
||||
5. `SYNC_FREQUENCY`: 設定された場合、システムは定期的にデータベースからコンフィグを秒単位で同期する。設定されていない場合、同期は行われません。
|
||||
- 例: `SYNC_FREQUENCY=60`
|
||||
+ 例: `SYNC_FREQUENCY=60`
|
||||
6. `NODE_TYPE`: 設定すると、ノードのタイプを指定する。有効な値は `master` と `slave` である。設定されていない場合、デフォルトは `master`。
|
||||
- 例: `NODE_TYPE=slave`
|
||||
+ 例: `NODE_TYPE=slave`
|
||||
7. `CHANNEL_UPDATE_FREQUENCY`: 設定すると、チャンネル残高を分単位で定期的に更新する。設定されていない場合、更新は行われません。
|
||||
- 例: `CHANNEL_UPDATE_FREQUENCY=1440`
|
||||
+ 例: `CHANNEL_UPDATE_FREQUENCY=1440`
|
||||
8. `CHANNEL_TEST_FREQUENCY`: 設定すると、チャンネルを定期的にテストする。設定されていない場合、テストは行われません。
|
||||
- 例: `CHANNEL_TEST_FREQUENCY=1440`
|
||||
+ 例: `CHANNEL_TEST_FREQUENCY=1440`
|
||||
9. `POLLING_INTERVAL`: チャネル残高の更新とチャネルの可用性をテストするときのリクエスト間の時間間隔 (秒)。デフォルトは間隔なし。
|
||||
- 例: `POLLING_INTERVAL=5`
|
||||
+ 例: `POLLING_INTERVAL=5`
|
||||
|
||||
### コマンドラインパラメータ
|
||||
|
||||
1. `--port <port_number>`: サーバがリッスンするポート番号を指定。デフォルトは `3000` です。
|
||||
- 例: `--port 3000`
|
||||
+ 例: `--port 3000`
|
||||
2. `--log-dir <log_dir>`: ログディレクトリを指定。設定しない場合、ログは保存されません。
|
||||
- 例: `--log-dir ./logs`
|
||||
+ 例: `--log-dir ./logs`
|
||||
3. `--version`: システムのバージョン番号を表示して終了する。
|
||||
4. `--help`: コマンドの使用法ヘルプとパラメータの説明を表示。
|
||||
|
||||
## スクリーンショット
|
||||
|
||||

|
||||

|
||||
|
||||
## FAQ
|
||||
|
||||
1. ノルマとは何か?どのように計算されますか?One API にはノルマ計算の問題はありますか?
|
||||
- ノルマ = グループ倍率 _ モデル倍率 _ (プロンプトトークンの数 + 完了トークンの数 \* 完了倍率)
|
||||
- 完了倍率は、公式の定義と一致するように、GPT3.5 では 1.33、GPT4 では 2 に固定されています。
|
||||
- ストリームモードでない場合、公式 API は消費したトークンの総数を返す。ただし、プロンプトとコンプリートの消費倍率は異なるので注意してください。
|
||||
+ ノルマ = グループ倍率 * モデル倍率 * (プロンプトトークンの数 + 完了トークンの数 * 完了倍率)
|
||||
+ 完了倍率は、公式の定義と一致するように、GPT3.5 では 1.33、GPT4 では 2 に固定されています。
|
||||
+ ストリームモードでない場合、公式 API は消費したトークンの総数を返す。ただし、プロンプトとコンプリートの消費倍率は異なるので注意してください。
|
||||
2. アカウント残高は十分なのに、"insufficient quota" と表示されるのはなぜですか?
|
||||
- トークンのクォータが十分かどうかご確認ください。トークンクォータはアカウント残高とは別のものです。
|
||||
- トークンクォータは最大使用量を設定するためのもので、ユーザーが自由に設定できます。
|
||||
+ トークンのクォータが十分かどうかご確認ください。トークンクォータはアカウント残高とは別のものです。
|
||||
+ トークンクォータは最大使用量を設定するためのもので、ユーザーが自由に設定できます。
|
||||
3. チャンネルを使おうとすると "No available channels" と表示されます。どうすればいいですか?
|
||||
- ユーザーとチャンネルグループの設定を確認してください。
|
||||
- チャンネルモデルの設定も確認してください。
|
||||
+ ユーザーとチャンネルグループの設定を確認してください。
|
||||
+ チャンネルモデルの設定も確認してください。
|
||||
4. チャンネルテストがエラーを報告する: "invalid character '<' looking for beginning of value"
|
||||
- このエラーは、返された値が有効な JSON ではなく、HTML ページである場合に発生する。
|
||||
- ほとんどの場合、デプロイサイトの IP かプロキシのノードが CloudFlare によってブロックされています。
|
||||
+ このエラーは、返された値が有効な JSON ではなく、HTML ページである場合に発生する。
|
||||
+ ほとんどの場合、デプロイサイトのIPかプロキシのノードが CloudFlare によってブロックされています。
|
||||
5. ChatGPT Next Web でエラーが発生しました: "Failed to fetch"
|
||||
- デプロイ時に `BASE_URL` を設定しないでください。
|
||||
- インターフェイスアドレスと API Key が正しいか再確認してください。
|
||||
+ デプロイ時に `BASE_URL` を設定しないでください。
|
||||
+ インターフェイスアドレスと API Key が正しいか再確認してください。
|
||||
|
||||
## 関連プロジェクト
|
||||
|
||||
[FastGPT](https://github.com/labring/FastGPT): LLM に基づく知識質問応答システム
|
||||
|
||||
## 注
|
||||
|
||||
本プロジェクトはオープンソースプロジェクトです。OpenAI の[利用規約](https://openai.com/policies/terms-of-use)および**適用される法令**を遵守してご利用ください。違法な目的での利用はご遠慮ください。
|
||||
|
||||
このプロジェクトは MIT ライセンスで公開されています。これに基づき、ページの最下部に帰属表示と本プロジェクトへのリンクを含める必要があります。
|
||||
|
README.md | 231
@@ -2,46 +2,37 @@
|
||||
<strong>中文</strong> | <a href="./README.en.md">English</a> | <a href="./README.ja.md">日本語</a>
|
||||
</p>
|
||||
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/MartialBE/one-api"><img src="https://raw.githubusercontent.com/MartialBE/one-api/main/web/src/assets/images/logo.svg" width="150" height="150" alt="one-api logo"></a>
|
||||
<a href="https://github.com/songquanpeng/one-api"><img src="https://raw.githubusercontent.com/songquanpeng/one-api/main/web/default/public/logo.png" width="150" height="150" alt="one-api logo"></a>
|
||||
</p>
|
||||
|
||||
<div align="center">
|
||||
|
||||
# One API
|
||||
|
||||
_本项目是基于[one-api](https://github.com/songquanpeng/one-api)二次开发而来的,主要将原项目中的模块代码分离,模块化,并修改了前端界面。本项目同样遵循 MIT 协议。_
|
||||
|
||||
<p align="center">
|
||||
<a href="https://raw.githubusercontent.com/MartialBE/one-api/main/LICENSE">
|
||||
<img src="https://img.shields.io/github/license/MartialBE/one-api?color=brightgreen" alt="license">
|
||||
</a>
|
||||
<a href="https://github.com/MartialBE/one-api/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/MartialBE/one-api?color=brightgreen&include_prereleases" alt="release">
|
||||
</a>
|
||||
<a href="https://github.com/users/MartialBE/packages/container/package/one-api">
|
||||
<img src="https://img.shields.io/badge/docker-ghcr.io-blue" alt="docker">
|
||||
</a>
|
||||
<a href="https://goreportcard.com/report/github.com/MartialBE/one-api">
|
||||
<img src="https://goreportcard.com/badge/github.com/MartialBE/one-api" alt="GoReportCard">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
**请不要和原版混用,因为 channel id 不同的原因,会导致数据错乱**
|
||||
|
||||
# 截图展示
|
||||
|
||||

|
||||

|
||||
|
||||
_以下为原项目说明:_
|
||||
|
||||
---
|
||||
|
||||
_✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用 ✨_
|
||||
|
||||
</div>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://raw.githubusercontent.com/songquanpeng/one-api/main/LICENSE">
|
||||
<img src="https://img.shields.io/github/license/songquanpeng/one-api?color=brightgreen" alt="license">
|
||||
</a>
|
||||
<a href="https://github.com/songquanpeng/one-api/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/songquanpeng/one-api?color=brightgreen&include_prereleases" alt="release">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/repository/docker/justsong/one-api">
|
||||
<img src="https://img.shields.io/docker/pulls/justsong/one-api?color=brightgreen" alt="docker pull">
|
||||
</a>
|
||||
<a href="https://github.com/songquanpeng/one-api/releases/latest">
|
||||
<img src="https://img.shields.io/github/downloads/songquanpeng/one-api/total?color=brightgreen&include_prereleases" alt="release">
|
||||
</a>
|
||||
<a href="https://goreportcard.com/report/github.com/songquanpeng/one-api">
|
||||
<img src="https://goreportcard.com/badge/github.com/songquanpeng/one-api" alt="GoReportCard">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/songquanpeng/one-api#部署">部署教程</a>
|
||||
·
|
||||
@@ -62,7 +53,7 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用
|
||||
|
||||
> [!NOTE]
|
||||
> 本项目为开源项目,使用者必须在遵循 OpenAI 的[使用条款](https://openai.com/policies/terms-of-use)以及**法律法规**的情况下使用,不得用于非法用途。
|
||||
>
|
||||
>
|
||||
> 根据[《生成式人工智能服务管理暂行办法》](http://www.cac.gov.cn/2023-07/13/c_1690898327029107.htm)的要求,请勿对中国地区公众提供一切未经备案的生成式人工智能服务。
|
||||
|
||||
> [!WARNING]
|
||||
@@ -72,17 +63,21 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用
|
||||
> 使用 root 用户初次登录系统后,务必修改默认密码 `123456`!
|
||||
|
||||
## 功能
|
||||
|
||||
1. 支持多种大模型:
|
||||
- [x] [OpenAI ChatGPT 系列模型](https://platform.openai.com/docs/guides/gpt/chat-completions-api)(支持 [Azure OpenAI API](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference))
|
||||
- [x] [Anthropic Claude 系列模型](https://anthropic.com)
|
||||
- [x] [Google PaLM2/Gemini 系列模型](https://developers.generativeai.google)
|
||||
- [x] [百度文心一言系列模型](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
|
||||
- [x] [阿里通义千问系列模型](https://help.aliyun.com/document_detail/2400395.html)
|
||||
- [x] [讯飞星火认知大模型](https://www.xfyun.cn/doc/spark/Web.html)
|
||||
- [x] [智谱 ChatGLM 系列模型](https://bigmodel.cn)
|
||||
- [x] [360 智脑](https://ai.360.cn)
|
||||
- [x] [腾讯混元大模型](https://cloud.tencent.com/document/product/1729)
|
||||
+ [x] [OpenAI ChatGPT 系列模型](https://platform.openai.com/docs/guides/gpt/chat-completions-api)(支持 [Azure OpenAI API](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference))
|
||||
+ [x] [Anthropic Claude 系列模型](https://anthropic.com)
|
||||
+ [x] [Google PaLM2/Gemini 系列模型](https://developers.generativeai.google)
|
||||
+ [x] [Mistral 系列模型](https://mistral.ai/)
|
||||
+ [x] [百度文心一言系列模型](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
|
||||
+ [x] [阿里通义千问系列模型](https://help.aliyun.com/document_detail/2400395.html)
|
||||
+ [x] [讯飞星火认知大模型](https://www.xfyun.cn/doc/spark/Web.html)
|
||||
+ [x] [智谱 ChatGLM 系列模型](https://bigmodel.cn)
|
||||
+ [x] [360 智脑](https://ai.360.cn)
|
||||
+ [x] [腾讯混元大模型](https://cloud.tencent.com/document/product/1729)
|
||||
+ [x] [Moonshot AI](https://platform.moonshot.cn/)
|
||||
+ [x] [百川大模型](https://platform.baichuan-ai.com)
|
||||
+ [ ] [字节云雀大模型](https://www.volcengine.com/product/ark) (WIP)
|
||||
+ [x] [MINIMAX](https://api.minimax.chat/)
|
||||
2. 支持配置镜像以及众多[第三方代理服务](https://iamazing.cn/page/openai-api-third-party-services)。
|
||||
3. 支持通过**负载均衡**的方式访问多个渠道。
|
||||
4. 支持 **stream 模式**,可以通过流式传输实现打字机效果。
|
||||
@@ -106,14 +101,13 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用
|
||||
20. 支持通过系统访问令牌访问管理 API(bearer token,用以替代 cookie,你可以自行抓包来查看 API 的用法)。
|
||||
21. 支持 Cloudflare Turnstile 用户校验。
|
||||
22. 支持用户管理,支持**多种用户登录注册方式**:
|
||||
- 邮箱登录注册(支持注册邮箱白名单)以及通过邮箱进行密码重置。
|
||||
- [GitHub 开放授权](https://github.com/settings/applications/new)。
|
||||
- 微信公众号授权(需要额外部署 [WeChat Server](https://github.com/songquanpeng/wechat-server))。
|
||||
+ 邮箱登录注册(支持注册邮箱白名单)以及通过邮箱进行密码重置。
|
||||
+ [GitHub 开放授权](https://github.com/settings/applications/new)。
|
||||
+ 微信公众号授权(需要额外部署 [WeChat Server](https://github.com/songquanpeng/wechat-server))。
|
||||
23. 支持主题切换,设置环境变量 `THEME` 即可,默认为 `default`,欢迎 PR 更多主题,具体参考[此处](./web/README.md)。
|
||||
|
||||
## 部署
|
||||
|
||||
### 基于 Docker 进行部署
|
||||
|
||||
```shell
|
||||
# 使用 SQLite 的部署命令:
|
||||
docker run --name one-api -d --restart always -p 3000:3000 -e TZ=Asia/Shanghai -v /home/ubuntu/data/one-api:/data justsong/one-api
|
||||
@@ -135,11 +129,10 @@ docker run --name one-api -d --restart always -p 3000:3000 -e SQL_DSN="root:1234
|
||||
更新命令:`docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower -cR`
|
||||
|
||||
Nginx 的参考配置:
|
||||
|
||||
```
|
||||
server{
|
||||
server_name openai.justsong.cn; # 请根据实际情况修改你的域名
|
||||
|
||||
|
||||
location / {
|
||||
client_max_body_size 64m;
|
||||
proxy_http_version 1.1;
|
||||
@@ -154,7 +147,6 @@ server{
|
||||
```
|
||||
|
||||
之后使用 Let's Encrypt 的 certbot 配置 HTTPS:
|
||||
|
||||
```bash
|
||||
# Ubuntu 安装 certbot:
|
||||
sudo snap install --classic certbot
|
||||
@@ -168,6 +160,7 @@ sudo service nginx restart
|
||||
|
||||
初始账号用户名为 `root`,密码为 `123456`。
|
||||
|
||||
|
||||
### 基于 Docker Compose 进行部署
|
||||
|
||||
> 仅启动方式不同,参数设置不变,请参考基于 Docker 部署部分
|
||||
@@ -181,23 +174,20 @@ docker-compose ps
|
||||
```
|
||||
|
||||
### 手动部署
|
||||
|
||||
1. 从 [GitHub Releases](https://github.com/songquanpeng/one-api/releases/latest) 下载可执行文件或者从源码编译:
|
||||
|
||||
```shell
|
||||
git clone https://github.com/songquanpeng/one-api.git
|
||||
|
||||
|
||||
# 构建前端
|
||||
cd one-api/web
|
||||
cd one-api/web/default
|
||||
npm install
|
||||
npm run build
|
||||
|
||||
|
||||
# 构建后端
|
||||
cd ..
|
||||
cd ../..
|
||||
go mod download
|
||||
go build -ldflags "-s -w" -o one-api
|
||||
```
|
||||
|
||||
````
|
||||
2. 运行:
|
||||
```shell
|
||||
chmod u+x one-api
|
||||
@@ -208,7 +198,6 @@ docker-compose ps
|
||||
更加详细的部署教程[参见此处](https://iamazing.cn/page/how-to-deploy-a-website)。
|
||||
|
||||
### 多机部署
|
||||
|
||||
1. 所有服务器 `SESSION_SECRET` 设置一样的值。
|
||||
2. 必须设置 `SQL_DSN`,使用 MySQL 数据库而非 SQLite,所有服务器连接同一个数据库。
|
||||
3. 所有从服务器必须设置 `NODE_TYPE` 为 `slave`,不设置则默认为主服务器。
|
||||
@@ -226,11 +215,9 @@ docker-compose ps
|
||||
如果部署后访问出现空白页面,详见 [#97](https://github.com/songquanpeng/one-api/issues/97)。
|
||||
|
||||
### 部署第三方服务配合 One API 使用
|
||||
|
||||
> 欢迎 PR 添加更多示例。
|
||||
|
||||
#### ChatGPT Next Web
|
||||
|
||||
项目主页:https://github.com/Yidadaa/ChatGPT-Next-Web
|
||||
|
||||
```bash
|
||||
@@ -240,7 +227,6 @@ docker run --name chat-next-web -d -p 3001:3000 yidadaa/chatgpt-next-web
|
||||
注意修改端口号,之后在页面上设置接口地址(例如:https://openai.justsong.cn/ )和 API Key 即可。
|
||||
|
||||
#### ChatGPT Web
|
||||
|
||||
项目主页:https://github.com/Chanzhaoyu/chatgpt-web
|
||||
|
||||
```bash
|
||||
@@ -249,16 +235,14 @@ docker run --name chatgpt-web -d -p 3002:3002 -e OPENAI_API_BASE_URL=https://ope
|
||||
|
||||
注意修改端口号、`OPENAI_API_BASE_URL` 和 `OPENAI_API_KEY`。
|
||||
|
||||
#### QChatGPT - QQ 机器人
|
||||
|
||||
#### QChatGPT - QQ机器人
|
||||
项目主页:https://github.com/RockChinQ/QChatGPT
|
||||
|
||||
根据文档完成部署后,在`config.py`设置配置项`openai_config`的`reverse_proxy`为 One API 后端地址,设置`api_key`为 One API 生成的 key,并在配置项`completion_api_params`的`model`参数设置为 One API 支持的模型名称。
|
||||
根据文档完成部署后,在`config.py`设置配置项`openai_config`的`reverse_proxy`为 One API 后端地址,设置`api_key`为 One API 生成的key,并在配置项`completion_api_params`的`model`参数设置为 One API 支持的模型名称。
|
||||
|
||||
可安装 [Switcher 插件](https://github.com/RockChinQ/Switcher)在运行时切换所使用的模型。
|
||||
|
||||
### 部署到第三方平台
|
||||
|
||||
<details>
|
||||
<summary><strong>部署到 Sealos </strong></summary>
|
||||
<div>
|
||||
@@ -283,7 +267,7 @@ docker run --name chatgpt-web -d -p 3002:3002 -e OPENAI_API_BASE_URL=https://ope
|
||||
1. 首先 fork 一份代码。
|
||||
2. 进入 [Zeabur](https://zeabur.com?referralCode=songquanpeng),登录,进入控制台。
|
||||
3. 新建一个 Project,在 Service -> Add Service 选择 Marketplace,选择 MySQL,并记下连接参数(用户名、密码、地址、端口)。
|
||||
4. 复制链接参数,运行 `` create database `one-api` `` 创建数据库。
|
||||
4. 复制链接参数,运行 ```create database `one-api` ``` 创建数据库。
|
||||
5. 然后在 Service -> Add Service,选择 Git(第一次使用需要先授权),选择你 fork 的仓库。
|
||||
6. Deploy 会自动开始,先取消。进入下方 Variable,添加一个 `PORT`,值为 `3000`,再添加一个 `SQL_DSN`,值为 `<username>:<password>@tcp(<addr>:<port>)/one-api` ,然后保存。 注意如果不填写 `SQL_DSN`,数据将无法持久化,重新部署后数据会丢失。
|
||||
7. 选择 Redeploy。
|
||||
@@ -305,7 +289,6 @@ Render 可以直接部署 docker 镜像,不需要 fork 仓库:https://dashbo
|
||||
</details>
|
||||
|
||||
## 配置
|
||||
|
||||
系统本身开箱即用。
|
||||
|
||||
你可以通过设置环境变量或者命令行参数进行配置。
|
||||
@@ -315,7 +298,6 @@ Render 可以直接部署 docker 镜像,不需要 fork 仓库:https://dashbo
|
||||
**Note**:如果你不知道某个配置项的含义,可以临时删掉值以看到进一步的提示文字。
|
||||
|
||||
## 使用方法
|
||||
|
||||
在`渠道`页面中添加你的 API Key,之后在`令牌`页面中新增访问令牌。
|
||||
|
||||
之后就可以使用你的令牌访问 One API 了,使用方式与 [OpenAI API](https://platform.openai.com/docs/api-reference/introduction) 一致。
|
||||
@@ -325,10 +307,9 @@ Render 可以直接部署 docker 镜像,不需要 fork 仓库:https://dashbo
|
||||
注意,具体的 API Base 的格式取决于你所使用的客户端。
|
||||
|
||||
例如对于 OpenAI 的官方库:
|
||||
|
||||
```bash
|
||||
OPENAI_API_KEY="sk-xxxxxx"
|
||||
OPENAI_API_BASE="https://<HOST>:<PORT>/v1"
|
||||
|
||||
```
|
||||
|
||||
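Because the relay mirrors the OpenAI API, it can also be exercised directly; a minimal curl sketch, with host, port and token as placeholders:

```bash
curl https://<HOST>:<PORT>/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer sk-xxxxxx" \
  -d '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello"}]}'
```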
```mermaid
|
||||
@@ -347,106 +328,104 @@ graph LR
|
||||
不加的话将会使用负载均衡的方式使用多个渠道。
|
||||
|
||||
### 环境变量
|
||||
|
||||
1. `REDIS_CONN_STRING`:设置之后将使用 Redis 作为缓存使用。
|
||||
- 例子:`REDIS_CONN_STRING=redis://default:redispw@localhost:49153`
|
||||
- 如果数据库访问延迟很低,没有必要启用 Redis,启用后反而会出现数据滞后的问题。
|
||||
+ 例子:`REDIS_CONN_STRING=redis://default:redispw@localhost:49153`
|
||||
+ 如果数据库访问延迟很低,没有必要启用 Redis,启用后反而会出现数据滞后的问题。
|
||||
2. `SESSION_SECRET`:设置之后将使用固定的会话密钥,这样系统重新启动后已登录用户的 cookie 将依旧有效。
|
||||
- 例子:`SESSION_SECRET=random_string`
|
||||
+ 例子:`SESSION_SECRET=random_string`
|
||||
3. `SQL_DSN`:设置之后将使用指定数据库而非 SQLite,请使用 MySQL 或 PostgreSQL。
|
||||
- 例子:
|
||||
- MySQL:`SQL_DSN=root:123456@tcp(localhost:3306)/oneapi`
|
||||
- PostgreSQL:`SQL_DSN=postgres://postgres:123456@localhost:5432/oneapi`(适配中,欢迎反馈)
|
||||
- 注意需要提前建立数据库 `oneapi`,无需手动建表,程序将自动建表。
|
||||
- 如果使用本地数据库:部署命令可添加 `--network="host"` 以使得容器内的程序可以访问到宿主机上的 MySQL。
|
||||
- 如果使用云数据库:如果云服务器需要验证身份,需要在连接参数中添加 `?tls=skip-verify`。
|
||||
- 请根据你的数据库配置修改下列参数(或者保持默认值):
|
||||
- `SQL_MAX_IDLE_CONNS`:最大空闲连接数,默认为 `100`。
|
||||
- `SQL_MAX_OPEN_CONNS`:最大打开连接数,默认为 `1000`。
|
||||
- 如果报错 `Error 1040: Too many connections`,请适当减小该值。
|
||||
- `SQL_CONN_MAX_LIFETIME`:连接的最大生命周期,默认为 `60`,单位分钟。
|
||||
+ 例子:
|
||||
+ MySQL:`SQL_DSN=root:123456@tcp(localhost:3306)/oneapi`
|
||||
+ PostgreSQL:`SQL_DSN=postgres://postgres:123456@localhost:5432/oneapi`(适配中,欢迎反馈)
|
||||
+ 注意需要提前建立数据库 `oneapi`,无需手动建表,程序将自动建表。
|
||||
+ 如果使用本地数据库:部署命令可添加 `--network="host"` 以使得容器内的程序可以访问到宿主机上的 MySQL。
|
||||
+ 如果使用云数据库:如果云服务器需要验证身份,需要在连接参数中添加 `?tls=skip-verify`。
|
||||
+ 请根据你的数据库配置修改下列参数(或者保持默认值):
|
||||
+ `SQL_MAX_IDLE_CONNS`:最大空闲连接数,默认为 `100`。
|
||||
+ `SQL_MAX_OPEN_CONNS`:最大打开连接数,默认为 `1000`。
|
||||
+ 如果报错 `Error 1040: Too many connections`,请适当减小该值。
|
||||
+ `SQL_CONN_MAX_LIFETIME`:连接的最大生命周期,默认为 `60`,单位分钟。
|
||||
4. `FRONTEND_BASE_URL`:设置之后将重定向页面请求到指定的地址,仅限从服务器设置。
|
||||
- 例子:`FRONTEND_BASE_URL=https://openai.justsong.cn`
|
||||
+ 例子:`FRONTEND_BASE_URL=https://openai.justsong.cn`
|
||||
5. `MEMORY_CACHE_ENABLED`:启用内存缓存,会导致用户额度的更新存在一定的延迟,可选值为 `true` 和 `false`,未设置则默认为 `false`。
|
||||
- 例子:`MEMORY_CACHE_ENABLED=true`
|
||||
+ 例子:`MEMORY_CACHE_ENABLED=true`
|
||||
6. `SYNC_FREQUENCY`:在启用缓存的情况下与数据库同步配置的频率,单位为秒,默认为 `600` 秒。
|
||||
- 例子:`SYNC_FREQUENCY=60`
|
||||
+ 例子:`SYNC_FREQUENCY=60`
|
||||
7. `NODE_TYPE`:设置之后将指定节点类型,可选值为 `master` 和 `slave`,未设置则默认为 `master`。
|
||||
- 例子:`NODE_TYPE=slave`
|
||||
+ 例子:`NODE_TYPE=slave`
|
||||
8. `CHANNEL_UPDATE_FREQUENCY`:设置之后将定期更新渠道余额,单位为分钟,未设置则不进行更新。
|
||||
- 例子:`CHANNEL_UPDATE_FREQUENCY=1440`
|
||||
+ 例子:`CHANNEL_UPDATE_FREQUENCY=1440`
|
||||
9. `CHANNEL_TEST_FREQUENCY`:设置之后将定期检查渠道,单位为分钟,未设置则不进行检查。
|
||||
- 例子:`CHANNEL_TEST_FREQUENCY=1440`
|
||||
+ 例子:`CHANNEL_TEST_FREQUENCY=1440`
|
||||
10. `POLLING_INTERVAL`:批量更新渠道余额以及测试可用性时的请求间隔,单位为秒,默认无间隔。
|
||||
- 例子:`POLLING_INTERVAL=5`
|
||||
+ 例子:`POLLING_INTERVAL=5`
|
||||
11. `BATCH_UPDATE_ENABLED`:启用数据库批量更新聚合,会导致用户额度的更新存在一定的延迟,可选值为 `true` 和 `false`,未设置则默认为 `false`。
|
||||
- 例子:`BATCH_UPDATE_ENABLED=true`
|
||||
- 如果你遇到了数据库连接数过多的问题,可以尝试启用该选项。
|
||||
+ 例子:`BATCH_UPDATE_ENABLED=true`
|
||||
+ 如果你遇到了数据库连接数过多的问题,可以尝试启用该选项。
|
||||
12. `BATCH_UPDATE_INTERVAL=5`:批量更新聚合的时间间隔,单位为秒,默认为 `5`。
|
||||
- 例子:`BATCH_UPDATE_INTERVAL=5`
|
||||
+ 例子:`BATCH_UPDATE_INTERVAL=5`
|
||||
13. 请求频率限制:
|
||||
- `GLOBAL_API_RATE_LIMIT`:全局 API 速率限制(除中继请求外),单 ip 三分钟内的最大请求数,默认为 `180`。
|
||||
- `GLOBAL_WEB_RATE_LIMIT`:全局 Web 速率限制,单 ip 三分钟内的最大请求数,默认为 `60`。
|
||||
+ `GLOBAL_API_RATE_LIMIT`:全局 API 速率限制(除中继请求外),单 ip 三分钟内的最大请求数,默认为 `180`。
|
||||
+ `GLOBAL_WEB_RATE_LIMIT`:全局 Web 速率限制,单 ip 三分钟内的最大请求数,默认为 `60`。
|
||||
14. 编码器缓存设置:
|
||||
- `TIKTOKEN_CACHE_DIR`:默认程序启动时会联网下载一些通用的词元的编码,如:`gpt-3.5-turbo`,在一些网络环境不稳定,或者离线情况,可能会导致启动有问题,可以配置此目录缓存数据,可迁移到离线环境。
|
||||
- `DATA_GYM_CACHE_DIR`:目前该配置作用与 `TIKTOKEN_CACHE_DIR` 一致,但是优先级没有它高。
|
||||
+ `TIKTOKEN_CACHE_DIR`:默认程序启动时会联网下载一些通用的词元的编码,如:`gpt-3.5-turbo`,在一些网络环境不稳定,或者离线情况,可能会导致启动有问题,可以配置此目录缓存数据,可迁移到离线环境。
|
||||
+ `DATA_GYM_CACHE_DIR`:目前该配置作用与 `TIKTOKEN_CACHE_DIR` 一致,但是优先级没有它高。
|
||||
15. `RELAY_TIMEOUT`:中继超时设置,单位为秒,默认不设置超时时间。
|
||||
16. `SQLITE_BUSY_TIMEOUT`:SQLite 锁等待超时设置,单位为毫秒,默认 `3000`。
|
||||
17. `GEMINI_SAFETY_SETTING`:Gemini 的安全设置,默认 `BLOCK_NONE`。
|
||||
18. `THEME`:系统的主题设置,默认为 `default`,具体可选值参考[此处](./web/README.md)。
|
||||
|
||||
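Several of the options above are usually combined; the sketch below shows one plausible single-node setup with Redis caching, batched quota updates and an offline tokenizer cache (variable names are the documented ones, values are placeholders):

```bash
export REDIS_CONN_STRING="redis://default:redispw@localhost:49153"
export MEMORY_CACHE_ENABLED=true
export SYNC_FREQUENCY=60
export BATCH_UPDATE_ENABLED=true
export BATCH_UPDATE_INTERVAL=5
# Pre-populated cache directory so startup works without internet access
export TIKTOKEN_CACHE_DIR=/opt/one-api/tiktoken-cache
export RELAY_TIMEOUT=120
```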
### 命令行参数
|
||||
|
||||
1. `--port <port_number>`: 指定服务器监听的端口号,默认为 `3000`。
|
||||
- 例子:`--port 3000`
|
||||
+ 例子:`--port 3000`
|
||||
2. `--log-dir <log_dir>`: 指定日志文件夹,如果没有设置,默认保存至工作目录的 `logs` 文件夹下。
|
||||
- 例子:`--log-dir ./logs`
|
||||
+ 例子:`--log-dir ./logs`
|
||||
3. `--version`: 打印系统版本号并退出。
|
||||
4. `--help`: 查看命令的使用帮助和参数说明。
|
||||
|
||||
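Putting the flags above together, a typical manual start looks like this:

```bash
# Listen on the default port and keep logs in ./logs under the working directory
./one-api --port 3000 --log-dir ./logs
```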
## 演示
|
||||
|
||||
### 在线演示
|
||||
|
||||
注意,该演示站不提供对外服务:
|
||||
https://openai.justsong.cn
|
||||
|
||||
### 截图展示
|
||||
|
||||

|
||||

|
||||
|
||||
## 常见问题
|
||||
|
||||
1. 额度是什么?怎么计算的?One API 的额度计算有问题?
|
||||
- 额度 = 分组倍率 _ 模型倍率 _ (提示 token 数 + 补全 token 数 \* 补全倍率)
|
||||
- 其中补全倍率对于 GPT3.5 固定为 1.33,GPT4 为 2,与官方保持一致。
|
||||
- 如果是非流模式,官方接口会返回消耗的总 token,但是你要注意提示和补全的消耗倍率不一样。
|
||||
- 注意,One API 的默认倍率就是官方倍率,是已经调整过的。
|
||||
+ 额度 = 分组倍率 * 模型倍率 * (提示 token 数 + 补全 token 数 * 补全倍率)
|
||||
+ 其中补全倍率对于 GPT3.5 固定为 1.33,GPT4 为 2,与官方保持一致。
|
||||
+ 如果是非流模式,官方接口会返回消耗的总 token,但是你要注意提示和补全的消耗倍率不一样。
|
||||
+ 注意,One API 的默认倍率就是官方倍率,是已经调整过的。(本节末尾附有一个简单的额度计算示例。)
|
||||
2. 账户额度足够为什么提示额度不足?
|
||||
- 请检查你的令牌额度是否足够,这个和账户额度是分开的。
|
||||
- 令牌额度仅供用户设置最大使用量,用户可自由设置。
|
||||
+ 请检查你的令牌额度是否足够,这个和账户额度是分开的。
|
||||
+ 令牌额度仅供用户设置最大使用量,用户可自由设置。
|
||||
3. 提示无可用渠道?
|
||||
- 请检查你的用户分组和渠道分组设置。
|
||||
- 以及渠道的模型设置。
|
||||
+ 请检查你的用户分组和渠道分组设置。
|
||||
+ 以及渠道的模型设置。
|
||||
4. 渠道测试报错:`invalid character '<' looking for beginning of value`
|
||||
- 这是因为返回值不是合法的 JSON,而是一个 HTML 页面。
|
||||
- 大概率是你的部署站的 IP 或代理的节点被 CloudFlare 封禁了。
|
||||
+ 这是因为返回值不是合法的 JSON,而是一个 HTML 页面。
|
||||
+ 大概率是你的部署站的 IP 或代理的节点被 CloudFlare 封禁了。
|
||||
5. ChatGPT Next Web 报错:`Failed to fetch`
|
||||
- 部署的时候不要设置 `BASE_URL`。
|
||||
- 检查你的接口地址和 API Key 有没有填对。
|
||||
- 检查是否启用了 HTTPS,浏览器会拦截 HTTPS 域名下的 HTTP 请求。
|
||||
+ 部署的时候不要设置 `BASE_URL`。
|
||||
+ 检查你的接口地址和 API Key 有没有填对。
|
||||
+ 检查是否启用了 HTTPS,浏览器会拦截 HTTPS 域名下的 HTTP 请求。
|
||||
6. 报错:`当前分组负载已饱和,请稍后再试`
|
||||
- 上游通道 429 了。
|
||||
+ 上游通道 429 了。
|
||||
7. 升级之后我的数据会丢失吗?
|
||||
- 如果使用 MySQL,不会。
|
||||
- 如果使用 SQLite,需要按照我所给的部署命令挂载 volume 持久化 one-api.db 数据库文件,否则容器重启后数据会丢失。
|
||||
+ 如果使用 MySQL,不会。
|
||||
+ 如果使用 SQLite,需要按照我所给的部署命令挂载 volume 持久化 one-api.db 数据库文件,否则容器重启后数据会丢失。
|
||||
8. 升级之前数据库需要做变更吗?
|
||||
- 一般情况下不需要,系统将在初始化的时候自动调整。
|
||||
- 如果需要的话,我会在更新日志中说明,并给出脚本。
|
||||
+ 一般情况下不需要,系统将在初始化的时候自动调整。
|
||||
+ 如果需要的话,我会在更新日志中说明,并给出脚本。
|
||||
9. 手动修改数据库后报错:`数据库一致性已被破坏,请联系管理员`?
|
||||
+ 这是检测到 ability 表里有些记录的通道 id 是不存在的,这大概率是因为你删了 channel 表里的记录但是没有同步在 ability 表里清理无效的通道。
|
||||
+ 对于每一个通道,其所支持的模型都需要有一个专门的 ability 表的记录,表示该通道支持该模型。
|
||||
|
||||
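As a worked example of the quota formula in question 1 above (numbers are illustrative): with the default group ratio of 1, a GPT-4 request (model ratio 15, completion ratio 2) that uses 1000 prompt tokens and 500 completion tokens consumes 1 × 15 × (1000 + 500 × 2) = 30,000 quota; at the default rate of 500,000 quota per US dollar that is $0.06, which lines up with the official GPT-4 per-token pricing.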
## 相关项目
|
||||
|
||||
- [FastGPT](https://github.com/labring/FastGPT): 基于 LLM 大语言模型的知识库问答系统
|
||||
- [ChatGPT Next Web](https://github.com/Yidadaa/ChatGPT-Next-Web): 一键拥有你自己的跨平台 ChatGPT 应用
|
||||
* [FastGPT](https://github.com/labring/FastGPT): 基于 LLM 大语言模型的知识库问答系统
|
||||
* [ChatGPT Next Web](https://github.com/Yidadaa/ChatGPT-Next-Web): 一键拥有你自己的跨平台 ChatGPT 应用
|
||||
|
||||
## 注意
|
||||
|
||||
|
257 common/client.go (deleted)
@@ -1,257 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"one-api/types"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
var HttpClient *http.Client
|
||||
|
||||
func init() {
|
||||
if RelayTimeout == 0 {
|
||||
HttpClient = &http.Client{}
|
||||
} else {
|
||||
HttpClient = &http.Client{
|
||||
Timeout: time.Duration(RelayTimeout) * time.Second,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
requestBuilder RequestBuilder
|
||||
CreateFormBuilder func(io.Writer) FormBuilder
|
||||
}
|
||||
|
||||
func NewClient() *Client {
|
||||
return &Client{
|
||||
requestBuilder: NewRequestBuilder(),
|
||||
CreateFormBuilder: func(body io.Writer) FormBuilder {
|
||||
return NewFormBuilder(body)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type requestOptions struct {
|
||||
body any
|
||||
header http.Header
|
||||
}
|
||||
|
||||
type requestOption func(*requestOptions)
|
||||
|
||||
type Stringer interface {
|
||||
GetString() *string
|
||||
}
|
||||
|
||||
func WithBody(body any) requestOption {
|
||||
return func(args *requestOptions) {
|
||||
args.body = body
|
||||
}
|
||||
}
|
||||
|
||||
func WithHeader(header map[string]string) requestOption {
|
||||
return func(args *requestOptions) {
|
||||
for k, v := range header {
|
||||
args.header.Set(k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func WithContentType(contentType string) requestOption {
|
||||
return func(args *requestOptions) {
|
||||
args.header.Set("Content-Type", contentType)
|
||||
}
|
||||
}
|
||||
|
||||
type RequestError struct {
|
||||
HTTPStatusCode int
|
||||
Err error
|
||||
}
|
||||
|
||||
func (c *Client) NewRequest(method, url string, setters ...requestOption) (*http.Request, error) {
|
||||
// Default Options
|
||||
args := &requestOptions{
|
||||
body: nil,
|
||||
header: make(http.Header),
|
||||
}
|
||||
for _, setter := range setters {
|
||||
setter(args)
|
||||
}
|
||||
req, err := c.requestBuilder.Build(method, url, args.body, args.header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func SendRequest(req *http.Request, response any, outputResp bool) (*http.Response, *types.OpenAIErrorWithStatusCode) {
|
||||
// 发送请求
|
||||
resp, err := HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
if !outputResp {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
|
||||
// 处理响应
|
||||
if IsFailureStatusCode(resp) {
|
||||
return nil, HandleErrorResp(resp)
|
||||
}
|
||||
|
||||
// 解析响应
|
||||
if outputResp {
|
||||
var buf bytes.Buffer
|
||||
tee := io.TeeReader(resp.Body, &buf)
|
||||
err = DecodeResponse(tee, response)
|
||||
|
||||
// 将响应体重新写入 resp.Body
|
||||
resp.Body = io.NopCloser(&buf)
|
||||
} else {
|
||||
err = DecodeResponse(resp.Body, response)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, ErrorWrapper(err, "decode_response_failed", http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
if outputResp {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type GeneralErrorResponse struct {
|
||||
Error types.OpenAIError `json:"error"`
|
||||
Message string `json:"message"`
|
||||
Msg string `json:"msg"`
|
||||
Err string `json:"err"`
|
||||
ErrorMsg string `json:"error_msg"`
|
||||
Header struct {
|
||||
Message string `json:"message"`
|
||||
} `json:"header"`
|
||||
Response struct {
|
||||
Error struct {
|
||||
Message string `json:"message"`
|
||||
} `json:"error"`
|
||||
} `json:"response"`
|
||||
}
|
||||
|
||||
func (e GeneralErrorResponse) ToMessage() string {
|
||||
if e.Error.Message != "" {
|
||||
return e.Error.Message
|
||||
}
|
||||
if e.Message != "" {
|
||||
return e.Message
|
||||
}
|
||||
if e.Msg != "" {
|
||||
return e.Msg
|
||||
}
|
||||
if e.Err != "" {
|
||||
return e.Err
|
||||
}
|
||||
if e.ErrorMsg != "" {
|
||||
return e.ErrorMsg
|
||||
}
|
||||
if e.Header.Message != "" {
|
||||
return e.Header.Message
|
||||
}
|
||||
if e.Response.Error.Message != "" {
|
||||
return e.Response.Error.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// 处理错误响应
|
||||
func HandleErrorResp(resp *http.Response) (openAIErrorWithStatusCode *types.OpenAIErrorWithStatusCode) {
|
||||
openAIErrorWithStatusCode = &types.OpenAIErrorWithStatusCode{
|
||||
StatusCode: resp.StatusCode,
|
||||
OpenAIError: types.OpenAIError{
|
||||
Message: "",
|
||||
Type: "upstream_error",
|
||||
Code: "bad_response_status_code",
|
||||
Param: strconv.Itoa(resp.StatusCode),
|
||||
},
|
||||
}
|
||||
responseBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = resp.Body.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// var errorResponse types.OpenAIErrorResponse
|
||||
var errorResponse GeneralErrorResponse
|
||||
err = json.Unmarshal(responseBody, &errorResponse)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if errorResponse.Error.Message != "" {
|
||||
// OpenAI format error, so we override the default one
|
||||
openAIErrorWithStatusCode.OpenAIError = errorResponse.Error
|
||||
} else {
|
||||
openAIErrorWithStatusCode.OpenAIError.Message = errorResponse.ToMessage()
|
||||
}
|
||||
if openAIErrorWithStatusCode.OpenAIError.Message == "" {
|
||||
openAIErrorWithStatusCode.OpenAIError.Message = fmt.Sprintf("bad response status code %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Client) SendRequestRaw(req *http.Request) (body io.ReadCloser, err error) {
|
||||
resp, err := HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
func IsFailureStatusCode(resp *http.Response) bool {
|
||||
return resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest
|
||||
}
|
||||
|
||||
func DecodeResponse(body io.Reader, v any) error {
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if result, ok := v.(*string); ok {
|
||||
return DecodeString(body, result)
|
||||
}
|
||||
|
||||
if stringer, ok := v.(Stringer); ok {
|
||||
return DecodeString(body, stringer.GetString())
|
||||
}
|
||||
|
||||
return json.NewDecoder(body).Decode(v)
|
||||
}
|
||||
|
||||
func DecodeString(body io.Reader, output *string) error {
|
||||
b, err := io.ReadAll(body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*output = string(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func SetEventStreamHeaders(c *gin.Context) {
|
||||
c.Writer.Header().Set("Content-Type", "text/event-stream")
|
||||
c.Writer.Header().Set("Cache-Control", "no-cache")
|
||||
c.Writer.Header().Set("Connection", "keep-alive")
|
||||
c.Writer.Header().Set("Transfer-Encoding", "chunked")
|
||||
c.Writer.Header().Set("X-Accel-Buffering", "no")
|
||||
}
|
127 common/config/config.go (new file)
@@ -0,0 +1,127 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"github.com/songquanpeng/one-api/common/helper"
|
||||
"os"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
var SystemName = "One API"
|
||||
var ServerAddress = "http://localhost:3000"
|
||||
var Footer = ""
|
||||
var Logo = ""
|
||||
var TopUpLink = ""
|
||||
var ChatLink = ""
|
||||
var QuotaPerUnit = 500 * 1000.0 // $0.002 / 1K tokens
|
||||
var DisplayInCurrencyEnabled = true
|
||||
var DisplayTokenStatEnabled = true
|
||||
|
||||
// Any options with "Secret", "Token" in its key won't be return by GetOptions
|
||||
|
||||
var SessionSecret = uuid.New().String()
|
||||
|
||||
var OptionMap map[string]string
|
||||
var OptionMapRWMutex sync.RWMutex
|
||||
|
||||
var ItemsPerPage = 10
|
||||
var MaxRecentItems = 100
|
||||
|
||||
var PasswordLoginEnabled = true
|
||||
var PasswordRegisterEnabled = true
|
||||
var EmailVerificationEnabled = false
|
||||
var GitHubOAuthEnabled = false
|
||||
var WeChatAuthEnabled = false
|
||||
var TurnstileCheckEnabled = false
|
||||
var RegisterEnabled = true
|
||||
|
||||
var EmailDomainRestrictionEnabled = false
|
||||
var EmailDomainWhitelist = []string{
|
||||
"gmail.com",
|
||||
"163.com",
|
||||
"126.com",
|
||||
"qq.com",
|
||||
"outlook.com",
|
||||
"hotmail.com",
|
||||
"icloud.com",
|
||||
"yahoo.com",
|
||||
"foxmail.com",
|
||||
}
|
||||
|
||||
var DebugEnabled = os.Getenv("DEBUG") == "true"
|
||||
var MemoryCacheEnabled = os.Getenv("MEMORY_CACHE_ENABLED") == "true"
|
||||
|
||||
var LogConsumeEnabled = true
|
||||
|
||||
var SMTPServer = ""
|
||||
var SMTPPort = 587
|
||||
var SMTPAccount = ""
|
||||
var SMTPFrom = ""
|
||||
var SMTPToken = ""
|
||||
|
||||
var GitHubClientId = ""
|
||||
var GitHubClientSecret = ""
|
||||
|
||||
var WeChatServerAddress = ""
|
||||
var WeChatServerToken = ""
|
||||
var WeChatAccountQRCodeImageURL = ""
|
||||
|
||||
var TurnstileSiteKey = ""
|
||||
var TurnstileSecretKey = ""
|
||||
|
||||
var QuotaForNewUser = 0
|
||||
var QuotaForInviter = 0
|
||||
var QuotaForInvitee = 0
|
||||
var ChannelDisableThreshold = 5.0
|
||||
var AutomaticDisableChannelEnabled = false
|
||||
var AutomaticEnableChannelEnabled = false
|
||||
var QuotaRemindThreshold = 1000
|
||||
var PreConsumedQuota = 500
|
||||
var ApproximateTokenEnabled = false
|
||||
var RetryTimes = 0
|
||||
|
||||
var RootUserEmail = ""
|
||||
|
||||
var IsMasterNode = os.Getenv("NODE_TYPE") != "slave"
|
||||
|
||||
var requestInterval, _ = strconv.Atoi(os.Getenv("POLLING_INTERVAL"))
|
||||
var RequestInterval = time.Duration(requestInterval) * time.Second
|
||||
|
||||
var SyncFrequency = helper.GetOrDefaultEnvInt("SYNC_FREQUENCY", 10*60) // unit is second
|
||||
|
||||
var BatchUpdateEnabled = false
|
||||
var BatchUpdateInterval = helper.GetOrDefaultEnvInt("BATCH_UPDATE_INTERVAL", 5)
|
||||
|
||||
var RelayTimeout = helper.GetOrDefaultEnvInt("RELAY_TIMEOUT", 0) // unit is second
|
||||
|
||||
var GeminiSafetySetting = helper.GetOrDefaultEnvString("GEMINI_SAFETY_SETTING", "BLOCK_NONE")
|
||||
|
||||
var Theme = helper.GetOrDefaultEnvString("THEME", "default")
|
||||
var ValidThemes = map[string]bool{
|
||||
"default": true,
|
||||
"berry": true,
|
||||
}
|
||||
|
||||
// All duration's unit is seconds
|
||||
// Shouldn't larger then RateLimitKeyExpirationDuration
|
||||
var (
|
||||
GlobalApiRateLimitNum = helper.GetOrDefaultEnvInt("GLOBAL_API_RATE_LIMIT", 180)
|
||||
GlobalApiRateLimitDuration int64 = 3 * 60
|
||||
|
||||
GlobalWebRateLimitNum = helper.GetOrDefaultEnvInt("GLOBAL_WEB_RATE_LIMIT", 60)
|
||||
GlobalWebRateLimitDuration int64 = 3 * 60
|
||||
|
||||
UploadRateLimitNum = 10
|
||||
UploadRateLimitDuration int64 = 60
|
||||
|
||||
DownloadRateLimitNum = 10
|
||||
DownloadRateLimitDuration int64 = 60
|
||||
|
||||
CriticalRateLimitNum = 20
|
||||
CriticalRateLimitDuration int64 = 20 * 60
|
||||
)
|
||||
|
||||
var RateLimitKeyExpirationDuration = 20 * time.Minute
|
@@ -1,106 +1,9 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
import "time"
|
||||
|
||||
var StartTime = time.Now().Unix() // unit: second
|
||||
var Version = "v0.0.0" // this hard coding will be replaced automatically when building, no need to manually change
|
||||
var SystemName = "One API"
|
||||
var ServerAddress = "http://localhost:3000"
|
||||
var Footer = ""
|
||||
var Logo = ""
|
||||
var TopUpLink = ""
|
||||
var ChatLink = ""
|
||||
var QuotaPerUnit = 500 * 1000.0 // $0.002 / 1K tokens
|
||||
var DisplayInCurrencyEnabled = true
|
||||
var DisplayTokenStatEnabled = true
|
||||
|
||||
// Any options with "Secret", "Token" in its key won't be return by GetOptions
|
||||
|
||||
var SessionSecret = uuid.New().String()
|
||||
|
||||
var OptionMap map[string]string
|
||||
var OptionMapRWMutex sync.RWMutex
|
||||
|
||||
var ItemsPerPage = 10
|
||||
var MaxRecentItems = 100
|
||||
|
||||
var PasswordLoginEnabled = true
|
||||
var PasswordRegisterEnabled = true
|
||||
var EmailVerificationEnabled = false
|
||||
var GitHubOAuthEnabled = false
|
||||
var WeChatAuthEnabled = false
|
||||
var TurnstileCheckEnabled = false
|
||||
var RegisterEnabled = true
|
||||
|
||||
var EmailDomainRestrictionEnabled = false
|
||||
var EmailDomainWhitelist = []string{
|
||||
"gmail.com",
|
||||
"163.com",
|
||||
"126.com",
|
||||
"qq.com",
|
||||
"outlook.com",
|
||||
"hotmail.com",
|
||||
"icloud.com",
|
||||
"yahoo.com",
|
||||
"foxmail.com",
|
||||
}
|
||||
|
||||
var DebugEnabled = os.Getenv("DEBUG") == "true"
|
||||
var MemoryCacheEnabled = os.Getenv("MEMORY_CACHE_ENABLED") == "true"
|
||||
|
||||
var LogConsumeEnabled = true
|
||||
|
||||
var SMTPServer = ""
|
||||
var SMTPPort = 587
|
||||
var SMTPAccount = ""
|
||||
var SMTPFrom = ""
|
||||
var SMTPToken = ""
|
||||
|
||||
var GitHubClientId = ""
|
||||
var GitHubClientSecret = ""
|
||||
|
||||
var WeChatServerAddress = ""
|
||||
var WeChatServerToken = ""
|
||||
var WeChatAccountQRCodeImageURL = ""
|
||||
|
||||
var TurnstileSiteKey = ""
|
||||
var TurnstileSecretKey = ""
|
||||
|
||||
var QuotaForNewUser = 0
|
||||
var QuotaForInviter = 0
|
||||
var QuotaForInvitee = 0
|
||||
var ChannelDisableThreshold = 5.0
|
||||
var AutomaticDisableChannelEnabled = false
|
||||
var AutomaticEnableChannelEnabled = false
|
||||
var QuotaRemindThreshold = 1000
|
||||
var PreConsumedQuota = 500
|
||||
var ApproximateTokenEnabled = false
|
||||
var RetryTimes = 0
|
||||
|
||||
var RootUserEmail = ""
|
||||
|
||||
var IsMasterNode = os.Getenv("NODE_TYPE") != "slave"
|
||||
|
||||
var requestInterval, _ = strconv.Atoi(os.Getenv("POLLING_INTERVAL"))
|
||||
var RequestInterval = time.Duration(requestInterval) * time.Second
|
||||
|
||||
var SyncFrequency = GetOrDefault("SYNC_FREQUENCY", 10*60) // unit is second
|
||||
|
||||
var BatchUpdateEnabled = false
|
||||
var BatchUpdateInterval = GetOrDefault("BATCH_UPDATE_INTERVAL", 5)
|
||||
|
||||
var RelayTimeout = GetOrDefault("RELAY_TIMEOUT", 0) // unit is second
|
||||
|
||||
const (
|
||||
RequestIdKey = "X-Oneapi-Request-Id"
|
||||
)
|
||||
|
||||
const (
|
||||
RoleGuestUser = 0
|
||||
@@ -109,34 +12,6 @@ const (
|
||||
RoleRootUser = 100
|
||||
)
|
||||
|
||||
var (
|
||||
FileUploadPermission = RoleGuestUser
|
||||
FileDownloadPermission = RoleGuestUser
|
||||
ImageUploadPermission = RoleGuestUser
|
||||
ImageDownloadPermission = RoleGuestUser
|
||||
)
|
||||
|
||||
// All duration's unit is seconds
|
||||
// Shouldn't larger then RateLimitKeyExpirationDuration
|
||||
var (
|
||||
GlobalApiRateLimitNum = GetOrDefault("GLOBAL_API_RATE_LIMIT", 180)
|
||||
GlobalApiRateLimitDuration int64 = 3 * 60
|
||||
|
||||
GlobalWebRateLimitNum = GetOrDefault("GLOBAL_WEB_RATE_LIMIT", 100)
|
||||
GlobalWebRateLimitDuration int64 = 3 * 60
|
||||
|
||||
UploadRateLimitNum = 10
|
||||
UploadRateLimitDuration int64 = 60
|
||||
|
||||
DownloadRateLimitNum = 10
|
||||
DownloadRateLimitDuration int64 = 60
|
||||
|
||||
CriticalRateLimitNum = 20
|
||||
CriticalRateLimitDuration int64 = 20 * 60
|
||||
)
|
||||
|
||||
var RateLimitKeyExpirationDuration = 20 * time.Minute
|
||||
|
||||
const (
|
||||
UserStatusEnabled = 1 // don't use 0, 0 is the default value!
|
||||
UserStatusDisabled = 2 // also don't use 0
|
||||
@@ -163,74 +38,75 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
ChannelTypeUnknown = 0
|
||||
ChannelTypeOpenAI = 1
|
||||
ChannelTypeAPI2D = 2
|
||||
ChannelTypeAzure = 3
|
||||
ChannelTypeCloseAI = 4
|
||||
ChannelTypeOpenAISB = 5
|
||||
ChannelTypeOpenAIMax = 6
|
||||
ChannelTypeOhMyGPT = 7
|
||||
ChannelTypeCustom = 8
|
||||
ChannelTypeAILS = 9
|
||||
ChannelTypeAIProxy = 10
|
||||
ChannelTypePaLM = 11
|
||||
ChannelTypeAPI2GPT = 12
|
||||
ChannelTypeAIGC2D = 13
|
||||
ChannelTypeAnthropic = 14
|
||||
ChannelTypeBaidu = 15
|
||||
ChannelTypeZhipu = 16
|
||||
ChannelTypeAli = 17
|
||||
ChannelTypeXunfei = 18
|
||||
ChannelType360 = 19
|
||||
ChannelTypeOpenRouter = 20
|
||||
ChannelTypeAIProxyLibrary = 21
|
||||
ChannelTypeFastGPT = 22
|
||||
ChannelTypeTencent = 23
|
||||
ChannelTypeAzureSpeech = 24
|
||||
ChannelTypeGemini = 25
|
||||
ChannelTypeUnknown = iota
|
||||
ChannelTypeOpenAI
|
||||
ChannelTypeAPI2D
|
||||
ChannelTypeAzure
|
||||
ChannelTypeCloseAI
|
||||
ChannelTypeOpenAISB
|
||||
ChannelTypeOpenAIMax
|
||||
ChannelTypeOhMyGPT
|
||||
ChannelTypeCustom
|
||||
ChannelTypeAILS
|
||||
ChannelTypeAIProxy
|
||||
ChannelTypePaLM
|
||||
ChannelTypeAPI2GPT
|
||||
ChannelTypeAIGC2D
|
||||
ChannelTypeAnthropic
|
||||
ChannelTypeBaidu
|
||||
ChannelTypeZhipu
|
||||
ChannelTypeAli
|
||||
ChannelTypeXunfei
|
||||
ChannelType360
|
||||
ChannelTypeOpenRouter
|
||||
ChannelTypeAIProxyLibrary
|
||||
ChannelTypeFastGPT
|
||||
ChannelTypeTencent
|
||||
ChannelTypeGemini
|
||||
ChannelTypeMoonshot
|
||||
ChannelTypeBaichuan
|
||||
ChannelTypeMinimax
|
||||
ChannelTypeMistral
|
||||
|
||||
ChannelTypeDummy
|
||||
)
|
||||
|
||||
var ChannelBaseURLs = []string{
|
||||
"", // 0
|
||||
"https://api.openai.com", // 1
|
||||
"https://oa.api2d.net", // 2
|
||||
"", // 3
|
||||
"https://api.closeai-proxy.xyz", // 4
|
||||
"https://api.openai-sb.com", // 5
|
||||
"https://api.openaimax.com", // 6
|
||||
"https://api.ohmygpt.com", // 7
|
||||
"", // 8
|
||||
"https://api.caipacity.com", // 9
|
||||
"https://api.aiproxy.io", // 10
|
||||
"", // 11
|
||||
"https://api.api2gpt.com", // 12
|
||||
"https://api.aigc2d.com", // 13
|
||||
"https://api.anthropic.com", // 14
|
||||
"https://aip.baidubce.com", // 15
|
||||
"https://open.bigmodel.cn", // 16
|
||||
"https://dashscope.aliyuncs.com", // 17
|
||||
"", // 18
|
||||
"https://ai.360.cn", // 19
|
||||
"https://openrouter.ai/api", // 20
|
||||
"https://api.aiproxy.io", // 21
|
||||
"https://fastgpt.run/api/openapi", // 22
|
||||
"https://hunyuan.cloud.tencent.com", //23
|
||||
"", //24
|
||||
"", //25
|
||||
"", // 0
|
||||
"https://api.openai.com", // 1
|
||||
"https://oa.api2d.net", // 2
|
||||
"", // 3
|
||||
"https://api.closeai-proxy.xyz", // 4
|
||||
"https://api.openai-sb.com", // 5
|
||||
"https://api.openaimax.com", // 6
|
||||
"https://api.ohmygpt.com", // 7
|
||||
"", // 8
|
||||
"https://api.caipacity.com", // 9
|
||||
"https://api.aiproxy.io", // 10
|
||||
"https://generativelanguage.googleapis.com", // 11
|
||||
"https://api.api2gpt.com", // 12
|
||||
"https://api.aigc2d.com", // 13
|
||||
"https://api.anthropic.com", // 14
|
||||
"https://aip.baidubce.com", // 15
|
||||
"https://open.bigmodel.cn", // 16
|
||||
"https://dashscope.aliyuncs.com", // 17
|
||||
"", // 18
|
||||
"https://ai.360.cn", // 19
|
||||
"https://openrouter.ai/api", // 20
|
||||
"https://api.aiproxy.io", // 21
|
||||
"https://fastgpt.run/api/openapi", // 22
|
||||
"https://hunyuan.cloud.tencent.com", // 23
|
||||
"https://generativelanguage.googleapis.com", // 24
|
||||
"https://api.moonshot.cn", // 25
|
||||
"https://api.baichuan-ai.com", // 26
|
||||
"https://api.minimax.chat", // 27
|
||||
"https://api.mistral.ai", // 28
|
||||
}
|
||||
|
||||
const (
|
||||
RelayModeUnknown = iota
|
||||
RelayModeChatCompletions
|
||||
RelayModeCompletions
|
||||
RelayModeEmbeddings
|
||||
RelayModeModerations
|
||||
RelayModeImagesGenerations
|
||||
RelayModeImagesEdits
|
||||
RelayModeImagesVariations
|
||||
RelayModeEdits
|
||||
RelayModeAudioSpeech
|
||||
RelayModeAudioTranscription
|
||||
RelayModeAudioTranslation
|
||||
ConfigKeyPrefix = "cfg_"
|
||||
|
||||
ConfigKeyAPIVersion = ConfigKeyPrefix + "api_version"
|
||||
ConfigKeyLibraryID = ConfigKeyPrefix + "library_id"
|
||||
ConfigKeyPlugin = ConfigKeyPrefix + "plugin"
|
||||
)
|
||||
|
@@ -1,7 +1,9 @@
|
||||
package common
|
||||
|
||||
import "github.com/songquanpeng/one-api/common/helper"
|
||||
|
||||
var UsingSQLite = false
|
||||
var UsingPostgreSQL = false
|
||||
|
||||
var SQLitePath = "one-api.db"
|
||||
var SQLiteBusyTimeout = GetOrDefault("SQLITE_BUSY_TIMEOUT", 3000)
|
||||
var SQLiteBusyTimeout = helper.GetOrDefaultEnvInt("SQLITE_BUSY_TIMEOUT", 3000)
|
||||
|
@@ -5,19 +5,20 @@ import (
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"net/smtp"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func SendEmail(subject string, receiver string, content string) error {
|
||||
if SMTPFrom == "" { // for compatibility
|
||||
SMTPFrom = SMTPAccount
|
||||
if config.SMTPFrom == "" { // for compatibility
|
||||
config.SMTPFrom = config.SMTPAccount
|
||||
}
|
||||
encodedSubject := fmt.Sprintf("=?UTF-8?B?%s?=", base64.StdEncoding.EncodeToString([]byte(subject)))
|
||||
|
||||
// Extract domain from SMTPFrom
|
||||
parts := strings.Split(SMTPFrom, "@")
|
||||
parts := strings.Split(config.SMTPFrom, "@")
|
||||
var domain string
|
||||
if len(parts) > 1 {
|
||||
domain = parts[1]
|
||||
@@ -36,21 +37,21 @@ func SendEmail(subject string, receiver string, content string) error {
|
||||
"Message-ID: %s\r\n"+ // add Message-ID header to avoid being treated as spam, RFC 5322
|
||||
"Date: %s\r\n"+
|
||||
"Content-Type: text/html; charset=UTF-8\r\n\r\n%s\r\n",
|
||||
receiver, SystemName, SMTPFrom, encodedSubject, messageId, time.Now().Format(time.RFC1123Z), content))
|
||||
auth := smtp.PlainAuth("", SMTPAccount, SMTPToken, SMTPServer)
|
||||
addr := fmt.Sprintf("%s:%d", SMTPServer, SMTPPort)
|
||||
receiver, config.SystemName, config.SMTPFrom, encodedSubject, messageId, time.Now().Format(time.RFC1123Z), content))
|
||||
auth := smtp.PlainAuth("", config.SMTPAccount, config.SMTPToken, config.SMTPServer)
|
||||
addr := fmt.Sprintf("%s:%d", config.SMTPServer, config.SMTPPort)
|
||||
to := strings.Split(receiver, ";")
|
||||
|
||||
if SMTPPort == 465 {
|
||||
if config.SMTPPort == 465 {
|
||||
tlsConfig := &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
ServerName: SMTPServer,
|
||||
ServerName: config.SMTPServer,
|
||||
}
|
||||
conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", SMTPServer, SMTPPort), tlsConfig)
|
||||
conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", config.SMTPServer, config.SMTPPort), tlsConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client, err := smtp.NewClient(conn, SMTPServer)
|
||||
client, err := smtp.NewClient(conn, config.SMTPServer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -58,7 +59,7 @@ func SendEmail(subject string, receiver string, content string) error {
|
||||
if err = client.Auth(auth); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = client.Mail(SMTPFrom); err != nil {
|
||||
if err = client.Mail(config.SMTPFrom); err != nil {
|
||||
return err
|
||||
}
|
||||
receiverEmails := strings.Split(receiver, ";")
|
||||
@@ -80,7 +81,7 @@ func SendEmail(subject string, receiver string, content string) error {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = smtp.SendMail(addr, auth, SMTPAccount, to, mail)
|
||||
err = smtp.SendMail(addr, auth, config.SMTPAccount, to, mail)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@@ -15,10 +15,7 @@ type embedFileSystem struct {
|
||||
|
||||
func (e embedFileSystem) Exists(prefix string, path string) bool {
|
||||
_, err := e.Open(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func EmbedFolder(fsEmbed embed.FS, targetPath string) static.ServeFileSystem {
|
||||
|
@@ -1,71 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"path"
|
||||
)
|
||||
|
||||
type FormBuilder interface {
|
||||
CreateFormFile(fieldname string, fileHeader *multipart.FileHeader) error
|
||||
CreateFormFileReader(fieldname string, r io.Reader, filename string) error
|
||||
WriteField(fieldname, value string) error
|
||||
Close() error
|
||||
FormDataContentType() string
|
||||
}
|
||||
|
||||
type DefaultFormBuilder struct {
|
||||
writer *multipart.Writer
|
||||
}
|
||||
|
||||
func NewFormBuilder(body io.Writer) *DefaultFormBuilder {
|
||||
return &DefaultFormBuilder{
|
||||
writer: multipart.NewWriter(body),
|
||||
}
|
||||
}
|
||||
|
||||
func (fb *DefaultFormBuilder) CreateFormFile(fieldname string, fileHeader *multipart.FileHeader) error {
|
||||
file, err := fileHeader.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer file.Close()
|
||||
|
||||
return fb.createFormFile(fieldname, file, fileHeader.Filename)
|
||||
}
|
||||
|
||||
func (fb *DefaultFormBuilder) CreateFormFileReader(fieldname string, r io.Reader, filename string) error {
|
||||
return fb.createFormFile(fieldname, r, path.Base(filename))
|
||||
}
|
||||
|
||||
func (fb *DefaultFormBuilder) createFormFile(fieldname string, r io.Reader, filename string) error {
|
||||
if filename == "" {
|
||||
return fmt.Errorf("filename cannot be empty")
|
||||
}
|
||||
|
||||
fieldWriter, err := fb.writer.CreateFormFile(fieldname, filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = io.Copy(fieldWriter, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fb *DefaultFormBuilder) WriteField(fieldname, value string) error {
|
||||
return fb.writer.WriteField(fieldname, value)
|
||||
}
|
||||
|
||||
func (fb *DefaultFormBuilder) Close() error {
|
||||
return fb.writer.Close()
|
||||
}
|
||||
|
||||
func (fb *DefaultFormBuilder) FormDataContentType() string {
|
||||
return fb.writer.FormDataContentType()
|
||||
}
|
@@ -2,60 +2,52 @@ package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"one-api/types"
|
||||
|
||||
"encoding/json"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/go-playground/validator/v10"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func UnmarshalBodyReusable(c *gin.Context, v any) error {
|
||||
const KeyRequestBody = "key_request_body"
|
||||
|
||||
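// GetRequestBody reads the raw request body once, caches it in the gin context under
// KeyRequestBody, and returns the cached bytes on later calls, so the body can be
// consumed more than once along the handler chain.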
func GetRequestBody(c *gin.Context) ([]byte, error) {
|
||||
requestBody, _ := c.Get(KeyRequestBody)
|
||||
if requestBody != nil {
|
||||
return requestBody.([]byte), nil
|
||||
}
|
||||
requestBody, err := io.ReadAll(c.Request.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = c.Request.Body.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
|
||||
err = c.ShouldBind(v)
|
||||
if err != nil {
|
||||
if errs, ok := err.(validator.ValidationErrors); ok {
|
||||
// 返回第一个错误字段的名称
|
||||
return fmt.Errorf("field %s is required", errs[0].Field())
|
||||
}
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
_ = c.Request.Body.Close()
|
||||
c.Set(KeyRequestBody, requestBody)
|
||||
return requestBody.([]byte), nil
|
||||
}
|
||||
|
||||
func UnmarshalBodyReusable(c *gin.Context, v any) error {
|
||||
requestBody, err := GetRequestBody(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
contentType := c.Request.Header.Get("Content-Type")
|
||||
if strings.HasPrefix(contentType, "application/json") {
|
||||
err = json.Unmarshal(requestBody, &v)
|
||||
} else {
|
||||
// skip for now
|
||||
// TODO: someday non json request have variant model, we will need to implementation this
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Reset request body
|
||||
c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
|
||||
return nil
|
||||
}
|
||||
|
||||
func ErrorWrapper(err error, code string, statusCode int) *types.OpenAIErrorWithStatusCode {
|
||||
return StringErrorWrapper(err.Error(), code, statusCode)
|
||||
}
|
||||
|
||||
func StringErrorWrapper(err string, code string, statusCode int) *types.OpenAIErrorWithStatusCode {
|
||||
openAIError := types.OpenAIError{
|
||||
Message: err,
|
||||
Type: "one_api_error",
|
||||
Code: code,
|
||||
}
|
||||
return &types.OpenAIErrorWithStatusCode{
|
||||
OpenAIError: openAIError,
|
||||
StatusCode: statusCode,
|
||||
}
|
||||
}
|
||||
|
||||
func AbortWithMessage(c *gin.Context, statusCode int, message string) {
|
||||
c.JSON(statusCode, gin.H{
|
||||
"error": gin.H{
|
||||
"message": message,
|
||||
"type": "one_api_error",
|
||||
},
|
||||
})
|
||||
c.Abort()
|
||||
LogError(c.Request.Context(), message)
|
||||
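// SetEventStreamHeaders sets the response headers required for Server-Sent Events
// streaming and disables proxy buffering (X-Accel-Buffering: no) so that chunks are
// flushed to the client immediately.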
func SetEventStreamHeaders(c *gin.Context) {
|
||||
c.Writer.Header().Set("Content-Type", "text/event-stream")
|
||||
c.Writer.Header().Set("Cache-Control", "no-cache")
|
||||
c.Writer.Header().Set("Connection", "keep-alive")
|
||||
c.Writer.Header().Set("Transfer-Encoding", "chunked")
|
||||
c.Writer.Header().Set("X-Accel-Buffering", "no")
|
||||
}
|
||||
|
@@ -1,6 +1,9 @@
|
||||
package common
|
||||
|
||||
import "encoding/json"
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
)
|
||||
|
||||
var GroupRatio = map[string]float64{
|
||||
"default": 1,
|
||||
@@ -11,7 +14,7 @@ var GroupRatio = map[string]float64{
|
||||
func GroupRatio2JSONString() string {
|
||||
jsonBytes, err := json.Marshal(GroupRatio)
|
||||
if err != nil {
|
||||
SysError("error marshalling model ratio: " + err.Error())
|
||||
logger.SysError("error marshalling model ratio: " + err.Error())
|
||||
}
|
||||
return string(jsonBytes)
|
||||
}
|
||||
@@ -24,7 +27,7 @@ func UpdateGroupRatioByJSONString(jsonStr string) error {
|
||||
func GetGroupRatio(name string) float64 {
|
||||
ratio, ok := GroupRatio[name]
|
||||
if !ok {
|
||||
SysError("group ratio not found: " + name)
|
||||
logger.SysError("group ratio not found: " + name)
|
||||
return 1
|
||||
}
|
||||
return ratio
|
||||
|
234 common/helper/helper.go (new file)
@@ -0,0 +1,234 @@
|
||||
package helper
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/google/uuid"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"html/template"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func OpenBrowser(url string) {
|
||||
var err error
|
||||
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
err = exec.Command("xdg-open", url).Start()
|
||||
case "windows":
|
||||
err = exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start()
|
||||
case "darwin":
|
||||
err = exec.Command("open", url).Start()
|
||||
}
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
|
||||
func GetIp() (ip string) {
|
||||
ips, err := net.InterfaceAddrs()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return ip
|
||||
}
|
||||
|
||||
for _, a := range ips {
|
||||
if ipNet, ok := a.(*net.IPNet); ok && !ipNet.IP.IsLoopback() {
|
||||
if ipNet.IP.To4() != nil {
|
||||
ip = ipNet.IP.String()
|
||||
if strings.HasPrefix(ip, "10") {
|
||||
return
|
||||
}
|
||||
if strings.HasPrefix(ip, "172") {
|
||||
return
|
||||
}
|
||||
if strings.HasPrefix(ip, "192.168") {
|
||||
return
|
||||
}
|
||||
ip = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var sizeKB = 1024
|
||||
var sizeMB = sizeKB * 1024
|
||||
var sizeGB = sizeMB * 1024
|
||||
|
||||
func Bytes2Size(num int64) string {
|
||||
numStr := ""
|
||||
unit := "B"
|
||||
if num/int64(sizeGB) > 1 {
|
||||
numStr = fmt.Sprintf("%.2f", float64(num)/float64(sizeGB))
|
||||
unit = "GB"
|
||||
} else if num/int64(sizeMB) > 1 {
|
||||
numStr = fmt.Sprintf("%d", int(float64(num)/float64(sizeMB)))
|
||||
unit = "MB"
|
||||
} else if num/int64(sizeKB) > 1 {
|
||||
numStr = fmt.Sprintf("%d", int(float64(num)/float64(sizeKB)))
|
||||
unit = "KB"
|
||||
} else {
|
||||
numStr = fmt.Sprintf("%d", num)
|
||||
}
|
||||
return numStr + " " + unit
|
||||
}
|
||||
|
||||
func Seconds2Time(num int) (time string) {
|
||||
if num/31104000 > 0 {
|
||||
time += strconv.Itoa(num/31104000) + " 年 "
|
||||
num %= 31104000
|
||||
}
|
||||
if num/2592000 > 0 {
|
||||
time += strconv.Itoa(num/2592000) + " 个月 "
|
||||
num %= 2592000
|
||||
}
|
||||
if num/86400 > 0 {
|
||||
time += strconv.Itoa(num/86400) + " 天 "
|
||||
num %= 86400
|
||||
}
|
||||
if num/3600 > 0 {
|
||||
time += strconv.Itoa(num/3600) + " 小时 "
|
||||
num %= 3600
|
||||
}
|
||||
if num/60 > 0 {
|
||||
time += strconv.Itoa(num/60) + " 分钟 "
|
||||
num %= 60
|
||||
}
|
||||
time += strconv.Itoa(num) + " 秒"
|
||||
return
|
||||
}
|
||||
|
||||
func Interface2String(inter interface{}) string {
|
||||
switch inter := inter.(type) {
|
||||
case string:
|
||||
return inter
|
||||
case int:
|
||||
return fmt.Sprintf("%d", inter)
|
||||
case float64:
|
||||
return fmt.Sprintf("%f", inter)
|
||||
}
|
||||
return "Not Implemented"
|
||||
}
|
||||
|
||||
func UnescapeHTML(x string) interface{} {
|
||||
return template.HTML(x)
|
||||
}
|
||||
|
||||
func IntMax(a int, b int) int {
|
||||
if a >= b {
|
||||
return a
|
||||
} else {
|
||||
return b
|
||||
}
|
||||
}
|
||||
|
||||
func GetUUID() string {
|
||||
code := uuid.New().String()
|
||||
code = strings.Replace(code, "-", "", -1)
|
||||
return code
|
||||
}
|
||||
|
||||
const keyChars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
const keyNumbers = "0123456789"
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
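// GenerateKey builds a 48-character key: 16 random characters followed by a 32-character
// UUID (dashes stripped) whose lowercase letters at even positions are upper-cased.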
func GenerateKey() string {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
key := make([]byte, 48)
|
||||
for i := 0; i < 16; i++ {
|
||||
key[i] = keyChars[rand.Intn(len(keyChars))]
|
||||
}
|
||||
uuid_ := GetUUID()
|
||||
for i := 0; i < 32; i++ {
|
||||
c := uuid_[i]
|
||||
if i%2 == 0 && c >= 'a' && c <= 'z' {
|
||||
c = c - 'a' + 'A'
|
||||
}
|
||||
key[i+16] = c
|
||||
}
|
||||
return string(key)
|
||||
}
|
||||
|
||||
func GetRandomString(length int) string {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
key := make([]byte, length)
|
||||
for i := 0; i < length; i++ {
|
||||
key[i] = keyChars[rand.Intn(len(keyChars))]
|
||||
}
|
||||
return string(key)
|
||||
}
|
||||
|
||||
func GetRandomNumberString(length int) string {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
key := make([]byte, length)
|
||||
for i := 0; i < length; i++ {
|
||||
key[i] = keyNumbers[rand.Intn(len(keyNumbers))]
|
||||
}
|
||||
return string(key)
|
||||
}
|
||||
|
||||
func GetTimestamp() int64 {
|
||||
return time.Now().Unix()
|
||||
}
|
||||
|
||||
func GetTimeString() string {
|
||||
now := time.Now()
|
||||
return fmt.Sprintf("%s%d", now.Format("20060102150405"), now.UnixNano()%1e9)
|
||||
}
|
||||
|
||||
func Max(a int, b int) int {
|
||||
if a >= b {
|
||||
return a
|
||||
} else {
|
||||
return b
|
||||
}
|
||||
}
|
||||
|
||||
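// GetOrDefaultEnvInt returns the named environment variable parsed as an integer,
// falling back to defaultValue when the variable is unset or cannot be parsed.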
func GetOrDefaultEnvInt(env string, defaultValue int) int {
|
||||
if env == "" || os.Getenv(env) == "" {
|
||||
return defaultValue
|
||||
}
|
||||
num, err := strconv.Atoi(os.Getenv(env))
|
||||
if err != nil {
|
||||
logger.SysError(fmt.Sprintf("failed to parse %s: %s, using default value: %d", env, err.Error(), defaultValue))
|
||||
return defaultValue
|
||||
}
|
||||
return num
|
||||
}
|
||||
|
||||
func GetOrDefaultEnvString(env string, defaultValue string) string {
|
||||
if env == "" || os.Getenv(env) == "" {
|
||||
return defaultValue
|
||||
}
|
||||
return os.Getenv(env)
|
||||
}
|
||||
|
||||
func AssignOrDefault(value string, defaultValue string) string {
|
||||
if len(value) != 0 {
|
||||
return value
|
||||
}
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
func MessageWithRequestId(message string, id string) string {
|
||||
return fmt.Sprintf("%s (request id: %s)", message, id)
|
||||
}
|
||||
|
||||
func String2Int(str string) int {
|
||||
num, err := strconv.Atoi(str)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return num
|
||||
}
|
@@ -15,7 +15,25 @@ import (
|
||||
_ "golang.org/x/image/webp"
|
||||
)
|
||||
|
||||
// Regex to match data URL pattern
|
||||
var dataURLPattern = regexp.MustCompile(`data:image/([^;]+);base64,(.*)`)
|
||||
|
||||
func IsImageUrl(url string) (bool, error) {
|
||||
resp, err := http.Head(url)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func GetImageSizeFromUrl(url string) (width int, height int, err error) {
|
||||
isImage, err := IsImageUrl(url)
|
||||
if !isImage {
|
||||
return
|
||||
}
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return
|
||||
@@ -28,6 +46,35 @@ func GetImageSizeFromUrl(url string) (width int, height int, err error) {
|
||||
return img.Width, img.Height, nil
|
||||
}
|
||||
|
||||
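// GetImageFromUrl returns the MIME type and base64-encoded bytes of an image, accepting
// either a base64 data URL (parsed in place) or a remote URL (fetched over HTTP and then
// base64-encoded).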
func GetImageFromUrl(url string) (mimeType string, data string, err error) {
|
||||
// Check if the URL is a data URL
|
||||
matches := dataURLPattern.FindStringSubmatch(url)
|
||||
if len(matches) == 3 {
|
||||
// URL is a data URL
|
||||
mimeType = "image/" + matches[1]
|
||||
data = matches[2]
|
||||
return
|
||||
}
|
||||
|
||||
isImage, err := IsImageUrl(url)
|
||||
if !isImage {
|
||||
return
|
||||
}
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
buffer := bytes.NewBuffer(nil)
|
||||
_, err = buffer.ReadFrom(resp.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
mimeType = resp.Header.Get("Content-Type")
|
||||
data = base64.StdEncoding.EncodeToString(buffer.Bytes())
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
reg = regexp.MustCompile(`data:image/([^;]+);base64,`)
|
||||
)
|
||||
|
@@ -12,7 +12,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
img "one-api/common/image"
|
||||
img "github.com/songquanpeng/one-api/common/image"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
_ "golang.org/x/image/webp"
|
||||
@@ -152,3 +152,20 @@ func TestGetImageSize(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetImageSizeFromBase64(t *testing.T) {
|
||||
for i, c := range cases {
|
||||
t.Run("Decode:"+strconv.Itoa(i), func(t *testing.T) {
|
||||
resp, err := http.Get(c.url)
|
||||
assert.NoError(t, err)
|
||||
defer resp.Body.Close()
|
||||
data, err := io.ReadAll(resp.Body)
|
||||
assert.NoError(t, err)
|
||||
encoded := base64.StdEncoding.EncodeToString(data)
|
||||
width, height, err := img.GetImageSizeFromBase64(encoded)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, c.width, width)
|
||||
assert.Equal(t, c.height, height)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@@ -3,11 +3,11 @@ package common
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/joho/godotenv"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -25,11 +25,6 @@ func printHelp() {
|
||||
}
|
||||
|
||||
func init() {
|
||||
// 加载.env文件
|
||||
err := godotenv.Load()
|
||||
if err != nil {
|
||||
SysLog("failed to load .env file: " + err.Error())
|
||||
}
|
||||
flag.Parse()
|
||||
|
||||
if *PrintVersion {
|
||||
@@ -44,9 +39,9 @@ func init() {
|
||||
|
||||
if os.Getenv("SESSION_SECRET") != "" {
|
||||
if os.Getenv("SESSION_SECRET") == "random_string" {
|
||||
SysError("SESSION_SECRET is set to an example value, please change it to a random string.")
|
||||
logger.SysError("SESSION_SECRET is set to an example value, please change it to a random string.")
|
||||
} else {
|
||||
SessionSecret = os.Getenv("SESSION_SECRET")
|
||||
config.SessionSecret = os.Getenv("SESSION_SECRET")
|
||||
}
|
||||
}
|
||||
if os.Getenv("SQLITE_PATH") != "" {
|
||||
@@ -64,5 +59,6 @@ func init() {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
logger.LogDir = *LogDir
|
||||
}
|
||||
}
|
||||
|
7 common/logger/constants.go (new file)
@@ -0,0 +1,7 @@
|
||||
package logger
|
||||
|
||||
const (
|
||||
RequestIdKey = "X-Oneapi-Request-Id"
|
||||
)
|
||||
|
||||
var LogDir string
|
@@ -1,4 +1,4 @@
|
||||
package common
|
||||
package logger
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
loggerDEBUG = "DEBUG"
|
||||
loggerINFO = "INFO"
|
||||
loggerWarn = "WARN"
|
||||
loggerError = "ERR"
|
||||
@@ -25,7 +26,7 @@ var setupLogLock sync.Mutex
|
||||
var setupLogWorking bool
|
||||
|
||||
func SetupLogger() {
|
||||
if *LogDir != "" {
|
||||
if LogDir != "" {
|
||||
ok := setupLogLock.TryLock()
|
||||
if !ok {
|
||||
log.Println("setup log is already working")
|
||||
@@ -35,7 +36,7 @@ func SetupLogger() {
|
||||
setupLogLock.Unlock()
|
||||
setupLogWorking = false
|
||||
}()
|
||||
logPath := filepath.Join(*LogDir, fmt.Sprintf("oneapi-%s.log", time.Now().Format("20060102")))
|
||||
logPath := filepath.Join(LogDir, fmt.Sprintf("oneapi-%s.log", time.Now().Format("20060102")))
|
||||
fd, err := os.OpenFile(logPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
log.Fatal("failed to open log file")
|
||||
@@ -55,18 +56,38 @@ func SysError(s string) {
|
||||
_, _ = fmt.Fprintf(gin.DefaultErrorWriter, "[SYS] %v | %s \n", t.Format("2006/01/02 - 15:04:05"), s)
|
||||
}
|
||||
|
||||
func LogInfo(ctx context.Context, msg string) {
|
||||
func Debug(ctx context.Context, msg string) {
|
||||
logHelper(ctx, loggerDEBUG, msg)
|
||||
}
|
||||
|
||||
func Info(ctx context.Context, msg string) {
|
||||
logHelper(ctx, loggerINFO, msg)
|
||||
}
|
||||
|
||||
func LogWarn(ctx context.Context, msg string) {
|
||||
func Warn(ctx context.Context, msg string) {
|
||||
logHelper(ctx, loggerWarn, msg)
|
||||
}
|
||||
|
||||
func LogError(ctx context.Context, msg string) {
|
||||
func Error(ctx context.Context, msg string) {
|
||||
logHelper(ctx, loggerError, msg)
|
||||
}
|
||||
|
||||
func Debugf(ctx context.Context, format string, a ...any) {
|
||||
Debug(ctx, fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
func Infof(ctx context.Context, format string, a ...any) {
|
||||
Info(ctx, fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
func Warnf(ctx context.Context, format string, a ...any) {
|
||||
Warn(ctx, fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
func Errorf(ctx context.Context, format string, a ...any) {
|
||||
Error(ctx, fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
func logHelper(ctx context.Context, level string, msg string) {
|
||||
writer := gin.DefaultErrorWriter
|
||||
if level == loggerINFO {
|
||||
@@ -90,11 +111,3 @@ func FatalLog(v ...any) {
|
||||
_, _ = fmt.Fprintf(gin.DefaultErrorWriter, "[FATAL] %v | %v \n", t.Format("2006/01/02 - 15:04:05"), v)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func LogQuota(quota int) string {
|
||||
if DisplayInCurrencyEnabled {
|
||||
return fmt.Sprintf("$%.6f 额度", float64(quota)/QuotaPerUnit)
|
||||
} else {
|
||||
return fmt.Sprintf("%d 点额度", quota)
|
||||
}
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
type Marshaller interface {
|
||||
Marshal(value any) ([]byte, error)
|
||||
}
|
||||
|
||||
type JSONMarshaller struct{}
|
||||
|
||||
func (jm *JSONMarshaller) Marshal(value any) ([]byte, error) {
|
||||
return json.Marshal(value)
|
||||
}
|
@@ -2,88 +2,89 @@ package common
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var DalleSizeRatios = map[string]map[string]float64{
|
||||
"dall-e-2": {
|
||||
"256x256": 1,
|
||||
"512x512": 1.125,
|
||||
"1024x1024": 1.25,
|
||||
},
|
||||
"dall-e-3": {
|
||||
"1024x1024": 1,
|
||||
"1024x1792": 2,
|
||||
"1792x1024": 2,
|
||||
},
|
||||
}
|
||||
|
||||
var DalleGenerationImageAmounts = map[string][2]int{
|
||||
"dall-e-2": {1, 10},
|
||||
"dall-e-3": {1, 1}, // OpenAI allows n=1 currently.
|
||||
}
|
||||
|
||||
var DalleImagePromptLengthLimitations = map[string]int{
|
||||
"dall-e-2": 1000,
|
||||
"dall-e-3": 4000,
|
||||
}
|
||||
const (
|
||||
USD2RMB = 7
|
||||
USD = 500 // $0.002 = 1 -> $1 = 500
|
||||
RMB = USD / USD2RMB
|
||||
)
|
||||
|
||||
// ModelRatio
|
||||
// https://platform.openai.com/docs/models/model-endpoint-compatibility
|
||||
// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Blfmc9dlf
|
||||
// https://openai.com/pricing
|
||||
// TODO: when a new api is enabled, check the pricing here
|
||||
// 1 === $0.002 / 1K tokens
|
||||
// 1 === ¥0.014 / 1k tokens
|
||||
var ModelRatio = map[string]float64{
|
||||
"gpt-4": 15,
|
||||
"gpt-4-0314": 15,
|
||||
"gpt-4-0613": 15,
|
||||
"gpt-4-32k": 30,
|
||||
"gpt-4-32k-0314": 30,
|
||||
"gpt-4-32k-0613": 30,
|
||||
"gpt-4-1106-preview": 5, // $0.01 / 1K tokens
|
||||
"gpt-4-vision-preview": 5, // $0.01 / 1K tokens
|
||||
"gpt-3.5-turbo": 0.75, // $0.0015 / 1K tokens
|
||||
"gpt-3.5-turbo-0301": 0.75,
|
||||
"gpt-3.5-turbo-0613": 0.75,
|
||||
"gpt-3.5-turbo-16k": 1.5, // $0.003 / 1K tokens
|
||||
"gpt-3.5-turbo-16k-0613": 1.5,
|
||||
"gpt-3.5-turbo-instruct": 0.75, // $0.0015 / 1K tokens
|
||||
"gpt-3.5-turbo-1106": 0.5, // $0.001 / 1K tokens
|
||||
"text-ada-001": 0.2,
|
||||
"text-babbage-001": 0.25,
|
||||
"text-curie-001": 1,
|
||||
"text-davinci-002": 10,
|
||||
"text-davinci-003": 10,
|
||||
"text-davinci-edit-001": 10,
|
||||
"code-davinci-edit-001": 10,
|
||||
"whisper-1": 15, // $0.006 / minute -> $0.006 / 150 words -> $0.006 / 200 tokens -> $0.03 / 1k tokens
|
||||
"tts-1": 7.5, // $0.015 / 1K characters
|
||||
"tts-1-1106": 7.5,
|
||||
"tts-1-hd": 15, // $0.030 / 1K characters
|
||||
"tts-1-hd-1106": 15,
|
||||
"davinci": 10,
|
||||
"curie": 10,
|
||||
"babbage": 10,
|
||||
"ada": 10,
|
||||
"text-embedding-ada-002": 0.05,
|
||||
"text-search-ada-doc-001": 10,
|
||||
"text-moderation-stable": 0.1,
|
||||
"text-moderation-latest": 0.1,
|
||||
"dall-e-2": 8, // $0.016 - $0.020 / image
|
||||
"dall-e-3": 20, // $0.040 - $0.120 / image
|
||||
"claude-instant-1": 0.815, // $1.63 / 1M tokens
|
||||
"claude-2": 5.51, // $11.02 / 1M tokens
|
||||
"claude-2.0": 5.51, // $11.02 / 1M tokens
|
||||
"claude-2.1": 5.51, // $11.02 / 1M tokens
|
||||
"ERNIE-Bot": 0.8572, // ¥0.012 / 1k tokens
|
||||
"ERNIE-Bot-turbo": 0.5715, // ¥0.008 / 1k tokens
|
||||
"ERNIE-Bot-4": 8.572, // ¥0.12 / 1k tokens
|
||||
"Embedding-V1": 0.1429, // ¥0.002 / 1k tokens
|
||||
"PaLM-2": 1,
|
||||
"gemini-pro": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
|
||||
// https://openai.com/pricing
|
||||
"gpt-4": 15,
|
||||
"gpt-4-0314": 15,
|
||||
"gpt-4-0613": 15,
|
||||
"gpt-4-32k": 30,
|
||||
"gpt-4-32k-0314": 30,
|
||||
"gpt-4-32k-0613": 30,
|
||||
"gpt-4-1106-preview": 5, // $0.01 / 1K tokens
|
||||
"gpt-4-0125-preview": 5, // $0.01 / 1K tokens
|
||||
"gpt-4-turbo-preview": 5, // $0.01 / 1K tokens
|
||||
"gpt-4-vision-preview": 5, // $0.01 / 1K tokens
|
||||
"gpt-3.5-turbo": 0.75, // $0.0015 / 1K tokens
|
||||
"gpt-3.5-turbo-0301": 0.75,
|
||||
"gpt-3.5-turbo-0613": 0.75,
|
||||
"gpt-3.5-turbo-16k": 1.5, // $0.003 / 1K tokens
|
||||
"gpt-3.5-turbo-16k-0613": 1.5,
|
||||
"gpt-3.5-turbo-instruct": 0.75, // $0.0015 / 1K tokens
|
||||
"gpt-3.5-turbo-1106": 0.5, // $0.001 / 1K tokens
|
||||
"gpt-3.5-turbo-0125": 0.25, // $0.0005 / 1K tokens
|
||||
"davinci-002": 1, // $0.002 / 1K tokens
|
||||
"babbage-002": 0.2, // $0.0004 / 1K tokens
|
||||
"text-ada-001": 0.2,
|
||||
"text-babbage-001": 0.25,
|
||||
"text-curie-001": 1,
|
||||
"text-davinci-002": 10,
|
||||
"text-davinci-003": 10,
|
||||
"text-davinci-edit-001": 10,
|
||||
"code-davinci-edit-001": 10,
|
||||
"whisper-1": 15, // $0.006 / minute -> $0.006 / 150 words -> $0.006 / 200 tokens -> $0.03 / 1k tokens
|
||||
"tts-1": 7.5, // $0.015 / 1K characters
|
||||
"tts-1-1106": 7.5,
|
||||
"tts-1-hd": 15, // $0.030 / 1K characters
|
||||
"tts-1-hd-1106": 15,
|
||||
"davinci": 10,
|
||||
"curie": 10,
|
||||
"babbage": 10,
|
||||
"ada": 10,
|
||||
"text-embedding-ada-002": 0.05,
|
||||
"text-embedding-3-small": 0.01,
|
||||
"text-embedding-3-large": 0.065,
|
||||
"text-search-ada-doc-001": 10,
|
||||
"text-moderation-stable": 0.1,
|
||||
"text-moderation-latest": 0.1,
|
||||
"dall-e-2": 8, // $0.016 - $0.020 / image
|
||||
"dall-e-3": 20, // $0.040 - $0.120 / image
|
||||
// https://www.anthropic.com/api#pricing
|
||||
"claude-instant-1.2": 0.8 / 1000 * USD,
|
||||
"claude-2.0": 8.0 / 1000 * USD,
|
||||
"claude-2.1": 8.0 / 1000 * USD,
|
||||
"claude-3-haiku-20240229": 0.25 / 1000 * USD,
|
||||
"claude-3-sonnet-20240229": 3.0 / 1000 * USD,
|
||||
"claude-3-opus-20240229": 15.0 / 1000 * USD,
|
||||
// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7
|
||||
"ERNIE-Bot": 0.8572, // ¥0.012 / 1k tokens
|
||||
"ERNIE-Bot-turbo": 0.5715, // ¥0.008 / 1k tokens
|
||||
"ERNIE-Bot-4": 0.12 * RMB, // ¥0.12 / 1k tokens
|
||||
"ERNIE-Bot-8k": 0.024 * RMB,
|
||||
"Embedding-V1": 0.1429, // ¥0.002 / 1k tokens
|
||||
"PaLM-2": 1,
|
||||
"gemini-pro": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
|
||||
"gemini-pro-vision": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
|
||||
// https://open.bigmodel.cn/pricing
|
||||
"glm-4": 0.1 * RMB,
|
||||
"glm-4v": 0.1 * RMB,
|
||||
"glm-3-turbo": 0.005 * RMB,
|
||||
"chatglm_turbo": 0.3572, // ¥0.005 / 1k tokens
|
||||
"chatglm_pro": 0.7143, // ¥0.01 / 1k tokens
|
||||
"chatglm_std": 0.3572, // ¥0.005 / 1k tokens
|
||||
@@ -94,17 +95,58 @@ var ModelRatio = map[string]float64{
|
||||
"qwen-max-longcontext": 1.4286, // ¥0.02 / 1k tokens
|
||||
"text-embedding-v1": 0.05, // ¥0.0007 / 1k tokens
|
||||
"SparkDesk": 1.2858, // ¥0.018 / 1k tokens
|
||||
"SparkDesk-v1.1": 1.2858, // ¥0.018 / 1k tokens
|
||||
"SparkDesk-v2.1": 1.2858, // ¥0.018 / 1k tokens
|
||||
"SparkDesk-v3.1": 1.2858, // ¥0.018 / 1k tokens
|
||||
"SparkDesk-v3.5": 1.2858, // ¥0.018 / 1k tokens
|
||||
"360GPT_S2_V9": 0.8572, // ¥0.012 / 1k tokens
|
||||
"embedding-bert-512-v1": 0.0715, // ¥0.001 / 1k tokens
|
||||
"embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens
|
||||
"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
|
||||
"hunyuan": 7.143, // ¥0.1 / 1k tokens // https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0
|
||||
"ChatStd": 0.01 * RMB,
|
||||
"ChatPro": 0.1 * RMB,
|
||||
// https://platform.moonshot.cn/pricing
|
||||
"moonshot-v1-8k": 0.012 * RMB,
|
||||
"moonshot-v1-32k": 0.024 * RMB,
|
||||
"moonshot-v1-128k": 0.06 * RMB,
|
||||
// https://platform.baichuan-ai.com/price
|
||||
"Baichuan2-Turbo": 0.008 * RMB,
|
||||
"Baichuan2-Turbo-192k": 0.016 * RMB,
|
||||
"Baichuan2-53B": 0.02 * RMB,
|
||||
// https://api.minimax.chat/document/price
|
||||
"abab6-chat": 0.1 * RMB,
|
||||
"abab5.5-chat": 0.015 * RMB,
|
||||
"abab5.5s-chat": 0.005 * RMB,
|
||||
// https://docs.mistral.ai/platform/pricing/
|
||||
"open-mistral-7b": 0.25 / 1000 * USD,
|
||||
"open-mixtral-8x7b": 0.7 / 1000 * USD,
|
||||
"mistral-small-latest": 2.0 / 1000 * USD,
|
||||
"mistral-medium-latest": 2.7 / 1000 * USD,
|
||||
"mistral-large-latest": 8.0 / 1000 * USD,
|
||||
"mistral-embed": 0.1 / 1000 * USD,
|
||||
}
|
||||
|
||||
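All of the entries above are expressed against one baseline: a ratio of 1 equals $0.002 per 1K tokens (hence "davinci-002": 1 // $0.002 / 1K tokens), and the USD / RMB multipliers convert raw vendor prices onto that baseline. A minimal sketch of the arithmetic; the constant values here are back-derived from the comments in this table, not copied from the repository's declarations:

// Assumed values, inferred from the comments above rather than the real source.
const (
    USD = 500     // ratio units per $1 / 1K tokens, since ratio 1 == $0.002 / 1K tokens
    RMB = 71.4286 // ratio units per ¥1 / 1K tokens, since ratio 0.8572 == ¥0.012 / 1K tokens
)

// "claude-2.1": 8.0 / 1000 * USD  -> 8.0/1000*500 = 4.0   (i.e. $8 / 1M prompt tokens)
// "ERNIE-Bot-4": 0.12 * RMB       -> 0.12*71.4286 ≈ 8.572 (matches the old literal above)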
var CompletionRatio = map[string]float64{}

var DefaultModelRatio map[string]float64
var DefaultCompletionRatio map[string]float64

func init() {
    DefaultModelRatio = make(map[string]float64)
    for k, v := range ModelRatio {
        DefaultModelRatio[k] = v
    }
    DefaultCompletionRatio = make(map[string]float64)
    for k, v := range CompletionRatio {
        DefaultCompletionRatio[k] = v
    }
}

func ModelRatio2JSONString() string {
    jsonBytes, err := json.Marshal(ModelRatio)
    if err != nil {
        SysError("error marshalling model ratio: " + err.Error())
        logger.SysError("error marshalling model ratio: " + err.Error())
    }
    return string(jsonBytes)
}
@@ -115,16 +157,46 @@ func UpdateModelRatioByJSONString(jsonStr string) error {
}

func GetModelRatio(name string) float64 {
    if strings.HasPrefix(name, "qwen-") && strings.HasSuffix(name, "-internet") {
        name = strings.TrimSuffix(name, "-internet")
    }
    ratio, ok := ModelRatio[name]
    if !ok {
        SysError("model ratio not found: " + name)
        ratio, ok = DefaultModelRatio[name]
    }
    if !ok {
        logger.SysError("model ratio not found: " + name)
        return 30
    }
    return ratio
}
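GetModelRatio now resolves a price in three steps: the live ModelRatio map (which an operator may have replaced via UpdateModelRatioByJSONString), then the DefaultModelRatio snapshot taken in init(), and finally a hard fallback of 30. A minimal sketch of a caller, written as if it sat in the same package; the model name is only an illustration:

// exampleRatioLookup shows the lookup order: live map, default snapshot, then 30.
func exampleRatioLookup() float64 {
    ratio := GetModelRatio("gpt-4-0125-preview") // 5 per the table above
    return ratio
}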
func CompletionRatio2JSONString() string {
    jsonBytes, err := json.Marshal(CompletionRatio)
    if err != nil {
        logger.SysError("error marshalling completion ratio: " + err.Error())
    }
    return string(jsonBytes)
}

func UpdateCompletionRatioByJSONString(jsonStr string) error {
    CompletionRatio = make(map[string]float64)
    return json.Unmarshal([]byte(jsonStr), &CompletionRatio)
}

func GetCompletionRatio(name string) float64 {
    if ratio, ok := CompletionRatio[name]; ok {
        return ratio
    }
    if ratio, ok := DefaultCompletionRatio[name]; ok {
        return ratio
    }
    if strings.HasPrefix(name, "gpt-3.5") {
        if strings.HasSuffix(name, "0125") {
            // https://openai.com/blog/new-embedding-models-and-api-updates
            // Updated GPT-3.5 Turbo model and lower pricing
            return 3
        }
        if strings.HasSuffix(name, "1106") {
            return 2
        }
@@ -145,11 +217,14 @@ func GetCompletionRatio(name string) float64 {
        }
        return 2
    }
    if strings.HasPrefix(name, "claude-instant-1") {
        return 3.38
    if strings.HasPrefix(name, "claude-3") {
        return 5
    }
    if strings.HasPrefix(name, "claude-2") {
        return 2.965517
    if strings.HasPrefix(name, "claude-") {
        return 3
    }
    if strings.HasPrefix(name, "mistral-") {
        return 3
    }
    return 1
}
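Completion (output) tokens are billed at the model ratio multiplied by the completion ratio, so the prefix matches above encode each vendor's output/input price gap. A short worked example, assuming USD resolves to 500 as sketched earlier:

// claude-3-opus-20240229: model ratio 15.0/1000*USD = 7.5, completion ratio 5
// => output tokens are charged at 7.5 * 5 = 37.5, i.e. $0.075 / 1K ($75 / 1M) output tokens
model := "claude-3-opus-20240229"
outputRatio := GetModelRatio(model) * GetCompletionRatio(model)
_ = outputRatio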
@@ -1,59 +0,0 @@
|
||||
package common
|
||||
|
||||
// type Quota struct {
|
||||
// ModelName string
|
||||
// ModelRatio float64
|
||||
// GroupRatio float64
|
||||
// Ratio float64
|
||||
// UserQuota int
|
||||
// }
|
||||
|
||||
// func CreateQuota(modelName string, userQuota int, group string) *Quota {
|
||||
// modelRatio := GetModelRatio(modelName)
|
||||
// groupRatio := GetGroupRatio(group)
|
||||
|
||||
// return &Quota{
|
||||
// ModelName: modelName,
|
||||
// ModelRatio: modelRatio,
|
||||
// GroupRatio: groupRatio,
|
||||
// Ratio: modelRatio * groupRatio,
|
||||
// UserQuota: userQuota,
|
||||
// }
|
||||
// }
|
||||
|
||||
// func (q *Quota) getTokenNum(tokenEncoder *tiktoken.Tiktoken, text string) int {
|
||||
// if ApproximateTokenEnabled {
|
||||
// return int(float64(len(text)) * 0.38)
|
||||
// }
|
||||
// return len(tokenEncoder.Encode(text, nil, nil))
|
||||
// }
|
||||
|
||||
// func (q *Quota) CountTokenMessages(messages []Message, model string) int {
|
||||
// tokenEncoder := q.getTokenEncoder(model)
|
||||
// // Reference:
|
||||
// // https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
|
||||
// // https://github.com/pkoukk/tiktoken-go/issues/6
|
||||
// //
|
||||
// // Every message follows <|start|>{role/name}\n{content}<|end|>\n
|
||||
// var tokensPerMessage int
|
||||
// var tokensPerName int
|
||||
// if model == "gpt-3.5-turbo-0301" {
|
||||
// tokensPerMessage = 4
|
||||
// tokensPerName = -1 // If there's a name, the role is omitted
|
||||
// } else {
|
||||
// tokensPerMessage = 3
|
||||
// tokensPerName = 1
|
||||
// }
|
||||
// tokenNum := 0
|
||||
// for _, message := range messages {
|
||||
// tokenNum += tokensPerMessage
|
||||
// tokenNum += q.getTokenNum(tokenEncoder, message.StringContent())
|
||||
// tokenNum += q.getTokenNum(tokenEncoder, message.Role)
|
||||
// if message.Name != nil {
|
||||
// tokenNum += tokensPerName
|
||||
// tokenNum += q.getTokenNum(tokenEncoder, *message.Name)
|
||||
// }
|
||||
// }
|
||||
// tokenNum += 3 // Every reply is primed with <|start|>assistant<|message|>
|
||||
// return tokenNum
|
||||
// }
|
8  common/random.go  Normal file
@@ -0,0 +1,8 @@
package common

import "math/rand"

// RandRange returns a random number between min and max (max is not included)
func RandRange(min, max int) int {
    return min + rand.Intn(max-min)
}
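The half-open interval matters when RandRange picks an index or a jitter value. A small usage sketch; the slice and durations here are illustrative only, and max must be strictly greater than min or rand.Intn will panic:

// pick one of a channel's models at random; idx is in [0, len(models))
idx := common.RandRange(0, len(models))
// add 100–499 ms of jitter before a retry
delay := time.Duration(common.RandRange(100, 500)) * time.Millisecond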
@@ -3,6 +3,7 @@ package common
import (
    "context"
    "github.com/go-redis/redis/v8"
    "github.com/songquanpeng/one-api/common/logger"
    "os"
    "time"
)
@@ -14,18 +15,18 @@ var RedisEnabled = true
func InitRedisClient() (err error) {
    if os.Getenv("REDIS_CONN_STRING") == "" {
        RedisEnabled = false
        SysLog("REDIS_CONN_STRING not set, Redis is not enabled")
        logger.SysLog("REDIS_CONN_STRING not set, Redis is not enabled")
        return nil
    }
    if os.Getenv("SYNC_FREQUENCY") == "" {
        RedisEnabled = false
        SysLog("SYNC_FREQUENCY not set, Redis is disabled")
        logger.SysLog("SYNC_FREQUENCY not set, Redis is disabled")
        return nil
    }
    SysLog("Redis is enabled")
    logger.SysLog("Redis is enabled")
    opt, err := redis.ParseURL(os.Getenv("REDIS_CONN_STRING"))
    if err != nil {
        FatalLog("failed to parse Redis connection string: " + err.Error())
        logger.FatalLog("failed to parse Redis connection string: " + err.Error())
    }
    RDB = redis.NewClient(opt)

@@ -34,7 +35,7 @@ func InitRedisClient() (err error) {

    _, err = RDB.Ping(ctx).Result()
    if err != nil {
        FatalLog("Redis ping test failed: " + err.Error())
        logger.FatalLog("Redis ping test failed: " + err.Error())
    }
    return err
}
@@ -42,7 +43,7 @@ func InitRedisClient() (err error) {
func ParseRedisOption() *redis.Options {
    opt, err := redis.ParseURL(os.Getenv("REDIS_CONN_STRING"))
    if err != nil {
        FatalLog("failed to parse Redis connection string: " + err.Error())
        logger.FatalLog("failed to parse Redis connection string: " + err.Error())
    }
    return opt
}
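InitRedisClient only turns the cache on when both environment variables are present; otherwise it flips RedisEnabled off and the process carries on without Redis. A minimal sketch of enabling it in a local run; the connection string and interval are placeholder values, not defaults taken from the project:

// Assumed local setup; real values come from deployment configuration.
os.Setenv("REDIS_CONN_STRING", "redis://localhost:6379")
os.Setenv("SYNC_FREQUENCY", "60") // sync interval read elsewhere in the config
if err := InitRedisClient(); err != nil {
    logger.FatalLog("redis init failed: " + err.Error())
}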
@@ -1,50 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
type RequestBuilder interface {
|
||||
Build(method, url string, body any, header http.Header) (*http.Request, error)
|
||||
}
|
||||
|
||||
type HTTPRequestBuilder struct {
|
||||
marshaller Marshaller
|
||||
}
|
||||
|
||||
func NewRequestBuilder() *HTTPRequestBuilder {
|
||||
return &HTTPRequestBuilder{
|
||||
marshaller: &JSONMarshaller{},
|
||||
}
|
||||
}
|
||||
|
||||
func (b *HTTPRequestBuilder) Build(
|
||||
method string,
|
||||
url string,
|
||||
body any,
|
||||
header http.Header,
|
||||
) (req *http.Request, err error) {
|
||||
var bodyReader io.Reader
|
||||
if body != nil {
|
||||
if v, ok := body.(io.Reader); ok {
|
||||
bodyReader = v
|
||||
} else {
|
||||
var reqBytes []byte
|
||||
reqBytes, err = b.marshaller.Marshal(body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
bodyReader = bytes.NewBuffer(reqBytes)
|
||||
}
|
||||
}
|
||||
req, err = http.NewRequest(method, url, bodyReader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if header != nil {
|
||||
req.Header = header
|
||||
}
|
||||
return
|
||||
}
|
205
common/utils.go
205
common/utils.go
@@ -2,208 +2,13 @@ package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/google/uuid"
|
||||
"html/template"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
)
|
||||
|
||||
func OpenBrowser(url string) {
|
||||
var err error
|
||||
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
err = exec.Command("xdg-open", url).Start()
|
||||
case "windows":
|
||||
err = exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start()
|
||||
case "darwin":
|
||||
err = exec.Command("open", url).Start()
|
||||
}
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
|
||||
func GetIp() (ip string) {
|
||||
ips, err := net.InterfaceAddrs()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return ip
|
||||
}
|
||||
|
||||
for _, a := range ips {
|
||||
if ipNet, ok := a.(*net.IPNet); ok && !ipNet.IP.IsLoopback() {
|
||||
if ipNet.IP.To4() != nil {
|
||||
ip = ipNet.IP.String()
|
||||
if strings.HasPrefix(ip, "10") {
|
||||
return
|
||||
}
|
||||
if strings.HasPrefix(ip, "172") {
|
||||
return
|
||||
}
|
||||
if strings.HasPrefix(ip, "192.168") {
|
||||
return
|
||||
}
|
||||
ip = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var sizeKB = 1024
|
||||
var sizeMB = sizeKB * 1024
|
||||
var sizeGB = sizeMB * 1024
|
||||
|
||||
func Bytes2Size(num int64) string {
|
||||
numStr := ""
|
||||
unit := "B"
|
||||
if num/int64(sizeGB) > 1 {
|
||||
numStr = fmt.Sprintf("%.2f", float64(num)/float64(sizeGB))
|
||||
unit = "GB"
|
||||
} else if num/int64(sizeMB) > 1 {
|
||||
numStr = fmt.Sprintf("%d", int(float64(num)/float64(sizeMB)))
|
||||
unit = "MB"
|
||||
} else if num/int64(sizeKB) > 1 {
|
||||
numStr = fmt.Sprintf("%d", int(float64(num)/float64(sizeKB)))
|
||||
unit = "KB"
|
||||
func LogQuota(quota int) string {
|
||||
if config.DisplayInCurrencyEnabled {
|
||||
return fmt.Sprintf("$%.6f 额度", float64(quota)/config.QuotaPerUnit)
|
||||
} else {
|
||||
numStr = fmt.Sprintf("%d", num)
|
||||
}
|
||||
return numStr + " " + unit
|
||||
}
|
||||
|
||||
func Seconds2Time(num int) (time string) {
|
||||
if num/31104000 > 0 {
|
||||
time += strconv.Itoa(num/31104000) + " 年 "
|
||||
num %= 31104000
|
||||
}
|
||||
if num/2592000 > 0 {
|
||||
time += strconv.Itoa(num/2592000) + " 个月 "
|
||||
num %= 2592000
|
||||
}
|
||||
if num/86400 > 0 {
|
||||
time += strconv.Itoa(num/86400) + " 天 "
|
||||
num %= 86400
|
||||
}
|
||||
if num/3600 > 0 {
|
||||
time += strconv.Itoa(num/3600) + " 小时 "
|
||||
num %= 3600
|
||||
}
|
||||
if num/60 > 0 {
|
||||
time += strconv.Itoa(num/60) + " 分钟 "
|
||||
num %= 60
|
||||
}
|
||||
time += strconv.Itoa(num) + " 秒"
|
||||
return
|
||||
}
|
||||
|
||||
func Interface2String(inter interface{}) string {
|
||||
switch inter.(type) {
|
||||
case string:
|
||||
return inter.(string)
|
||||
case int:
|
||||
return fmt.Sprintf("%d", inter.(int))
|
||||
case float64:
|
||||
return fmt.Sprintf("%f", inter.(float64))
|
||||
}
|
||||
return "Not Implemented"
|
||||
}
|
||||
|
||||
func UnescapeHTML(x string) interface{} {
|
||||
return template.HTML(x)
|
||||
}
|
||||
|
||||
func IntMax(a int, b int) int {
|
||||
if a >= b {
|
||||
return a
|
||||
} else {
|
||||
return b
|
||||
return fmt.Sprintf("%d 点额度", quota)
|
||||
}
|
||||
}
|
||||
|
||||
func GetUUID() string {
|
||||
code := uuid.New().String()
|
||||
code = strings.Replace(code, "-", "", -1)
|
||||
return code
|
||||
}
|
||||
|
||||
const keyChars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
func GenerateKey() string {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
key := make([]byte, 48)
|
||||
for i := 0; i < 16; i++ {
|
||||
key[i] = keyChars[rand.Intn(len(keyChars))]
|
||||
}
|
||||
uuid_ := GetUUID()
|
||||
for i := 0; i < 32; i++ {
|
||||
c := uuid_[i]
|
||||
if i%2 == 0 && c >= 'a' && c <= 'z' {
|
||||
c = c - 'a' + 'A'
|
||||
}
|
||||
key[i+16] = c
|
||||
}
|
||||
return string(key)
|
||||
}
|
||||
|
||||
func GetRandomString(length int) string {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
key := make([]byte, length)
|
||||
for i := 0; i < length; i++ {
|
||||
key[i] = keyChars[rand.Intn(len(keyChars))]
|
||||
}
|
||||
return string(key)
|
||||
}
|
||||
|
||||
func GetTimestamp() int64 {
|
||||
return time.Now().Unix()
|
||||
}
|
||||
|
||||
func GetTimeString() string {
|
||||
now := time.Now()
|
||||
return fmt.Sprintf("%s%d", now.Format("20060102150405"), now.UnixNano()%1e9)
|
||||
}
|
||||
|
||||
func Max(a int, b int) int {
|
||||
if a >= b {
|
||||
return a
|
||||
} else {
|
||||
return b
|
||||
}
|
||||
}
|
||||
|
||||
func GetOrDefault(env string, defaultValue int) int {
|
||||
if env == "" || os.Getenv(env) == "" {
|
||||
return defaultValue
|
||||
}
|
||||
num, err := strconv.Atoi(os.Getenv(env))
|
||||
if err != nil {
|
||||
SysError(fmt.Sprintf("failed to parse %s: %s, using default value: %d", env, err.Error(), defaultValue))
|
||||
return defaultValue
|
||||
}
|
||||
return num
|
||||
}
|
||||
|
||||
func MessageWithRequestId(message string, id string) string {
|
||||
return fmt.Sprintf("%s (request id: %s)", message, id)
|
||||
}
|
||||
|
||||
func String2Int(str string) int {
|
||||
num, err := strconv.Atoi(str)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return num
|
||||
}
|
||||
|
@@ -1,11 +1,10 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/model"
|
||||
relaymodel "github.com/songquanpeng/one-api/relay/model"
|
||||
)
|
||||
|
||||
func GetSubscription(c *gin.Context) {
|
||||
@@ -14,7 +13,7 @@ func GetSubscription(c *gin.Context) {
|
||||
var err error
|
||||
var token *model.Token
|
||||
var expiredTime int64
|
||||
if common.DisplayTokenStatEnabled {
|
||||
if config.DisplayTokenStatEnabled {
|
||||
tokenId := c.GetInt("token_id")
|
||||
token, err = model.GetTokenById(tokenId)
|
||||
expiredTime = token.ExpiredTime
|
||||
@@ -24,34 +23,26 @@ func GetSubscription(c *gin.Context) {
|
||||
userId := c.GetInt("id")
|
||||
remainQuota, err = model.GetUserQuota(userId)
|
||||
if err != nil {
|
||||
openAIError := types.OpenAIError{
|
||||
Message: err.Error(),
|
||||
Type: "upstream_error",
|
||||
}
|
||||
c.JSON(200, gin.H{
|
||||
"error": openAIError,
|
||||
})
|
||||
return
|
||||
usedQuota, err = model.GetUserUsedQuota(userId)
|
||||
}
|
||||
usedQuota, err = model.GetUserUsedQuota(userId)
|
||||
}
|
||||
if expiredTime <= 0 {
|
||||
expiredTime = 0
|
||||
}
|
||||
if err != nil {
|
||||
openAIError := types.OpenAIError{
|
||||
Error := relaymodel.Error{
|
||||
Message: err.Error(),
|
||||
Type: "upstream_error",
|
||||
}
|
||||
c.JSON(200, gin.H{
|
||||
"error": openAIError,
|
||||
"error": Error,
|
||||
})
|
||||
return
|
||||
}
|
||||
quota := remainQuota + usedQuota
|
||||
amount := float64(quota)
|
||||
if common.DisplayInCurrencyEnabled {
|
||||
amount /= common.QuotaPerUnit
|
||||
if config.DisplayInCurrencyEnabled {
|
||||
amount /= config.QuotaPerUnit
|
||||
}
|
||||
if token != nil && token.UnlimitedQuota {
|
||||
amount = 100000000
|
||||
@@ -65,13 +56,14 @@ func GetSubscription(c *gin.Context) {
|
||||
AccessUntil: expiredTime,
|
||||
}
|
||||
c.JSON(200, subscription)
|
||||
return
|
||||
}
|
||||
|
||||
func GetUsage(c *gin.Context) {
|
||||
var quota int
|
||||
var err error
|
||||
var token *model.Token
|
||||
if common.DisplayTokenStatEnabled {
|
||||
if config.DisplayTokenStatEnabled {
|
||||
tokenId := c.GetInt("token_id")
|
||||
token, err = model.GetTokenById(tokenId)
|
||||
quota = token.UsedQuota
|
||||
@@ -80,22 +72,23 @@ func GetUsage(c *gin.Context) {
|
||||
quota, err = model.GetUserUsedQuota(userId)
|
||||
}
|
||||
if err != nil {
|
||||
openAIError := types.OpenAIError{
|
||||
Error := relaymodel.Error{
|
||||
Message: err.Error(),
|
||||
Type: "one_api_error",
|
||||
}
|
||||
c.JSON(200, gin.H{
|
||||
"error": openAIError,
|
||||
"error": Error,
|
||||
})
|
||||
return
|
||||
}
|
||||
amount := float64(quota)
|
||||
if common.DisplayInCurrencyEnabled {
|
||||
amount /= common.QuotaPerUnit
|
||||
if config.DisplayInCurrencyEnabled {
|
||||
amount /= config.QuotaPerUnit
|
||||
}
|
||||
usage := OpenAIUsageResponse{
|
||||
Object: "list",
|
||||
TotalUsage: amount * 100,
|
||||
}
|
||||
c.JSON(200, usage)
|
||||
return
|
||||
}
|
||||
|
@@ -1,13 +1,16 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"github.com/songquanpeng/one-api/model"
|
||||
"github.com/songquanpeng/one-api/relay/util"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"one-api/providers"
|
||||
providersBase "one-api/providers/base"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
@@ -46,30 +49,216 @@ type OpenAIUsageResponse struct {
|
||||
TotalUsage float64 `json:"total_usage"` // unit: 0.01 dollar
|
||||
}
|
||||
|
||||
func updateChannelBalance(channel *model.Channel) (float64, error) {
|
||||
req, err := http.NewRequest("POST", "/balance", nil)
|
||||
type OpenAISBUsageResponse struct {
|
||||
Msg string `json:"msg"`
|
||||
Data *struct {
|
||||
Credit string `json:"credit"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type AIProxyUserOverviewResponse struct {
|
||||
Success bool `json:"success"`
|
||||
Message string `json:"message"`
|
||||
ErrorCode int `json:"error_code"`
|
||||
Data struct {
|
||||
TotalPoints float64 `json:"totalPoints"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type API2GPTUsageResponse struct {
|
||||
Object string `json:"object"`
|
||||
TotalGranted float64 `json:"total_granted"`
|
||||
TotalUsed float64 `json:"total_used"`
|
||||
TotalRemaining float64 `json:"total_remaining"`
|
||||
}
|
||||
|
||||
type APGC2DGPTUsageResponse struct {
|
||||
//Grants interface{} `json:"grants"`
|
||||
Object string `json:"object"`
|
||||
TotalAvailable float64 `json:"total_available"`
|
||||
TotalGranted float64 `json:"total_granted"`
|
||||
TotalUsed float64 `json:"total_used"`
|
||||
}
|
||||
|
||||
// GetAuthHeader builds the Authorization header for the given token
|
||||
func GetAuthHeader(token string) http.Header {
|
||||
h := http.Header{}
|
||||
h.Add("Authorization", fmt.Sprintf("Bearer %s", token))
|
||||
return h
|
||||
}
|
||||
|
||||
func GetResponseBody(method, url string, channel *model.Channel, headers http.Header) ([]byte, error) {
|
||||
req, err := http.NewRequest(method, url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for k := range headers {
|
||||
req.Header.Add(k, headers.Get(k))
|
||||
}
|
||||
res, err := util.HTTPClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("status code: %d", res.StatusCode)
|
||||
}
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = res.Body.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return body, nil
|
||||
}
|
||||
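GetAuthHeader and GetResponseBody are the small building blocks the per-vendor balance probes below share: build a bearer header, issue the request through the relay HTTP client, and hand back the raw body. A sketch of a typical call; the URL pattern here is illustrative rather than one of the hard-coded endpoints:

// query an OpenAI-compatible upstream for its credit grants (illustrative URL)
url := fmt.Sprintf("%s/dashboard/billing/credit_grants", channel.GetBaseURL())
body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
if err != nil {
    return 0, err
}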
|
||||
func updateChannelCloseAIBalance(channel *model.Channel) (float64, error) {
|
||||
url := fmt.Sprintf("%s/dashboard/billing/credit_grants", channel.GetBaseURL())
|
||||
body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = req
|
||||
|
||||
setChannelToContext(c, channel)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
provider := providers.GetProvider(channel.Type, c)
|
||||
if provider == nil {
|
||||
return 0, errors.New("provider not found")
|
||||
response := OpenAICreditGrants{}
|
||||
err = json.Unmarshal(body, &response)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
channel.UpdateBalance(response.TotalAvailable)
|
||||
return response.TotalAvailable, nil
|
||||
}
|
||||
|
||||
balanceProvider, ok := provider.(providersBase.BalanceInterface)
|
||||
if !ok {
|
||||
return 0, errors.New("provider not implemented")
|
||||
func updateChannelOpenAISBBalance(channel *model.Channel) (float64, error) {
|
||||
url := fmt.Sprintf("https://api.openai-sb.com/sb-api/user/status?api_key=%s", channel.Key)
|
||||
body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
response := OpenAISBUsageResponse{}
|
||||
err = json.Unmarshal(body, &response)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if response.Data == nil {
|
||||
return 0, errors.New(response.Msg)
|
||||
}
|
||||
balance, err := strconv.ParseFloat(response.Data.Credit, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
channel.UpdateBalance(balance)
|
||||
return balance, nil
|
||||
}
|
||||
|
||||
return balanceProvider.Balance(channel)
|
||||
func updateChannelAIProxyBalance(channel *model.Channel) (float64, error) {
|
||||
url := "https://aiproxy.io/api/report/getUserOverview"
|
||||
headers := http.Header{}
|
||||
headers.Add("Api-Key", channel.Key)
|
||||
body, err := GetResponseBody("GET", url, channel, headers)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
response := AIProxyUserOverviewResponse{}
|
||||
err = json.Unmarshal(body, &response)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if !response.Success {
|
||||
return 0, fmt.Errorf("code: %d, message: %s", response.ErrorCode, response.Message)
|
||||
}
|
||||
channel.UpdateBalance(response.Data.TotalPoints)
|
||||
return response.Data.TotalPoints, nil
|
||||
}
|
||||
|
||||
func updateChannelAPI2GPTBalance(channel *model.Channel) (float64, error) {
|
||||
url := "https://api.api2gpt.com/dashboard/billing/credit_grants"
|
||||
body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
response := API2GPTUsageResponse{}
|
||||
err = json.Unmarshal(body, &response)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
channel.UpdateBalance(response.TotalRemaining)
|
||||
return response.TotalRemaining, nil
|
||||
}
|
||||
|
||||
func updateChannelAIGC2DBalance(channel *model.Channel) (float64, error) {
|
||||
url := "https://api.aigc2d.com/dashboard/billing/credit_grants"
|
||||
body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
response := APGC2DGPTUsageResponse{}
|
||||
err = json.Unmarshal(body, &response)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
channel.UpdateBalance(response.TotalAvailable)
|
||||
return response.TotalAvailable, nil
|
||||
}
|
||||
|
||||
func updateChannelBalance(channel *model.Channel) (float64, error) {
|
||||
baseURL := common.ChannelBaseURLs[channel.Type]
|
||||
if channel.GetBaseURL() == "" {
|
||||
channel.BaseURL = &baseURL
|
||||
}
|
||||
switch channel.Type {
|
||||
case common.ChannelTypeOpenAI:
|
||||
if channel.GetBaseURL() != "" {
|
||||
baseURL = channel.GetBaseURL()
|
||||
}
|
||||
case common.ChannelTypeAzure:
|
||||
return 0, errors.New("尚未实现")
|
||||
case common.ChannelTypeCustom:
|
||||
baseURL = channel.GetBaseURL()
|
||||
case common.ChannelTypeCloseAI:
|
||||
return updateChannelCloseAIBalance(channel)
|
||||
case common.ChannelTypeOpenAISB:
|
||||
return updateChannelOpenAISBBalance(channel)
|
||||
case common.ChannelTypeAIProxy:
|
||||
return updateChannelAIProxyBalance(channel)
|
||||
case common.ChannelTypeAPI2GPT:
|
||||
return updateChannelAPI2GPTBalance(channel)
|
||||
case common.ChannelTypeAIGC2D:
|
||||
return updateChannelAIGC2DBalance(channel)
|
||||
default:
|
||||
return 0, errors.New("尚未实现")
|
||||
}
|
||||
url := fmt.Sprintf("%s/v1/dashboard/billing/subscription", baseURL)
|
||||
|
||||
body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
subscription := OpenAISubscriptionResponse{}
|
||||
err = json.Unmarshal(body, &subscription)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
now := time.Now()
|
||||
startDate := fmt.Sprintf("%s-01", now.Format("2006-01"))
|
||||
endDate := now.Format("2006-01-02")
|
||||
if !subscription.HasPaymentMethod {
|
||||
startDate = now.AddDate(0, 0, -100).Format("2006-01-02")
|
||||
}
|
||||
url = fmt.Sprintf("%s/v1/dashboard/billing/usage?start_date=%s&end_date=%s", baseURL, startDate, endDate)
|
||||
body, err = GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
usage := OpenAIUsageResponse{}
|
||||
err = json.Unmarshal(body, &usage)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
balance := subscription.HardLimitUSD - usage.TotalUsage/100
|
||||
channel.UpdateBalance(balance)
|
||||
return balance, nil
|
||||
}
|
||||
|
||||
func UpdateChannelBalance(c *gin.Context) {
|
||||
@@ -127,7 +316,7 @@ func updateAllChannelsBalance() error {
|
||||
disableChannel(channel.Id, channel.Name, "余额不足")
|
||||
}
|
||||
}
|
||||
time.Sleep(common.RequestInterval)
|
||||
time.Sleep(config.RequestInterval)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -152,8 +341,8 @@ func UpdateAllChannelsBalance(c *gin.Context) {
|
||||
func AutomaticallyUpdateChannels(frequency int) {
|
||||
for {
|
||||
time.Sleep(time.Duration(frequency) * time.Minute)
|
||||
common.SysLog("updating all channels")
|
||||
logger.SysLog("updating all channels")
|
||||
_ = updateAllChannelsBalance()
|
||||
common.SysLog("channels update done")
|
||||
logger.SysLog("channels update done")
|
||||
}
|
||||
}
|
||||
|
@@ -1,100 +1,111 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"github.com/songquanpeng/one-api/middleware"
|
||||
"github.com/songquanpeng/one-api/model"
|
||||
"github.com/songquanpeng/one-api/relay/constant"
|
||||
"github.com/songquanpeng/one-api/relay/helper"
|
||||
relaymodel "github.com/songquanpeng/one-api/relay/model"
|
||||
"github.com/songquanpeng/one-api/relay/util"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"one-api/providers"
|
||||
providers_base "one-api/providers/base"
|
||||
"one-api/types"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func testChannel(channel *model.Channel, request types.ChatCompletionRequest) (err error, openaiErr *types.OpenAIError) {
|
||||
// 创建一个 http.Request
|
||||
req, err := http.NewRequest("POST", "/v1/chat/completions", nil)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = req
|
||||
|
||||
setChannelToContext(c, channel)
|
||||
// 创建映射
|
||||
channelTypeToModel := map[int]string{
|
||||
common.ChannelTypePaLM: "PaLM-2",
|
||||
common.ChannelTypeAnthropic: "claude-2",
|
||||
common.ChannelTypeBaidu: "ERNIE-Bot",
|
||||
common.ChannelTypeZhipu: "chatglm_lite",
|
||||
common.ChannelTypeAli: "qwen-turbo",
|
||||
common.ChannelType360: "360GPT_S2_V9",
|
||||
common.ChannelTypeXunfei: "SparkDesk",
|
||||
common.ChannelTypeTencent: "hunyuan",
|
||||
common.ChannelTypeAzure: "gpt-3.5-turbo",
|
||||
}
|
||||
|
||||
// 从映射中获取模型名称
|
||||
model, ok := channelTypeToModel[channel.Type]
|
||||
if !ok {
|
||||
model = "gpt-3.5-turbo" // 默认值
|
||||
}
|
||||
request.Model = model
|
||||
|
||||
provider := providers.GetProvider(channel.Type, c)
|
||||
if provider == nil {
|
||||
return errors.New("channel not implemented"), nil
|
||||
}
|
||||
chatProvider, ok := provider.(providers_base.ChatInterface)
|
||||
if !ok {
|
||||
return errors.New("channel not implemented"), nil
|
||||
}
|
||||
|
||||
modelMap, err := parseModelMapping(channel.GetModelMapping())
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
if modelMap != nil && modelMap[request.Model] != "" {
|
||||
request.Model = modelMap[request.Model]
|
||||
}
|
||||
|
||||
promptTokens := common.CountTokenMessages(request.Messages, request.Model)
|
||||
Usage, openAIErrorWithStatusCode := chatProvider.ChatAction(&request, true, promptTokens)
|
||||
if openAIErrorWithStatusCode != nil {
|
||||
return nil, &openAIErrorWithStatusCode.OpenAIError
|
||||
}
|
||||
|
||||
if Usage.CompletionTokens == 0 {
|
||||
return errors.New(fmt.Sprintf("channel %s, message 补全 tokens 非预期返回 0", channel.Name)), nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func buildTestRequest() *types.ChatCompletionRequest {
|
||||
testRequest := &types.ChatCompletionRequest{
|
||||
Messages: []types.ChatCompletionMessage{
|
||||
{
|
||||
Role: "user",
|
||||
Content: "You just need to output 'hi' next.",
|
||||
},
|
||||
},
|
||||
Model: "",
|
||||
func buildTestRequest() *relaymodel.GeneralOpenAIRequest {
|
||||
testRequest := &relaymodel.GeneralOpenAIRequest{
|
||||
MaxTokens: 1,
|
||||
Stream: false,
|
||||
Model: "gpt-3.5-turbo",
|
||||
}
|
||||
testMessage := relaymodel.Message{
|
||||
Role: "user",
|
||||
Content: "hi",
|
||||
}
|
||||
testRequest.Messages = append(testRequest.Messages, testMessage)
|
||||
return testRequest
|
||||
}
|
||||
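buildTestRequest deliberately keeps the probe cheap: one user message, a max_tokens of 1, and a model name that testChannel immediately overwrites with one the channel actually serves. Roughly, the payload that ends up being marshalled looks like the sketch below; the field order and JSON tags are assumptions based on the usual OpenAI schema, not read from relay/model:

req := buildTestRequest()
req.Model = "gpt-4" // testChannel swaps in the channel's first supported model
data, _ := json.Marshal(req)
// data ≈ {"model":"gpt-4","messages":[{"role":"user","content":"hi"}],"max_tokens":1}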
|
||||
func testChannel(channel *model.Channel) (err error, openaiErr *relaymodel.Error) {
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = &http.Request{
|
||||
Method: "POST",
|
||||
URL: &url.URL{Path: "/v1/chat/completions"},
|
||||
Body: nil,
|
||||
Header: make(http.Header),
|
||||
}
|
||||
c.Request.Header.Set("Authorization", "Bearer "+channel.Key)
|
||||
c.Request.Header.Set("Content-Type", "application/json")
|
||||
c.Set("channel", channel.Type)
|
||||
c.Set("base_url", channel.GetBaseURL())
|
||||
middleware.SetupContextForSelectedChannel(c, channel, "")
|
||||
meta := util.GetRelayMeta(c)
|
||||
apiType := constant.ChannelType2APIType(channel.Type)
|
||||
adaptor := helper.GetAdaptor(apiType)
|
||||
if adaptor == nil {
|
||||
return fmt.Errorf("invalid api type: %d, adaptor is nil", apiType), nil
|
||||
}
|
||||
adaptor.Init(meta)
|
||||
modelName := adaptor.GetModelList()[0]
|
||||
if !strings.Contains(channel.Models, modelName) {
|
||||
modelNames := strings.Split(channel.Models, ",")
|
||||
if len(modelNames) > 0 {
|
||||
modelName = modelNames[0]
|
||||
}
|
||||
}
|
||||
request := buildTestRequest()
|
||||
request.Model = modelName
|
||||
meta.OriginModelName, meta.ActualModelName = modelName, modelName
|
||||
convertedRequest, err := adaptor.ConvertRequest(c, constant.RelayModeChatCompletions, request)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
jsonData, err := json.Marshal(convertedRequest)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
requestBody := bytes.NewBuffer(jsonData)
|
||||
c.Request.Body = io.NopCloser(requestBody)
|
||||
resp, err := adaptor.DoRequest(c, meta, requestBody)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err := util.RelayErrorHandler(resp)
|
||||
return fmt.Errorf("status code %d: %s", resp.StatusCode, err.Error.Message), &err.Error
|
||||
}
|
||||
usage, respErr := adaptor.DoResponse(c, resp, meta)
|
||||
if respErr != nil {
|
||||
return fmt.Errorf("%s", respErr.Error.Message), &respErr.Error
|
||||
}
|
||||
if usage == nil {
|
||||
return errors.New("usage is nil"), nil
|
||||
}
|
||||
result := w.Result()
|
||||
// print result.Body
|
||||
respBody, err := io.ReadAll(result.Body)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
logger.SysLog(fmt.Sprintf("testing channel #%d, response: \n%s", channel.Id, string(respBody)))
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func TestChannel(c *gin.Context) {
|
||||
id, err := strconv.Atoi(c.Param("id"))
|
||||
if err != nil {
|
||||
@@ -112,9 +123,8 @@ func TestChannel(c *gin.Context) {
|
||||
})
|
||||
return
|
||||
}
|
||||
testRequest := buildTestRequest()
|
||||
tik := time.Now()
|
||||
err, _ = testChannel(channel, *testRequest)
|
||||
err, _ = testChannel(channel)
|
||||
tok := time.Now()
|
||||
milliseconds := tok.Sub(tik).Milliseconds()
|
||||
go channel.UpdateResponseTime(milliseconds)
|
||||
@@ -139,12 +149,12 @@ var testAllChannelsLock sync.Mutex
|
||||
var testAllChannelsRunning bool = false
|
||||
|
||||
func notifyRootUser(subject string, content string) {
|
||||
if common.RootUserEmail == "" {
|
||||
common.RootUserEmail = model.GetRootUserEmail()
|
||||
if config.RootUserEmail == "" {
|
||||
config.RootUserEmail = model.GetRootUserEmail()
|
||||
}
|
||||
err := common.SendEmail(subject, common.RootUserEmail, content)
|
||||
err := common.SendEmail(subject, config.RootUserEmail, content)
|
||||
if err != nil {
|
||||
common.SysError(fmt.Sprintf("failed to send email: %s", err.Error()))
|
||||
logger.SysError(fmt.Sprintf("failed to send email: %s", err.Error()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -165,8 +175,8 @@ func enableChannel(channelId int, channelName string) {
|
||||
}
|
||||
|
||||
func testAllChannels(notify bool) error {
|
||||
if common.RootUserEmail == "" {
|
||||
common.RootUserEmail = model.GetRootUserEmail()
|
||||
if config.RootUserEmail == "" {
|
||||
config.RootUserEmail = model.GetRootUserEmail()
|
||||
}
|
||||
testAllChannelsLock.Lock()
|
||||
if testAllChannelsRunning {
|
||||
@@ -179,8 +189,7 @@ func testAllChannels(notify bool) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
testRequest := buildTestRequest()
|
||||
var disableThreshold = int64(common.ChannelDisableThreshold * 1000)
|
||||
var disableThreshold = int64(config.ChannelDisableThreshold * 1000)
|
||||
if disableThreshold == 0 {
|
||||
disableThreshold = 10000000 // an impossible value
|
||||
}
|
||||
@@ -188,29 +197,29 @@ func testAllChannels(notify bool) error {
|
||||
for _, channel := range channels {
|
||||
isChannelEnabled := channel.Status == common.ChannelStatusEnabled
|
||||
tik := time.Now()
|
||||
err, openaiErr := testChannel(channel, *testRequest)
|
||||
err, openaiErr := testChannel(channel)
|
||||
tok := time.Now()
|
||||
milliseconds := tok.Sub(tik).Milliseconds()
|
||||
if milliseconds > disableThreshold {
|
||||
err = fmt.Errorf("响应时间 %.2fs 超过阈值 %.2fs", float64(milliseconds)/1000.0, float64(disableThreshold)/1000.0)
|
||||
if isChannelEnabled && milliseconds > disableThreshold {
|
||||
err = errors.New(fmt.Sprintf("响应时间 %.2fs 超过阈值 %.2fs", float64(milliseconds)/1000.0, float64(disableThreshold)/1000.0))
|
||||
disableChannel(channel.Id, channel.Name, err.Error())
|
||||
}
|
||||
if isChannelEnabled && shouldDisableChannel(openaiErr, -1) {
|
||||
if isChannelEnabled && util.ShouldDisableChannel(openaiErr, -1) {
|
||||
disableChannel(channel.Id, channel.Name, err.Error())
|
||||
}
|
||||
if !isChannelEnabled && shouldEnableChannel(err, openaiErr) {
|
||||
if !isChannelEnabled && util.ShouldEnableChannel(err, openaiErr) {
|
||||
enableChannel(channel.Id, channel.Name)
|
||||
}
|
||||
channel.UpdateResponseTime(milliseconds)
|
||||
time.Sleep(common.RequestInterval)
|
||||
time.Sleep(config.RequestInterval)
|
||||
}
|
||||
testAllChannelsLock.Lock()
|
||||
testAllChannelsRunning = false
|
||||
testAllChannelsLock.Unlock()
|
||||
if notify {
|
||||
err := common.SendEmail("通道测试完成", common.RootUserEmail, "通道测试完成,如果没有收到禁用通知,说明所有通道都正常")
|
||||
err := common.SendEmail("通道测试完成", config.RootUserEmail, "通道测试完成,如果没有收到禁用通知,说明所有通道都正常")
|
||||
if err != nil {
|
||||
common.SysError(fmt.Sprintf("failed to send email: %s", err.Error()))
|
||||
logger.SysError(fmt.Sprintf("failed to send email: %s", err.Error()))
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -230,13 +239,14 @@ func TestAllChannels(c *gin.Context) {
|
||||
"success": true,
|
||||
"message": "",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func AutomaticallyTestChannels(frequency int) {
|
||||
for {
|
||||
time.Sleep(time.Duration(frequency) * time.Minute)
|
||||
common.SysLog("testing all channels")
|
||||
logger.SysLog("testing all channels")
|
||||
_ = testAllChannels(false)
|
||||
common.SysLog("channel test finished")
|
||||
logger.SysLog("channel test finished")
|
||||
}
|
||||
}
|
||||
|
@@ -2,9 +2,10 @@ package controller
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/helper"
|
||||
"github.com/songquanpeng/one-api/model"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
@@ -14,7 +15,7 @@ func GetAllChannels(c *gin.Context) {
|
||||
if p < 0 {
|
||||
p = 0
|
||||
}
|
||||
channels, err := model.GetAllChannels(p*common.ItemsPerPage, common.ItemsPerPage, false)
|
||||
channels, err := model.GetAllChannels(p*config.ItemsPerPage, config.ItemsPerPage, false)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
@@ -83,7 +84,7 @@ func AddChannel(c *gin.Context) {
|
||||
})
|
||||
return
|
||||
}
|
||||
channel.CreatedTime = common.GetTimestamp()
|
||||
channel.CreatedTime = helper.GetTimestamp()
|
||||
keys := strings.Split(channel.Key, "\n")
|
||||
channels := make([]model.Channel, 0, len(keys))
|
||||
for _, key := range keys {
|
||||
|
@@ -7,9 +7,12 @@ import (
|
||||
"fmt"
|
||||
"github.com/gin-contrib/sessions"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/helper"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"github.com/songquanpeng/one-api/model"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
@@ -30,7 +33,7 @@ func getGitHubUserInfoByCode(code string) (*GitHubUser, error) {
|
||||
if code == "" {
|
||||
return nil, errors.New("无效的参数")
|
||||
}
|
||||
values := map[string]string{"client_id": common.GitHubClientId, "client_secret": common.GitHubClientSecret, "code": code}
|
||||
values := map[string]string{"client_id": config.GitHubClientId, "client_secret": config.GitHubClientSecret, "code": code}
|
||||
jsonData, err := json.Marshal(values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -46,7 +49,7 @@ func getGitHubUserInfoByCode(code string) (*GitHubUser, error) {
|
||||
}
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
common.SysLog(err.Error())
|
||||
logger.SysLog(err.Error())
|
||||
return nil, errors.New("无法连接至 GitHub 服务器,请稍后重试!")
|
||||
}
|
||||
defer res.Body.Close()
|
||||
@@ -62,7 +65,7 @@ func getGitHubUserInfoByCode(code string) (*GitHubUser, error) {
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", oAuthResponse.AccessToken))
|
||||
res2, err := client.Do(req)
|
||||
if err != nil {
|
||||
common.SysLog(err.Error())
|
||||
logger.SysLog(err.Error())
|
||||
return nil, errors.New("无法连接至 GitHub 服务器,请稍后重试!")
|
||||
}
|
||||
defer res2.Body.Close()
|
||||
@@ -93,7 +96,7 @@ func GitHubOAuth(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
if !common.GitHubOAuthEnabled {
|
||||
if !config.GitHubOAuthEnabled {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
"message": "管理员未开启通过 GitHub 登录以及注册",
|
||||
@@ -122,7 +125,7 @@ func GitHubOAuth(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if common.RegisterEnabled {
|
||||
if config.RegisterEnabled {
|
||||
user.Username = "github_" + strconv.Itoa(model.GetMaxUserId()+1)
|
||||
if githubUser.Name != "" {
|
||||
user.DisplayName = githubUser.Name
|
||||
@@ -160,7 +163,7 @@ func GitHubOAuth(c *gin.Context) {
|
||||
}
|
||||
|
||||
func GitHubBind(c *gin.Context) {
|
||||
if !common.GitHubOAuthEnabled {
|
||||
if !config.GitHubOAuthEnabled {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
"message": "管理员未开启通过 GitHub 登录以及注册",
|
||||
@@ -216,7 +219,7 @@ func GitHubBind(c *gin.Context) {
|
||||
|
||||
func GenerateOAuthCode(c *gin.Context) {
|
||||
session := sessions.Default(c)
|
||||
state := common.GetRandomString(12)
|
||||
state := helper.GetRandomString(12)
|
||||
session.Set("oauth_state", state)
|
||||
err := session.Save()
|
||||
if err != nil {
|
||||
|
@@ -1,15 +1,14 @@
package controller

import (
    "net/http"
    "one-api/common"

    "github.com/gin-gonic/gin"
    "github.com/songquanpeng/one-api/common"
    "net/http"
)

func GetGroups(c *gin.Context) {
    groupNames := make([]string, 0)
    for groupName, _ := range common.GroupRatio {
    for groupName := range common.GroupRatio {
        groupNames = append(groupNames, groupName)
    }
    c.JSON(http.StatusOK, gin.H{
@@ -2,9 +2,9 @@ package controller
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/model"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
@@ -20,7 +20,7 @@ func GetAllLogs(c *gin.Context) {
|
||||
tokenName := c.Query("token_name")
|
||||
modelName := c.Query("model_name")
|
||||
channel, _ := strconv.Atoi(c.Query("channel"))
|
||||
logs, err := model.GetAllLogs(logType, startTimestamp, endTimestamp, modelName, username, tokenName, p*common.ItemsPerPage, common.ItemsPerPage, channel)
|
||||
logs, err := model.GetAllLogs(logType, startTimestamp, endTimestamp, modelName, username, tokenName, p*config.ItemsPerPage, config.ItemsPerPage, channel)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
@@ -47,7 +47,7 @@ func GetUserLogs(c *gin.Context) {
|
||||
endTimestamp, _ := strconv.ParseInt(c.Query("end_timestamp"), 10, 64)
|
||||
tokenName := c.Query("token_name")
|
||||
modelName := c.Query("model_name")
|
||||
logs, err := model.GetUserLogs(userId, logType, startTimestamp, endTimestamp, modelName, tokenName, p*common.ItemsPerPage, common.ItemsPerPage)
|
||||
logs, err := model.GetUserLogs(userId, logType, startTimestamp, endTimestamp, modelName, tokenName, p*config.ItemsPerPage, config.ItemsPerPage)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
|
@@ -3,9 +3,10 @@ package controller
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/model"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
@@ -18,55 +19,55 @@ func GetStatus(c *gin.Context) {
|
||||
"data": gin.H{
|
||||
"version": common.Version,
|
||||
"start_time": common.StartTime,
|
||||
"email_verification": common.EmailVerificationEnabled,
|
||||
"github_oauth": common.GitHubOAuthEnabled,
|
||||
"github_client_id": common.GitHubClientId,
|
||||
"system_name": common.SystemName,
|
||||
"logo": common.Logo,
|
||||
"footer_html": common.Footer,
|
||||
"wechat_qrcode": common.WeChatAccountQRCodeImageURL,
|
||||
"wechat_login": common.WeChatAuthEnabled,
|
||||
"server_address": common.ServerAddress,
|
||||
"turnstile_check": common.TurnstileCheckEnabled,
|
||||
"turnstile_site_key": common.TurnstileSiteKey,
|
||||
"top_up_link": common.TopUpLink,
|
||||
"chat_link": common.ChatLink,
|
||||
"quota_per_unit": common.QuotaPerUnit,
|
||||
"display_in_currency": common.DisplayInCurrencyEnabled,
|
||||
"email_verification": config.EmailVerificationEnabled,
|
||||
"github_oauth": config.GitHubOAuthEnabled,
|
||||
"github_client_id": config.GitHubClientId,
|
||||
"system_name": config.SystemName,
|
||||
"logo": config.Logo,
|
||||
"footer_html": config.Footer,
|
||||
"wechat_qrcode": config.WeChatAccountQRCodeImageURL,
|
||||
"wechat_login": config.WeChatAuthEnabled,
|
||||
"server_address": config.ServerAddress,
|
||||
"turnstile_check": config.TurnstileCheckEnabled,
|
||||
"turnstile_site_key": config.TurnstileSiteKey,
|
||||
"top_up_link": config.TopUpLink,
|
||||
"chat_link": config.ChatLink,
|
||||
"quota_per_unit": config.QuotaPerUnit,
|
||||
"display_in_currency": config.DisplayInCurrencyEnabled,
|
||||
},
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func GetNotice(c *gin.Context) {
|
||||
common.OptionMapRWMutex.RLock()
|
||||
defer common.OptionMapRWMutex.RUnlock()
|
||||
config.OptionMapRWMutex.RLock()
|
||||
defer config.OptionMapRWMutex.RUnlock()
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": true,
|
||||
"message": "",
|
||||
"data": common.OptionMap["Notice"],
|
||||
"data": config.OptionMap["Notice"],
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func GetAbout(c *gin.Context) {
|
||||
common.OptionMapRWMutex.RLock()
|
||||
defer common.OptionMapRWMutex.RUnlock()
|
||||
config.OptionMapRWMutex.RLock()
|
||||
defer config.OptionMapRWMutex.RUnlock()
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": true,
|
||||
"message": "",
|
||||
"data": common.OptionMap["About"],
|
||||
"data": config.OptionMap["About"],
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func GetHomePageContent(c *gin.Context) {
|
||||
common.OptionMapRWMutex.RLock()
|
||||
defer common.OptionMapRWMutex.RUnlock()
|
||||
config.OptionMapRWMutex.RLock()
|
||||
defer config.OptionMapRWMutex.RUnlock()
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": true,
|
||||
"message": "",
|
||||
"data": common.OptionMap["HomePageContent"],
|
||||
"data": config.OptionMap["HomePageContent"],
|
||||
})
|
||||
return
|
||||
}
|
||||
@@ -80,9 +81,9 @@ func SendEmailVerification(c *gin.Context) {
|
||||
})
|
||||
return
|
||||
}
|
||||
if common.EmailDomainRestrictionEnabled {
|
||||
if config.EmailDomainRestrictionEnabled {
|
||||
allowed := false
|
||||
for _, domain := range common.EmailDomainWhitelist {
|
||||
for _, domain := range config.EmailDomainWhitelist {
|
||||
if strings.HasSuffix(email, "@"+domain) {
|
||||
allowed = true
|
||||
break
|
||||
@@ -105,10 +106,10 @@ func SendEmailVerification(c *gin.Context) {
|
||||
}
|
||||
code := common.GenerateVerificationCode(6)
|
||||
common.RegisterVerificationCodeWithKey(email, code, common.EmailVerificationPurpose)
|
||||
subject := fmt.Sprintf("%s邮箱验证邮件", common.SystemName)
|
||||
subject := fmt.Sprintf("%s邮箱验证邮件", config.SystemName)
|
||||
content := fmt.Sprintf("<p>您好,你正在进行%s邮箱验证。</p>"+
|
||||
"<p>您的验证码为: <strong>%s</strong></p>"+
|
||||
"<p>验证码 %d 分钟内有效,如果不是本人操作,请忽略。</p>", common.SystemName, code, common.VerificationValidMinutes)
|
||||
"<p>验证码 %d 分钟内有效,如果不是本人操作,请忽略。</p>", config.SystemName, code, common.VerificationValidMinutes)
|
||||
err := common.SendEmail(subject, email, content)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
@@ -142,12 +143,12 @@ func SendPasswordResetEmail(c *gin.Context) {
|
||||
}
|
||||
code := common.GenerateVerificationCode(0)
|
||||
common.RegisterVerificationCodeWithKey(email, code, common.PasswordResetPurpose)
|
||||
link := fmt.Sprintf("%s/user/reset?email=%s&token=%s", common.ServerAddress, email, code)
|
||||
subject := fmt.Sprintf("%s密码重置", common.SystemName)
|
||||
link := fmt.Sprintf("%s/user/reset?email=%s&token=%s", config.ServerAddress, email, code)
|
||||
subject := fmt.Sprintf("%s密码重置", config.SystemName)
|
||||
content := fmt.Sprintf("<p>您好,你正在进行%s密码重置。</p>"+
|
||||
"<p>点击 <a href='%s'>此处</a> 进行密码重置。</p>"+
|
||||
"<p>如果链接无法点击,请尝试点击下面的链接或将其复制到浏览器中打开:<br> %s </p>"+
|
||||
"<p>重置链接 %d 分钟内有效,如果不是本人操作,请忽略。</p>", common.SystemName, link, link, common.VerificationValidMinutes)
|
||||
"<p>重置链接 %d 分钟内有效,如果不是本人操作,请忽略。</p>", config.SystemName, link, link, common.VerificationValidMinutes)
|
||||
err := common.SendEmail(subject, email, content)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
|
@@ -2,9 +2,18 @@ package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/relay/channel/ai360"
|
||||
"github.com/songquanpeng/one-api/relay/channel/baichuan"
|
||||
"github.com/songquanpeng/one-api/relay/channel/minimax"
|
||||
"github.com/songquanpeng/one-api/relay/channel/mistral"
|
||||
"github.com/songquanpeng/one-api/relay/channel/moonshot"
|
||||
"github.com/songquanpeng/one-api/relay/constant"
|
||||
"github.com/songquanpeng/one-api/relay/helper"
|
||||
relaymodel "github.com/songquanpeng/one-api/relay/model"
|
||||
"github.com/songquanpeng/one-api/relay/util"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// https://platform.openai.com/docs/api-reference/models/list
|
||||
@@ -36,6 +45,7 @@ type OpenAIModels struct {
|
||||
|
||||
var openAIModels []OpenAIModels
|
||||
var openAIModelsMap map[string]OpenAIModels
|
||||
var channelId2Models map[int][]string
|
||||
|
||||
func init() {
|
||||
var permission []OpenAIModelPermission
|
||||
@@ -54,525 +64,101 @@ func init() {
|
||||
IsBlocking: false,
|
||||
})
|
||||
// https://platform.openai.com/docs/models/model-endpoint-compatibility
|
||||
openAIModels = []OpenAIModels{
|
||||
{
|
||||
Id: "dall-e-2",
|
||||
for i := 0; i < constant.APITypeDummy; i++ {
|
||||
if i == constant.APITypeAIProxyLibrary {
|
||||
continue
|
||||
}
|
||||
adaptor := helper.GetAdaptor(i)
|
||||
channelName := adaptor.GetChannelName()
|
||||
modelNames := adaptor.GetModelList()
|
||||
for _, modelName := range modelNames {
|
||||
openAIModels = append(openAIModels, OpenAIModels{
|
||||
Id: modelName,
|
||||
Object: "model",
|
||||
Created: 1626777600,
|
||||
OwnedBy: channelName,
|
||||
Permission: permission,
|
||||
Root: modelName,
|
||||
Parent: nil,
|
||||
})
|
||||
}
|
||||
}
|
||||
for _, modelName := range ai360.ModelList {
|
||||
openAIModels = append(openAIModels, OpenAIModels{
|
||||
Id: modelName,
|
||||
Object: "model",
|
||||
Created: 1677649963,
|
||||
OwnedBy: "openai",
|
||||
Permission: permission,
|
||||
Root: "dall-e-2",
|
||||
Parent: nil,
|
||||
},
|
||||
{
|
||||
Id: "dall-e-3",
|
||||
Object: "model",
|
||||
Created: 1677649963,
|
||||
OwnedBy: "openai",
|
||||
Permission: permission,
|
||||
Root: "dall-e-3",
|
||||
Parent: nil,
|
||||
},
|
||||
{
|
||||
Id: "whisper-1",
|
||||
Object: "model",
|
||||
Created: 1677649963,
|
||||
OwnedBy: "openai",
|
||||
Permission: permission,
|
||||
Root: "whisper-1",
|
||||
Parent: nil,
|
||||
},
|
||||
{
|
||||
Id: "tts-1",
|
||||
Object: "model",
|
||||
Created: 1677649963,
|
			OwnedBy: "openai", Permission: permission, Root: "tts-1", Parent: nil,
		},
		{Id: "tts-1-1106", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "tts-1-1106", Parent: nil},
		{Id: "tts-1-hd", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "tts-1-hd", Parent: nil},
		{Id: "tts-1-hd-1106", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "tts-1-hd-1106", Parent: nil},
		{Id: "gpt-3.5-turbo", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "gpt-3.5-turbo", Parent: nil},
		{Id: "gpt-3.5-turbo-0301", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "gpt-3.5-turbo-0301", Parent: nil},
		{Id: "gpt-3.5-turbo-0613", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "gpt-3.5-turbo-0613", Parent: nil},
		{Id: "gpt-3.5-turbo-16k", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "gpt-3.5-turbo-16k", Parent: nil},
		{Id: "gpt-3.5-turbo-16k-0613", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "gpt-3.5-turbo-16k-0613", Parent: nil},
		{Id: "gpt-3.5-turbo-1106", Object: "model", Created: 1699593571, OwnedBy: "openai", Permission: permission, Root: "gpt-3.5-turbo-1106", Parent: nil},
		{Id: "gpt-3.5-turbo-instruct", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "gpt-3.5-turbo-instruct", Parent: nil},
		{Id: "gpt-4", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "gpt-4", Parent: nil},
		{Id: "gpt-4-0314", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "gpt-4-0314", Parent: nil},
		{Id: "gpt-4-0613", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "gpt-4-0613", Parent: nil},
		{Id: "gpt-4-32k", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "gpt-4-32k", Parent: nil},
		{Id: "gpt-4-32k-0314", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "gpt-4-32k-0314", Parent: nil},
		{Id: "gpt-4-32k-0613", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "gpt-4-32k-0613", Parent: nil},
		{Id: "gpt-4-1106-preview", Object: "model", Created: 1699593571, OwnedBy: "openai", Permission: permission, Root: "gpt-4-1106-preview", Parent: nil},
		{Id: "gpt-4-vision-preview", Object: "model", Created: 1699593571, OwnedBy: "openai", Permission: permission, Root: "gpt-4-vision-preview", Parent: nil},
		{Id: "text-embedding-ada-002", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "text-embedding-ada-002", Parent: nil},
		{Id: "text-davinci-003", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "text-davinci-003", Parent: nil},
		{Id: "text-davinci-002", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "text-davinci-002", Parent: nil},
		{Id: "text-curie-001", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "text-curie-001", Parent: nil},
		{Id: "text-babbage-001", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "text-babbage-001", Parent: nil},
		{Id: "text-ada-001", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "text-ada-001", Parent: nil},
		{Id: "text-moderation-latest", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "text-moderation-latest", Parent: nil},
		{Id: "text-moderation-stable", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "text-moderation-stable", Parent: nil},
		{Id: "text-davinci-edit-001", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "text-davinci-edit-001", Parent: nil},
		{Id: "code-davinci-edit-001", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, Root: "code-davinci-edit-001", Parent: nil},
		{Id: "claude-instant-1", Object: "model", Created: 1677649963, OwnedBy: "anthropic", Permission: permission, Root: "claude-instant-1", Parent: nil},
		{Id: "claude-2", Object: "model", Created: 1677649963, OwnedBy: "anthropic", Permission: permission, Root: "claude-2", Parent: nil},
		{Id: "claude-2.1", Object: "model", Created: 1677649963, OwnedBy: "anthropic", Permission: permission, Root: "claude-2.1", Parent: nil},
		{Id: "claude-2.0", Object: "model", Created: 1677649963, OwnedBy: "anthropic", Permission: permission, Root: "claude-2.0", Parent: nil},
		{Id: "ERNIE-Bot", Object: "model", Created: 1677649963, OwnedBy: "baidu", Permission: permission, Root: "ERNIE-Bot", Parent: nil},
		{Id: "ERNIE-Bot-turbo", Object: "model", Created: 1677649963, OwnedBy: "baidu", Permission: permission, Root: "ERNIE-Bot-turbo", Parent: nil},
		{Id: "ERNIE-Bot-4", Object: "model", Created: 1677649963, OwnedBy: "baidu", Permission: permission, Root: "ERNIE-Bot-4", Parent: nil},
		{Id: "Embedding-V1", Object: "model", Created: 1677649963, OwnedBy: "baidu", Permission: permission, Root: "Embedding-V1", Parent: nil},
		{Id: "PaLM-2", Object: "model", Created: 1677649963, OwnedBy: "google", Permission: permission, Root: "PaLM-2", Parent: nil},
		{Id: "gemini-pro", Object: "model", Created: 1677649963, OwnedBy: "google", Permission: permission, Root: "gemini-pro", Parent: nil},
		{Id: "chatglm_turbo", Object: "model", Created: 1677649963, OwnedBy: "zhipu", Permission: permission, Root: "chatglm_turbo", Parent: nil},
		{Id: "chatglm_pro", Object: "model", Created: 1677649963, OwnedBy: "zhipu", Permission: permission, Root: "chatglm_pro", Parent: nil},
		{Id: "chatglm_std", Object: "model", Created: 1677649963, OwnedBy: "zhipu", Permission: permission, Root: "chatglm_std", Parent: nil},
		{Id: "chatglm_lite", Object: "model", Created: 1677649963, OwnedBy: "zhipu", Permission: permission, Root: "chatglm_lite", Parent: nil},
		{Id: "qwen-turbo", Object: "model", Created: 1677649963, OwnedBy: "ali", Permission: permission, Root: "qwen-turbo", Parent: nil},
		{Id: "qwen-plus", Object: "model", Created: 1677649963, OwnedBy: "ali", Permission: permission, Root: "qwen-plus", Parent: nil},
		{Id: "qwen-max", Object: "model", Created: 1677649963, OwnedBy: "ali", Permission: permission, Root: "qwen-max", Parent: nil},
		{Id: "qwen-max-longcontext", Object: "model", Created: 1677649963, OwnedBy: "ali", Permission: permission, Root: "qwen-max-longcontext", Parent: nil},
		{Id: "text-embedding-v1", Object: "model", Created: 1677649963, OwnedBy: "ali", Permission: permission, Root: "text-embedding-v1", Parent: nil},
		{Id: "SparkDesk", Object: "model", Created: 1677649963, OwnedBy: "xunfei", Permission: permission, Root: "SparkDesk", Parent: nil},
-		{Id: "360GPT_S2_V9", Object: "model", Created: 1677649963, OwnedBy: "360", Permission: permission, Root: "360GPT_S2_V9", Parent: nil},
-		{Id: "embedding-bert-512-v1", Object: "model", Created: 1677649963, OwnedBy: "360", Permission: permission, Root: "embedding-bert-512-v1", Parent: nil},
-		{Id: "embedding_s1_v1", Object: "model", Created: 1677649963, OwnedBy: "360", Permission: permission, Root: "embedding_s1_v1", Parent: nil},
-		{Id: "semantic_similarity_s1_v1", Object: "model", Created: 1677649963, OwnedBy: "360", Permission: permission, Root: "semantic_similarity_s1_v1", Parent: nil},
-		{Id: "hunyuan", Object: "model", Created: 1677649963, OwnedBy: "tencent", Permission: permission, Root: "hunyuan", Parent: nil},
+	for _, modelName := range moonshot.ModelList {
+		openAIModels = append(openAIModels, OpenAIModels{
+			Id:         modelName,
+			Object:     "model",
+			Created:    1626777600,
+			OwnedBy:    "moonshot",
+			Permission: permission,
+			Root:       modelName,
+			Parent:     nil,
+		})
+	}
+	for _, modelName := range baichuan.ModelList {
+		openAIModels = append(openAIModels, OpenAIModels{
+			Id:         modelName,
+			Object:     "model",
+			Created:    1626777600,
+			OwnedBy:    "baichuan",
+			Permission: permission,
+			Root:       modelName,
+			Parent:     nil,
+		})
+	}
+	for _, modelName := range minimax.ModelList {
+		openAIModels = append(openAIModels, OpenAIModels{
+			Id:         modelName,
+			Object:     "model",
+			Created:    1626777600,
+			OwnedBy:    "minimax",
+			Permission: permission,
+			Root:       modelName,
+			Parent:     nil,
+		})
+	}
+	for _, modelName := range mistral.ModelList {
+		openAIModels = append(openAIModels, OpenAIModels{
+			Id:         modelName,
+			Object:     "model",
+			Created:    1626777600,
+			OwnedBy:    "mistralai",
+			Permission: permission,
+			Root:       modelName,
+			Parent:     nil,
+		})
+	}
	openAIModelsMap = make(map[string]OpenAIModels)
	for _, model := range openAIModels {
		openAIModelsMap[model.Id] = model
	}
	channelId2Models = make(map[int][]string)
	for i := 1; i < common.ChannelTypeDummy; i++ {
		adaptor := helper.GetAdaptor(constant.ChannelType2APIType(i))
		meta := &util.RelayMeta{
			ChannelType: i,
		}
		adaptor.Init(meta)
		channelId2Models[i] = adaptor.GetModelList()
	}
}

func DashboardListModels(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{
		"success": true,
		"message": "",
		"data":    channelId2Models,
	})
}

func ListModels(c *gin.Context) {
@@ -587,14 +173,14 @@ func RetrieveModel(c *gin.Context) {
	if model, ok := openAIModelsMap[modelId]; ok {
		c.JSON(200, model)
	} else {
-		openAIError := types.OpenAIError{
+		Error := relaymodel.Error{
			Message: fmt.Sprintf("The model '%s' does not exist", modelId),
			Type:    "invalid_request_error",
			Param:   "model",
			Code:    "model_not_found",
		}
		c.JSON(200, gin.H{
-			"error": openAIError,
+			"error": Error,
		})
	}
}
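// Illustrative example (response shape only, not part of the diff): with the change above, a
// request for an unknown model id returns a body of the form
//
//	{"error": {"message": "The model 'foo' does not exist", "type": "invalid_request_error",
//	           "param": "model", "code": "model_not_found"}}
//
// where "foo" stands for whatever model id was requested.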
@@ -2,9 +2,10 @@ package controller

 import (
 	"encoding/json"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/model"
 	"net/http"
-	"one-api/common"
-	"one-api/model"
 	"strings"

 	"github.com/gin-gonic/gin"
@@ -12,17 +13,17 @@ import (

 func GetOptions(c *gin.Context) {
 	var options []*model.Option
-	common.OptionMapRWMutex.Lock()
-	for k, v := range common.OptionMap {
+	config.OptionMapRWMutex.Lock()
+	for k, v := range config.OptionMap {
 		if strings.HasSuffix(k, "Token") || strings.HasSuffix(k, "Secret") {
 			continue
 		}
 		options = append(options, &model.Option{
 			Key:   k,
-			Value: common.Interface2String(v),
+			Value: helper.Interface2String(v),
 		})
 	}
-	common.OptionMapRWMutex.Unlock()
+	config.OptionMapRWMutex.Unlock()
 	c.JSON(http.StatusOK, gin.H{
 		"success": true,
 		"message": "",
@@ -42,8 +43,16 @@ func UpdateOption(c *gin.Context) {
 		return
 	}
 	switch option.Key {
+	case "Theme":
+		if !config.ValidThemes[option.Value] {
+			c.JSON(http.StatusOK, gin.H{
+				"success": false,
+				"message": "无效的主题",
+			})
+			return
+		}
 	case "GitHubOAuthEnabled":
-		if option.Value == "true" && common.GitHubClientId == "" {
+		if option.Value == "true" && config.GitHubClientId == "" {
 			c.JSON(http.StatusOK, gin.H{
 				"success": false,
 				"message": "无法启用 GitHub OAuth,请先填入 GitHub Client Id 以及 GitHub Client Secret!",
@@ -51,7 +60,7 @@ func UpdateOption(c *gin.Context) {
 			return
 		}
 	case "EmailDomainRestrictionEnabled":
-		if option.Value == "true" && len(common.EmailDomainWhitelist) == 0 {
+		if option.Value == "true" && len(config.EmailDomainWhitelist) == 0 {
 			c.JSON(http.StatusOK, gin.H{
 				"success": false,
 				"message": "无法启用邮箱域名限制,请先填入限制的邮箱域名!",
@@ -59,7 +68,7 @@ func UpdateOption(c *gin.Context) {
 			return
 		}
 	case "WeChatAuthEnabled":
-		if option.Value == "true" && common.WeChatServerAddress == "" {
+		if option.Value == "true" && config.WeChatServerAddress == "" {
 			c.JSON(http.StatusOK, gin.H{
 				"success": false,
 				"message": "无法启用微信登录,请先填入微信登录相关配置信息!",
@@ -67,7 +76,7 @@ func UpdateOption(c *gin.Context) {
 			return
 		}
 	case "TurnstileCheckEnabled":
-		if option.Value == "true" && common.TurnstileSiteKey == "" {
+		if option.Value == "true" && config.TurnstileSiteKey == "" {
 			c.JSON(http.StatusOK, gin.H{
 				"success": false,
 				"message": "无法启用 Turnstile 校验,请先填入 Turnstile 校验相关配置信息!",
@@ -2,9 +2,10 @@ package controller

 import (
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common/config"
+	"github.com/songquanpeng/one-api/common/helper"
+	"github.com/songquanpeng/one-api/model"
 	"net/http"
-	"one-api/common"
-	"one-api/model"
 	"strconv"
 )

@@ -13,7 +14,7 @@ func GetAllRedemptions(c *gin.Context) {
 	if p < 0 {
 		p = 0
 	}
-	redemptions, err := model.GetAllRedemptions(p*common.ItemsPerPage, common.ItemsPerPage)
+	redemptions, err := model.GetAllRedemptions(p*config.ItemsPerPage, config.ItemsPerPage)
 	if err != nil {
 		c.JSON(http.StatusOK, gin.H{
 			"success": false,
@@ -105,12 +106,12 @@ func AddRedemption(c *gin.Context) {
 	}
 	var keys []string
 	for i := 0; i < redemption.Count; i++ {
-		key := common.GetUUID()
+		key := helper.GetUUID()
 		cleanRedemption := model.Redemption{
 			UserId:      c.GetInt("id"),
 			Name:        redemption.Name,
 			Key:         key,
-			CreatedTime: common.GetTimestamp(),
+			CreatedTime: helper.GetTimestamp(),
 			Quota:       redemption.Quota,
 		}
 		err = cleanRedemption.Insert()
|
||||
|
@@ -1,94 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayChat(c *gin.Context) {
|
||||
|
||||
var chatRequest types.ChatCompletionRequest
|
||||
if err := common.UnmarshalBodyReusable(c, &chatRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, chatRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
if chatRequest.MaxTokens < 0 || chatRequest.MaxTokens > math.MaxInt32/2 {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, "max_tokens is invalid")
|
||||
return
|
||||
}
|
||||
|
||||
// parse the model mapping
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(channel.GetModelMapping())
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[chatRequest.Model] != "" {
|
||||
chatRequest.Model = modelMap[chatRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// get the provider
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeChatCompletions)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
chatProvider, ok := provider.(providersBase.ChatInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// count the input tokens
|
||||
promptTokens := common.CountTokenMessages(chatRequest.Messages, chatRequest.Model)
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, chatRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = chatProvider.ChatAction(&chatRequest, isModelMapped, promptTokens)
|
||||
|
||||
// on error, refund the pre-consumed quota
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// on success, consume the quota
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
@@ -1,94 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayCompletions(c *gin.Context) {
|
||||
|
||||
var completionRequest types.CompletionRequest
|
||||
if err := common.UnmarshalBodyReusable(c, &completionRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, completionRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
if completionRequest.MaxTokens < 0 || completionRequest.MaxTokens > math.MaxInt32/2 {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, "max_tokens is invalid")
|
||||
return
|
||||
}
|
||||
|
||||
// parse the model mapping
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(channel.GetModelMapping())
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[completionRequest.Model] != "" {
|
||||
completionRequest.Model = modelMap[completionRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// get the provider
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeCompletions)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
completionProvider, ok := provider.(providersBase.CompletionInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// count the input tokens
|
||||
promptTokens := common.CountTokenInput(completionRequest.Prompt, completionRequest.Model)
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, completionRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = completionProvider.CompleteAction(&completionRequest, isModelMapped, promptTokens)
|
||||
|
||||
// on error, refund the pre-consumed quota
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// on success, consume the quota
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
@@ -1,93 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayEmbeddings(c *gin.Context) {
|
||||
|
||||
var embeddingsRequest types.EmbeddingRequest
|
||||
if strings.HasSuffix(c.Request.URL.Path, "embeddings") {
|
||||
embeddingsRequest.Model = c.Param("model")
|
||||
}
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &embeddingsRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, embeddingsRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// parse the model mapping
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(channel.GetModelMapping())
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[embeddingsRequest.Model] != "" {
|
||||
embeddingsRequest.Model = modelMap[embeddingsRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// get the provider
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeEmbeddings)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
embeddingsProvider, ok := provider.(providersBase.EmbeddingsInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// count the input tokens
|
||||
promptTokens := common.CountTokenInput(embeddingsRequest.Input, embeddingsRequest.Model)
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, embeddingsRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = embeddingsProvider.EmbeddingsAction(&embeddingsRequest, isModelMapped, promptTokens)
|
||||
|
||||
// on error, refund the pre-consumed quota
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// on success, consume the quota
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
@@ -1,106 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayImageEdits(c *gin.Context) {
|
||||
|
||||
var imageEditRequest types.ImageEditRequest
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &imageEditRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if imageEditRequest.Prompt == "" {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, "field prompt is required")
|
||||
return
|
||||
}
|
||||
|
||||
if imageEditRequest.Model == "" {
|
||||
imageEditRequest.Model = "dall-e-2"
|
||||
}
|
||||
|
||||
if imageEditRequest.Size == "" {
|
||||
imageEditRequest.Size = "1024x1024"
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, imageEditRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// parse the model mapping
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(channel.GetModelMapping())
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[imageEditRequest.Model] != "" {
|
||||
imageEditRequest.Model = modelMap[imageEditRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// get the provider
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeImagesEdits)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
imageEditsProvider, ok := provider.(providersBase.ImageEditsInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// count the input tokens
|
||||
promptTokens, err := common.CountTokenImage(imageEditRequest)
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, imageEditRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = imageEditsProvider.ImageEditsAction(&imageEditRequest, isModelMapped, promptTokens)
|
||||
|
||||
// on error, refund the pre-consumed quota
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// on success, consume the quota
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
@@ -1,109 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayImageGenerations(c *gin.Context) {
|
||||
|
||||
var imageRequest types.ImageRequest
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &imageRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if imageRequest.Model == "" {
|
||||
imageRequest.Model = "dall-e-2"
|
||||
}
|
||||
|
||||
if imageRequest.N == 0 {
|
||||
imageRequest.N = 1
|
||||
}
|
||||
|
||||
if imageRequest.Size == "" {
|
||||
imageRequest.Size = "1024x1024"
|
||||
}
|
||||
|
||||
if imageRequest.Quality == "" {
|
||||
imageRequest.Quality = "standard"
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, imageRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// parse the model mapping
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(channel.GetModelMapping())
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[imageRequest.Model] != "" {
|
||||
imageRequest.Model = modelMap[imageRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// get the provider
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeImagesGenerations)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
imageGenerationsProvider, ok := provider.(providersBase.ImageGenerationsInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// count the input tokens
|
||||
promptTokens, err := common.CountTokenImage(imageRequest)
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, imageRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = imageGenerationsProvider.ImageGenerationsAction(&imageRequest, isModelMapped, promptTokens)
|
||||
|
||||
// on error, refund the pre-consumed quota
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// on success, consume the quota
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
@@ -1,101 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayImageVariations(c *gin.Context) {
|
||||
|
||||
var imageEditRequest types.ImageEditRequest
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &imageEditRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if imageEditRequest.Model == "" {
|
||||
imageEditRequest.Model = "dall-e-2"
|
||||
}
|
||||
|
||||
if imageEditRequest.Size == "" {
|
||||
imageEditRequest.Size = "1024x1024"
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, imageEditRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// parse the model mapping
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(channel.GetModelMapping())
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[imageEditRequest.Model] != "" {
|
||||
imageEditRequest.Model = modelMap[imageEditRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// get the provider
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeImagesVariations)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
imageVariations, ok := provider.(providersBase.ImageVariationsInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// count the input tokens
|
||||
promptTokens, err := common.CountTokenImage(imageEditRequest)
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, imageEditRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = imageVariations.ImageVariationsAction(&imageEditRequest, isModelMapped, promptTokens)
|
||||
|
||||
// on error, refund the pre-consumed quota
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// on success, consume the quota
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
@@ -1,93 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayModerations(c *gin.Context) {
|
||||
|
||||
var moderationRequest types.ModerationRequest
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &moderationRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if moderationRequest.Model == "" {
|
||||
moderationRequest.Model = "text-moderation-stable"
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, moderationRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// parse the model mapping
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(channel.GetModelMapping())
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[moderationRequest.Model] != "" {
|
||||
moderationRequest.Model = modelMap[moderationRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// get the provider
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeModerations)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
moderationProvider, ok := provider.(providersBase.ModerationInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// count the input tokens
|
||||
promptTokens := common.CountTokenInput(moderationRequest.Input, moderationRequest.Model)
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, moderationRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = moderationProvider.ModerationAction(&moderationRequest, isModelMapped, promptTokens)
|
||||
|
||||
// on error, refund the pre-consumed quota
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// on success, consume the quota
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
@@ -1,89 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelaySpeech(c *gin.Context) {
|
||||
|
||||
var speechRequest types.SpeechAudioRequest
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &speechRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, speechRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// parse the model mapping
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(channel.GetModelMapping())
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[speechRequest.Model] != "" {
|
||||
speechRequest.Model = modelMap[speechRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// get the provider
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeAudioSpeech)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
speechProvider, ok := provider.(providersBase.SpeechInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// count the input tokens
|
||||
promptTokens := len(speechRequest.Input)
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, speechRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = speechProvider.SpeechAction(&speechRequest, isModelMapped, promptTokens)
|
||||
|
||||
// on error, refund the pre-consumed quota
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// on success, consume the quota
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
@@ -1,89 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayTranscriptions(c *gin.Context) {
|
||||
|
||||
var audioRequest types.AudioRequest
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &audioRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, audioRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// parse the model mapping
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(channel.GetModelMapping())
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[audioRequest.Model] != "" {
|
||||
audioRequest.Model = modelMap[audioRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// get the provider
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeAudioTranscription)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
transcriptionsProvider, ok := provider.(providersBase.TranscriptionsInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// count the input tokens
|
||||
promptTokens := 0
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, audioRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = transcriptionsProvider.TranscriptionsAction(&audioRequest, isModelMapped, promptTokens)
|
||||
|
||||
// on error, refund the pre-consumed quota
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// on success, consume the quota
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
@@ -1,89 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayTranslations(c *gin.Context) {
|
||||
|
||||
var audioRequest types.AudioRequest
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &audioRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, audioRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// parse the model mapping
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(channel.GetModelMapping())
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[audioRequest.Model] != "" {
|
||||
audioRequest.Model = modelMap[audioRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// get the provider
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeAudioTranslation)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
translationProvider, ok := provider.(providersBase.TranslationInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// count the input tokens
|
||||
promptTokens := 0
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, audioRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = translationProvider.TranslationAction(&audioRequest, isModelMapped, promptTokens)
|
||||
|
||||
// on error, refund the pre-consumed quota
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// on success, consume the quota
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
@@ -1,300 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"one-api/providers"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/go-playground/validator/v10"
|
||||
)
|
||||
|
||||
func GetValidFieldName(err error, obj interface{}) string {
|
||||
getObj := reflect.TypeOf(obj)
|
||||
if errs, ok := err.(validator.ValidationErrors); ok {
|
||||
for _, e := range errs {
|
||||
if f, exist := getObj.Elem().FieldByName(e.Field()); exist {
|
||||
return f.Name
|
||||
}
|
||||
}
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
func fetchChannel(c *gin.Context, modelName string) (channel *model.Channel, pass bool) {
|
||||
channelId, ok := c.Get("channelId")
|
||||
if ok {
|
||||
channel, pass = fetchChannelById(c, channelId.(int))
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
channel, pass = fetchChannelByModel(c, modelName)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
setChannelToContext(c, channel)
|
||||
return
|
||||
}
|
||||
|
||||
func fetchChannelById(c *gin.Context, channelId any) (*model.Channel, bool) {
|
||||
id, err := strconv.Atoi(channelId.(string))
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, "无效的渠道 Id")
|
||||
return nil, true
|
||||
}
|
||||
channel, err := model.GetChannelById(id, true)
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, "无效的渠道 Id")
|
||||
return nil, true
|
||||
}
|
||||
if channel.Status != common.ChannelStatusEnabled {
|
||||
common.AbortWithMessage(c, http.StatusForbidden, "该渠道已被禁用")
|
||||
return nil, true
|
||||
}
|
||||
|
||||
return channel, false
|
||||
}
|
||||
|
||||
func fetchChannelByModel(c *gin.Context, modelName string) (*model.Channel, bool) {
|
||||
group := c.GetString("group")
|
||||
channel, err := model.CacheGetRandomSatisfiedChannel(group, modelName)
|
||||
if err != nil {
|
||||
message := fmt.Sprintf("当前分组 %s 下对于模型 %s 无可用渠道", group, modelName)
|
||||
if channel != nil {
|
||||
common.SysError(fmt.Sprintf("渠道不存在:%d", channel.Id))
|
||||
message = "数据库一致性已被破坏,请联系管理员"
|
||||
}
|
||||
common.AbortWithMessage(c, http.StatusServiceUnavailable, message)
|
||||
return nil, true
|
||||
}
|
||||
|
||||
return channel, false
|
||||
}
|
||||
|
||||
func getProvider(c *gin.Context, channelType int, relayMode int) (providersBase.ProviderInterface, bool) {
|
||||
provider := providers.GetProvider(channelType, c)
|
||||
if provider == nil {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not found")
|
||||
return nil, true
|
||||
}
|
||||
|
||||
if !provider.SupportAPI(relayMode) {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel does not support this API")
|
||||
return nil, true
|
||||
}
|
||||
|
||||
return provider, false
|
||||
}
|
||||
|
||||
func setChannelToContext(c *gin.Context, channel *model.Channel) {
|
||||
// c.Set("channel", channel.Type)
|
||||
c.Set("channel_id", channel.Id)
|
||||
c.Set("channel_name", channel.Name)
|
||||
c.Set("api_key", channel.Key)
|
||||
c.Set("base_url", channel.GetBaseURL())
|
||||
switch channel.Type {
|
||||
case common.ChannelTypeAzure:
|
||||
c.Set("api_version", channel.Other)
|
||||
case common.ChannelTypeXunfei:
|
||||
c.Set("api_version", channel.Other)
|
||||
case common.ChannelTypeGemini:
|
||||
c.Set("api_version", channel.Other)
|
||||
case common.ChannelTypeAIProxyLibrary:
|
||||
c.Set("library_id", channel.Other)
|
||||
case common.ChannelTypeAli:
|
||||
c.Set("plugin", channel.Other)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func shouldDisableChannel(err *types.OpenAIError, statusCode int) bool {
|
||||
if !common.AutomaticDisableChannelEnabled {
|
||||
return false
|
||||
}
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
if statusCode == http.StatusUnauthorized {
|
||||
return true
|
||||
}
|
||||
if err.Type == "insufficient_quota" || err.Code == "invalid_api_key" || err.Code == "account_deactivated" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func shouldEnableChannel(err error, openAIErr *types.OpenAIError) bool {
|
||||
if !common.AutomaticEnableChannelEnabled {
|
||||
return false
|
||||
}
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if openAIErr != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func postConsumeQuota(ctx context.Context, tokenId int, quotaDelta int, totalQuota int, userId int, channelId int, modelRatio float64, groupRatio float64, modelName string, tokenName string) {
|
||||
// quotaDelta is remaining quota to be consumed
|
||||
err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
|
||||
if err != nil {
|
||||
common.SysError("error consuming token remain quota: " + err.Error())
|
||||
}
|
||||
err = model.CacheUpdateUserQuota(userId)
|
||||
if err != nil {
|
||||
common.SysError("error update user quota cache: " + err.Error())
|
||||
}
|
||||
// totalQuota is total quota consumed
|
||||
if totalQuota != 0 {
|
||||
logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
|
||||
model.RecordConsumeLog(ctx, userId, channelId, totalQuota, 0, modelName, tokenName, totalQuota, logContent)
|
||||
model.UpdateUserUsedQuotaAndRequestCount(userId, totalQuota)
|
||||
model.UpdateChannelUsedQuota(channelId, totalQuota)
|
||||
}
|
||||
if totalQuota <= 0 {
|
||||
common.LogError(ctx, fmt.Sprintf("totalQuota consumed is %d, something is wrong", totalQuota))
|
||||
}
|
||||
}
|
||||
|
||||
func parseModelMapping(modelMapping string) (map[string]string, error) {
|
||||
if modelMapping == "" || modelMapping == "{}" {
|
||||
return nil, nil
|
||||
}
|
||||
modelMap := make(map[string]string)
|
||||
err := json.Unmarshal([]byte(modelMapping), &modelMap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return modelMap, nil
|
||||
}
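// Illustrative sketch (not part of the original file; the concrete model names are assumed for the
// example): parseModelMapping expects a JSON object mapping requested model names to upstream model
// names, so a channel configured with the mapping below rewrites "gpt-4" before relaying:
//
//	mapping, err := parseModelMapping(`{"gpt-4": "gpt-4-0613"}`)
//	if err == nil && mapping["gpt-4"] != "" {
//		chatRequest.Model = mapping["gpt-4"]
//	}
//
// An empty string or "{}" yields a nil map and no rewriting.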
|
||||
|
||||
type QuotaInfo struct {
|
||||
modelName string
|
||||
promptTokens int
|
||||
preConsumedTokens int
|
||||
modelRatio float64
|
||||
groupRatio float64
|
||||
ratio float64
|
||||
preConsumedQuota int
|
||||
userId int
|
||||
channelId int
|
||||
tokenId int
|
||||
HandelStatus bool
|
||||
}
|
||||
|
||||
func generateQuotaInfo(c *gin.Context, modelName string, promptTokens int) (*QuotaInfo, *types.OpenAIErrorWithStatusCode) {
|
||||
quotaInfo := &QuotaInfo{
|
||||
modelName: modelName,
|
||||
promptTokens: promptTokens,
|
||||
userId: c.GetInt("id"),
|
||||
channelId: c.GetInt("channel_id"),
|
||||
tokenId: c.GetInt("token_id"),
|
||||
HandelStatus: false,
|
||||
}
|
||||
quotaInfo.initQuotaInfo(c.GetString("group"))
|
||||
|
||||
errWithCode := quotaInfo.preQuotaConsumption()
|
||||
if errWithCode != nil {
|
||||
return nil, errWithCode
|
||||
}
|
||||
|
||||
return quotaInfo, nil
|
||||
}
|
||||
|
||||
func (q *QuotaInfo) initQuotaInfo(groupName string) {
|
||||
modelRatio := common.GetModelRatio(q.modelName)
|
||||
groupRatio := common.GetGroupRatio(groupName)
|
||||
preConsumedTokens := common.PreConsumedQuota
|
||||
ratio := modelRatio * groupRatio
|
||||
preConsumedQuota := int(float64(q.promptTokens+preConsumedTokens) * ratio)
|
||||
|
||||
q.preConsumedTokens = preConsumedTokens
|
||||
q.modelRatio = modelRatio
|
||||
q.groupRatio = groupRatio
|
||||
q.ratio = ratio
|
||||
q.preConsumedQuota = preConsumedQuota
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (q *QuotaInfo) preQuotaConsumption() *types.OpenAIErrorWithStatusCode {
|
||||
userQuota, err := model.CacheGetUserQuota(q.userId)
|
||||
if err != nil {
|
||||
return common.ErrorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
if userQuota < q.preConsumedQuota {
|
||||
return common.ErrorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
|
||||
}
|
||||
|
||||
err = model.CacheDecreaseUserQuota(q.userId, q.preConsumedQuota)
|
||||
if err != nil {
|
||||
return common.ErrorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
if userQuota > 100*q.preConsumedQuota {
|
||||
// in this case, we do not pre-consume quota
|
||||
// because the user has enough quota
|
||||
q.preConsumedQuota = 0
|
||||
// common.LogInfo(c.Request.Context(), fmt.Sprintf("user %d has enough quota %d, trusted and no need to pre-consume", userId, userQuota))
|
||||
}
|
||||
|
||||
if q.preConsumedQuota > 0 {
|
||||
err := model.PreConsumeTokenQuota(q.tokenId, q.preConsumedQuota)
|
||||
if err != nil {
|
||||
return common.ErrorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
|
||||
}
|
||||
q.HandelStatus = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}
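// Worked example (all numbers assumed): with promptTokens = 1000, common.PreConsumedQuota = 500 and
// ratio = 2, preQuotaConsumption reserves (1000+500)*2 = 3000 quota from the token up front and sets
// HandelStatus to true; if the cached user quota exceeds 100*3000, nothing is pre-consumed and the
// full charge is settled only after the response completes.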
|
||||
|
||||
func (q *QuotaInfo) completedQuotaConsumption(usage *types.Usage, tokenName string, ctx context.Context) error {
|
||||
quota := 0
|
||||
completionRatio := common.GetCompletionRatio(q.modelName)
|
||||
promptTokens := usage.PromptTokens
|
||||
completionTokens := usage.CompletionTokens
|
||||
quota = int(math.Ceil((float64(promptTokens) + float64(completionTokens)*completionRatio) * q.ratio))
|
||||
if q.ratio != 0 && quota <= 0 {
|
||||
quota = 1
|
||||
}
|
||||
totalTokens := promptTokens + completionTokens
|
||||
if totalTokens == 0 {
|
||||
// in this case, must be some error happened
|
||||
// we cannot just return, because we may have to return the pre-consumed quota
|
||||
quota = 0
|
||||
}
|
||||
quotaDelta := quota - q.preConsumedQuota
|
||||
err := model.PostConsumeTokenQuota(q.tokenId, quotaDelta)
|
||||
if err != nil {
|
||||
return errors.New("error consuming token remain quota: " + err.Error())
|
||||
}
|
||||
err = model.CacheUpdateUserQuota(q.userId)
|
||||
if err != nil {
|
||||
return errors.New("error consuming token remain quota: " + err.Error())
|
||||
}
|
||||
if quota != 0 {
|
||||
logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", q.modelRatio, q.groupRatio)
|
||||
model.RecordConsumeLog(ctx, q.userId, q.channelId, promptTokens, completionTokens, q.modelName, tokenName, quota, logContent)
|
||||
model.UpdateUserUsedQuotaAndRequestCount(q.userId, quota)
|
||||
model.UpdateChannelUsedQuota(q.channelId, quota)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
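// Worked example (all numbers assumed): with ratio = 2, a completion ratio of 3 for the model,
// promptTokens = 100 and completionTokens = 200, completedQuotaConsumption charges
// quota = ceil((100 + 200*3) * 2) = 1400; the delta quota - preConsumedQuota is then settled against
// the token, and the consume log, user usage and channel usage are updated.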
|
@@ -1,17 +1,128 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/types"
|
||||
"strconv"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/helper"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"github.com/songquanpeng/one-api/middleware"
|
||||
dbmodel "github.com/songquanpeng/one-api/model"
|
||||
"github.com/songquanpeng/one-api/relay/constant"
|
||||
"github.com/songquanpeng/one-api/relay/controller"
|
||||
"github.com/songquanpeng/one-api/relay/model"
|
||||
"github.com/songquanpeng/one-api/relay/util"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// https://platform.openai.com/docs/api-reference/chat
|
||||
|
||||
func relay(c *gin.Context, relayMode int) *model.ErrorWithStatusCode {
|
||||
var err *model.ErrorWithStatusCode
|
||||
switch relayMode {
|
||||
case constant.RelayModeImagesGenerations:
|
||||
err = controller.RelayImageHelper(c, relayMode)
|
||||
case constant.RelayModeAudioSpeech:
|
||||
fallthrough
|
||||
case constant.RelayModeAudioTranslation:
|
||||
fallthrough
|
||||
case constant.RelayModeAudioTranscription:
|
||||
err = controller.RelayAudioHelper(c, relayMode)
|
||||
default:
|
||||
err = controller.RelayTextHelper(c)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func Relay(c *gin.Context) {
|
||||
ctx := c.Request.Context()
|
||||
relayMode := constant.Path2RelayMode(c.Request.URL.Path)
|
||||
if config.DebugEnabled {
|
||||
requestBody, _ := common.GetRequestBody(c)
|
||||
logger.Debugf(ctx, "request body: %s", string(requestBody))
|
||||
}
|
||||
bizErr := relay(c, relayMode)
|
||||
if bizErr == nil {
|
||||
return
|
||||
}
|
||||
channelId := c.GetInt("channel_id")
|
||||
lastFailedChannelId := channelId
|
||||
channelName := c.GetString("channel_name")
|
||||
group := c.GetString("group")
|
||||
originalModel := c.GetString("original_model")
|
||||
go processChannelRelayError(ctx, channelId, channelName, bizErr)
|
||||
requestId := c.GetString(logger.RequestIdKey)
|
||||
retryTimes := config.RetryTimes
|
||||
if !shouldRetry(c, bizErr.StatusCode) {
|
||||
logger.Errorf(ctx, "relay error happen, status code is %d, won't retry in this case", bizErr.StatusCode)
|
||||
retryTimes = 0
|
||||
}
|
||||
for i := retryTimes; i > 0; i-- {
|
||||
channel, err := dbmodel.CacheGetRandomSatisfiedChannel(group, originalModel, i != retryTimes)
|
||||
if err != nil {
|
||||
logger.Errorf(ctx, "CacheGetRandomSatisfiedChannel failed: %w", err)
|
||||
break
|
||||
}
|
||||
logger.Infof(ctx, "using channel #%d to retry (remain times %d)", channel.Id, i)
|
||||
if channel.Id == lastFailedChannelId {
|
||||
continue
|
||||
}
|
||||
middleware.SetupContextForSelectedChannel(c, channel, originalModel)
|
||||
requestBody, err := common.GetRequestBody(c)
|
||||
c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
|
||||
bizErr = relay(c, relayMode)
|
||||
if bizErr == nil {
|
||||
return
|
||||
}
|
||||
channelId := c.GetInt("channel_id")
|
||||
lastFailedChannelId = channelId
|
||||
channelName := c.GetString("channel_name")
|
||||
go processChannelRelayError(ctx, channelId, channelName, bizErr)
|
||||
}
|
||||
if bizErr != nil {
|
||||
if bizErr.StatusCode == http.StatusTooManyRequests {
|
||||
bizErr.Error.Message = "当前分组上游负载已饱和,请稍后再试"
|
||||
}
|
||||
bizErr.Error.Message = helper.MessageWithRequestId(bizErr.Error.Message, requestId)
|
||||
c.JSON(bizErr.StatusCode, gin.H{
|
||||
"error": bizErr.Error,
|
||||
})
|
||||
}
|
||||
}
|
||||

func shouldRetry(c *gin.Context, statusCode int) bool {
	if _, ok := c.Get("specific_channel_id"); ok {
		return false
	}
	if statusCode == http.StatusTooManyRequests {
		return true
	}
	if statusCode/100 == 5 {
		return true
	}
	if statusCode == http.StatusBadRequest {
		return false
	}
	if statusCode/100 == 2 {
		return false
	}
	return true
}

func processChannelRelayError(ctx context.Context, channelId int, channelName string, err *model.ErrorWithStatusCode) {
	logger.Errorf(ctx, "relay error (channel #%d): %s", channelId, err.Message)
	// https://platform.openai.com/docs/guides/error-codes/api-errors
	if util.ShouldDisableChannel(&err.Error, err.StatusCode) {
		disableChannel(channelId, channelName, err.Message)
	}
}
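For reference, the retry policy encoded in shouldRetry above can be exercised in isolation. The sketch below is illustrative only and not part of the repository; the helper name retryable and the sample status codes are made up, and the specific_channel_id short-circuit is omitted.

package main

import (
	"fmt"
	"net/http"
)

// retryable mirrors only the status-code part of shouldRetry above:
// 429 and any 5xx are retried, 400 and 2xx are not, any other code is retried.
func retryable(statusCode int) bool {
	if statusCode == http.StatusTooManyRequests {
		return true
	}
	if statusCode/100 == 5 {
		return true
	}
	if statusCode == http.StatusBadRequest {
		return false
	}
	if statusCode/100 == 2 {
		return false
	}
	return true
}

func main() {
	for _, code := range []int{429, 502, 400, 200, 401} {
		fmt.Printf("%d -> retry=%v\n", code, retryable(code))
	}
}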
|
||||
func RelayNotImplemented(c *gin.Context) {
|
||||
err := types.OpenAIError{
|
||||
err := model.Error{
|
||||
Message: "API not implemented",
|
||||
Type: "one_api_error",
|
||||
Param: "",
|
||||
@@ -23,7 +134,7 @@ func RelayNotImplemented(c *gin.Context) {
|
||||
}
|
||||
|
||||
func RelayNotFound(c *gin.Context) {
|
||||
err := types.OpenAIError{
|
||||
err := model.Error{
|
||||
Message: fmt.Sprintf("Invalid URL (%s %s)", c.Request.Method, c.Request.URL.Path),
|
||||
Type: "invalid_request_error",
|
||||
Param: "",
|
||||
@@ -33,31 +144,3 @@ func RelayNotFound(c *gin.Context) {
|
||||
"error": err,
|
||||
})
|
||||
}
|
||||
|
||||
func errorHelper(c *gin.Context, err *types.OpenAIErrorWithStatusCode) {
|
||||
requestId := c.GetString(common.RequestIdKey)
|
||||
retryTimesStr := c.Query("retry")
|
||||
retryTimes, _ := strconv.Atoi(retryTimesStr)
|
||||
if retryTimesStr == "" {
|
||||
retryTimes = common.RetryTimes
|
||||
}
|
||||
if retryTimes > 0 {
|
||||
c.Redirect(http.StatusTemporaryRedirect, fmt.Sprintf("%s?retry=%d", c.Request.URL.Path, retryTimes-1))
|
||||
} else {
|
||||
if err.StatusCode == http.StatusTooManyRequests {
|
||||
err.OpenAIError.Message = "当前分组上游负载已饱和,请稍后再试"
|
||||
}
|
||||
err.OpenAIError.Message = common.MessageWithRequestId(err.OpenAIError.Message, requestId)
|
||||
c.JSON(err.StatusCode, gin.H{
|
||||
"error": err.OpenAIError,
|
||||
})
|
||||
}
|
||||
channelId := c.GetInt("channel_id")
|
||||
common.LogError(c.Request.Context(), fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message))
|
||||
// https://platform.openai.com/docs/guides/error-codes/api-errors
|
||||
if shouldDisableChannel(&err.OpenAIError, err.StatusCode) {
|
||||
channelId := c.GetInt("channel_id")
|
||||
channelName := c.GetString("channel_name")
|
||||
disableChannel(channelId, channelName, err.Message)
|
||||
}
|
||||
}
|
||||
|
@@ -2,9 +2,11 @@ package controller
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/helper"
|
||||
"github.com/songquanpeng/one-api/model"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
@@ -14,7 +16,7 @@ func GetAllTokens(c *gin.Context) {
|
||||
if p < 0 {
|
||||
p = 0
|
||||
}
|
||||
tokens, err := model.GetAllUserTokens(userId, p*common.ItemsPerPage, common.ItemsPerPage)
|
||||
tokens, err := model.GetAllUserTokens(userId, p*config.ItemsPerPage, config.ItemsPerPage)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
@@ -119,9 +121,9 @@ func AddToken(c *gin.Context) {
|
||||
cleanToken := model.Token{
|
||||
UserId: c.GetInt("id"),
|
||||
Name: token.Name,
|
||||
Key: common.GenerateKey(),
|
||||
CreatedTime: common.GetTimestamp(),
|
||||
AccessedTime: common.GetTimestamp(),
|
||||
Key: helper.GenerateKey(),
|
||||
CreatedTime: helper.GetTimestamp(),
|
||||
AccessedTime: helper.GetTimestamp(),
|
||||
ExpiredTime: token.ExpiredTime,
|
||||
RemainQuota: token.RemainQuota,
|
||||
UnlimitedQuota: token.UnlimitedQuota,
|
||||
@@ -187,7 +189,7 @@ func UpdateToken(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
if token.Status == common.TokenStatusEnabled {
|
||||
if cleanToken.Status == common.TokenStatusExpired && cleanToken.ExpiredTime <= common.GetTimestamp() && cleanToken.ExpiredTime != -1 {
|
||||
if cleanToken.Status == common.TokenStatusExpired && cleanToken.ExpiredTime <= helper.GetTimestamp() && cleanToken.ExpiredTime != -1 {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
"message": "令牌已过期,无法启用,请先修改令牌过期时间,或者设置为永不过期",
|
||||
|
@@ -3,9 +3,11 @@ package controller
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/helper"
|
||||
"github.com/songquanpeng/one-api/model"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
@@ -19,7 +21,7 @@ type LoginRequest struct {
|
||||
}
|
||||
|
||||
func Login(c *gin.Context) {
|
||||
if !common.PasswordLoginEnabled {
|
||||
if !config.PasswordLoginEnabled {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"message": "管理员关闭了密码登录",
|
||||
"success": false,
|
||||
@@ -106,14 +108,14 @@ func Logout(c *gin.Context) {
|
||||
}
|
||||
|
||||
func Register(c *gin.Context) {
|
||||
if !common.RegisterEnabled {
|
||||
if !config.RegisterEnabled {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"message": "管理员关闭了新用户注册",
|
||||
"success": false,
|
||||
})
|
||||
return
|
||||
}
|
||||
if !common.PasswordRegisterEnabled {
|
||||
if !config.PasswordRegisterEnabled {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"message": "管理员关闭了通过密码进行注册,请使用第三方账户验证的形式进行注册",
|
||||
"success": false,
|
||||
@@ -136,7 +138,7 @@ func Register(c *gin.Context) {
|
||||
})
|
||||
return
|
||||
}
|
||||
if common.EmailVerificationEnabled {
|
||||
if config.EmailVerificationEnabled {
|
||||
if user.Email == "" || user.VerificationCode == "" {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
@@ -160,7 +162,7 @@ func Register(c *gin.Context) {
|
||||
DisplayName: user.Username,
|
||||
InviterId: inviterId,
|
||||
}
|
||||
if common.EmailVerificationEnabled {
|
||||
if config.EmailVerificationEnabled {
|
||||
cleanUser.Email = user.Email
|
||||
}
|
||||
if err := cleanUser.Insert(inviterId); err != nil {
|
||||
@@ -182,7 +184,7 @@ func GetAllUsers(c *gin.Context) {
|
||||
if p < 0 {
|
||||
p = 0
|
||||
}
|
||||
users, err := model.GetAllUsers(p*common.ItemsPerPage, common.ItemsPerPage)
|
||||
users, err := model.GetAllUsers(p*config.ItemsPerPage, config.ItemsPerPage)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
@@ -251,26 +253,25 @@ func GetUser(c *gin.Context) {
|
||||
|
||||
func GetUserDashboard(c *gin.Context) {
|
||||
id := c.GetInt("id")
|
||||
// 获取7天前 00:00:00 和 今天23:59:59 的秒时间戳
|
||||
now := time.Now()
|
||||
toDay := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location())
|
||||
endOfDay := toDay.Add(time.Hour * 24).Add(-time.Second).Unix()
|
||||
startOfDay := toDay.AddDate(0, 0, -7).Unix()
|
||||
startOfDay := now.Truncate(24*time.Hour).AddDate(0, 0, -6).Unix()
|
||||
endOfDay := now.Truncate(24 * time.Hour).Add(24*time.Hour - time.Second).Unix()
|
||||
|
||||
dashboards, err := model.SearchLogsByDayAndModel(id, int(startOfDay), int(endOfDay))
|
||||
if err != nil {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
"message": "无法获取统计信息.",
|
||||
"message": "无法获取统计信息",
|
||||
"data": nil,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": true,
|
||||
"message": "",
|
||||
"data": dashboards,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func GenerateAccessToken(c *gin.Context) {
|
||||
@@ -283,7 +284,7 @@ func GenerateAccessToken(c *gin.Context) {
|
||||
})
|
||||
return
|
||||
}
|
||||
user.AccessToken = common.GetUUID()
|
||||
user.AccessToken = helper.GetUUID()
|
||||
|
||||
if model.DB.Where("access_token = ?", user.AccessToken).First(user).RowsAffected != 0 {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
@@ -320,7 +321,7 @@ func GetAffCode(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
if user.AffCode == "" {
|
||||
user.AffCode = common.GetRandomString(4)
|
||||
user.AffCode = helper.GetRandomString(4)
|
||||
if err := user.Update(false); err != nil {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
@@ -727,7 +728,7 @@ func EmailBind(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
if user.Role == common.RoleRootUser {
|
||||
common.RootUserEmail = email
|
||||
config.RootUserEmail = email
|
||||
}
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": true,
|
||||
|
@@ -5,9 +5,10 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/model"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
@@ -22,11 +23,11 @@ func getWeChatIdByCode(code string) (string, error) {
|
||||
if code == "" {
|
||||
return "", errors.New("无效的参数")
|
||||
}
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("%s/api/wechat/user?code=%s", common.WeChatServerAddress, code), nil)
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("%s/api/wechat/user?code=%s", config.WeChatServerAddress, code), nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
req.Header.Set("Authorization", common.WeChatServerToken)
|
||||
req.Header.Set("Authorization", config.WeChatServerToken)
|
||||
client := http.Client{
|
||||
Timeout: 5 * time.Second,
|
||||
}
|
||||
@@ -50,7 +51,7 @@ func getWeChatIdByCode(code string) (string, error) {
|
||||
}
|
||||
|
||||
func WeChatAuth(c *gin.Context) {
|
||||
if !common.WeChatAuthEnabled {
|
||||
if !config.WeChatAuthEnabled {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"message": "管理员未开启通过微信登录以及注册",
|
||||
"success": false,
|
||||
@@ -79,7 +80,7 @@ func WeChatAuth(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if common.RegisterEnabled {
|
||||
if config.RegisterEnabled {
|
||||
user.Username = "wechat_" + strconv.Itoa(model.GetMaxUserId()+1)
|
||||
user.DisplayName = "WeChat User"
|
||||
user.Role = common.RoleCommonUser
|
||||
@@ -112,7 +113,7 @@ func WeChatAuth(c *gin.Context) {
|
||||
}
|
||||
|
||||
func WeChatBind(c *gin.Context) {
|
||||
if !common.WeChatAuthEnabled {
|
||||
if !config.WeChatAuthEnabled {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"message": "管理员未开启通过微信登录以及注册",
|
||||
"success": false,
|
||||
|
go.mod (7 lines changed)
@@ -1,4 +1,4 @@
|
||||
module one-api
|
||||
module github.com/songquanpeng/one-api
|
||||
|
||||
// +heroku goVersion go1.18
|
||||
go 1.18
|
||||
@@ -16,7 +16,7 @@ require (
|
||||
github.com/gorilla/websocket v1.5.0
|
||||
github.com/pkoukk/tiktoken-go v0.1.5
|
||||
github.com/stretchr/testify v1.8.3
|
||||
golang.org/x/crypto v0.14.0
|
||||
golang.org/x/crypto v0.17.0
|
||||
golang.org/x/image v0.14.0
|
||||
gorm.io/driver/mysql v1.4.3
|
||||
gorm.io/driver/postgres v1.5.2
|
||||
@@ -45,7 +45,6 @@ require (
|
||||
github.com/jackc/pgx/v5 v5.3.1 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/joho/godotenv v1.5.1 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
|
||||
github.com/leodido/go-urn v1.2.4 // indirect
|
||||
@@ -59,7 +58,7 @@ require (
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
golang.org/x/arch v0.3.0 // indirect
|
||||
golang.org/x/net v0.17.0 // indirect
|
||||
golang.org/x/sys v0.13.0 // indirect
|
||||
golang.org/x/sys v0.15.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
google.golang.org/protobuf v1.30.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
|
go.sum (10 lines changed)
@@ -80,8 +80,6 @@ github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkr
|
||||
github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
||||
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
|
||||
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
@@ -152,8 +150,8 @@ golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUu
|
||||
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
|
||||
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
|
||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
|
||||
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
||||
golang.org/x/image v0.14.0 h1:tNgSxAFe3jC4uYqvZdTr84SZoM1KfwdC9SKIFrLjFn4=
|
||||
golang.org/x/image v0.14.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
@@ -166,8 +164,8 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
|
i18n/en.json (250 lines changed)
@@ -86,6 +86,7 @@
|
||||
"该令牌已过期": "The token has expired",
|
||||
"该令牌额度已用尽": "The token quota has been used up",
|
||||
"无效的令牌": "Invalid token",
|
||||
"令牌验证失败": "Token verification failed",
|
||||
"id 或 userId 为空!": "id or userId is empty!",
|
||||
"quota 不能为负数!": "quota cannot be negative!",
|
||||
"令牌额度不足": "Insufficient token quota",
|
||||
@@ -455,9 +456,11 @@
|
||||
"已绑定的邮箱账户": "Email Account Bound",
|
||||
"用户信息更新成功!": "User information updated successfully!",
|
||||
"模型倍率 %.2f,分组倍率 %.2f": "model rate %.2f, group rate %.2f",
|
||||
"模型倍率 %.2f,分组倍率 %.2f,补全倍率 %.2f": "model rate %.2f, group rate %.2f, completion rate %.2f",
|
||||
"使用明细(总消耗额度:{renderQuota(stat.quota)})": "Usage Details (Total Consumption Quota: {renderQuota(stat.quota)})",
|
||||
"用户名称": "User Name",
|
||||
"令牌名称": "Token Name",
|
||||
"默认令牌": "Default Token",
|
||||
"留空则查询全部用户": "Leave blank to query all users",
|
||||
"留空则查询全部令牌": "Leave blank to query all tokens",
|
||||
"模型名称": "Model Name",
|
||||
@@ -526,5 +529,250 @@
|
||||
"模型版本": "Model version",
|
||||
"请输入星火大模型版本,注意是接口地址中的版本号,例如:v2.1": "Please enter the version of the Starfire model, note that it is the version number in the interface address, for example: v2.1",
|
||||
"点击查看": "click to view",
|
||||
"请确保已在 Azure 上创建了 gpt-35-turbo 模型,并且 apiVersion 已正确填写!": "Please make sure that the gpt-35-turbo model has been created on Azure, and the apiVersion has been filled in correctly!"
|
||||
"请确保已在 Azure 上创建了 gpt-35-turbo 模型,并且 apiVersion 已正确填写!": "Please make sure that the gpt-35-turbo model has been created on Azure, and the apiVersion has been filled in correctly!",
|
||||
"处理中...": "Processing...",
|
||||
"绑定成功!": "Binding successful!",
|
||||
"登录成功!": "Login successful!",
|
||||
"操作失败,重定向至登录界面中...": "Operation failed, redirecting to login screen...",
|
||||
"出现错误,第 ${count} 次重试中...": "An error occurred, retrying ${count}...",
|
||||
"首页": "Home",
|
||||
"渠道": "Channel",
|
||||
"令牌": "API Keys",
|
||||
"兑换": "Redeem",
|
||||
"充值": "Recharge",
|
||||
"用户": "Users",
|
||||
"日志": "Logs",
|
||||
"设置": "Settings",
|
||||
"关于": "About",
|
||||
"聊天": "Chat",
|
||||
"注销成功!": "Logout successful!",
|
||||
"注销": "Log out",
|
||||
"登录": "Log in",
|
||||
"注册": "Sign up",
|
||||
"加载{name}中...": "Loading {name}...",
|
||||
"未登录或登录已过期,请重新登录!": "Not logged in or login has expired, please log in again!",
|
||||
"请立刻修改默认密码!": "Please change the default password immediately!",
|
||||
"欢迎回来": "Welcome back",
|
||||
"没有账户?": "No account?",
|
||||
"立刻注册": "Sign up now",
|
||||
"用户名": "Username",
|
||||
"密码": "Password",
|
||||
"正在登录……": "Logging in...",
|
||||
"忘记密码": "Forgot password",
|
||||
"其他方式": "Other methods",
|
||||
"微信扫码关注公众号,输入「验证码」获取验证码(三分钟内有效)": "Scan the QR code with WeChat, follow the official account and enter 'verification code' to get the verification code (valid within three minutes)",
|
||||
"验证码": "Verification code",
|
||||
"全部用户": "All users",
|
||||
"当前用户": "Current user",
|
||||
"全部": "All",
|
||||
"消费": "Consumption",
|
||||
"管理": "Management",
|
||||
"系统": "System",
|
||||
"未知": "Unknown",
|
||||
"其他模型": "Other models",
|
||||
"复制成功": "Copy successful",
|
||||
"使用明细": "Usages",
|
||||
"刷新": "Refresh",
|
||||
"收起面板": "Collapse panel",
|
||||
"展开面板": "Expand panel",
|
||||
"显示查询选项": "Show search options",
|
||||
"隐藏查询选项": "Hide search options",
|
||||
"用户名称": "User name",
|
||||
"可选值": "Optional values",
|
||||
"渠道 ID": "Channel ID",
|
||||
"令牌名称": "Key name",
|
||||
"模型名称": "Model name",
|
||||
"起始时间": "Start time",
|
||||
"结束时间": "End time",
|
||||
"查询": "Query",
|
||||
"隐藏条形图": "Hide bar chart",
|
||||
"显示条形图": "Show bar chart",
|
||||
"折线条形图只展示最新50条数据": "Line and bar charts only show the latest 50 pieces of data",
|
||||
"总消耗": "Total consumption",
|
||||
"总共调用了 {payload[0].value} 次": "A total of {payload[0].value} calls were made",
|
||||
"{model.name}: {model.value} 次": "{model.name}: {model.value} times",
|
||||
"总共调用了 {payload[0].value} 次 {payload[0].name}": "A total of {payload[0].value} {payload[0].name} calls were made",
|
||||
"总消耗额度": "Total consumption limit",
|
||||
"暂无数据": "No data available",
|
||||
"更多数据统计图形即将到来,敬请期待!": "More data statistics graphics are coming soon, stay tuned!",
|
||||
"复制用户名": "Copy username",
|
||||
"{`共 ${counts} 条数据`}": "{`A total of ${counts} pieces of data`}",
|
||||
"共 0 条数据": "A total of 0 pieces of data",
|
||||
"选择明细分类": "Select detail category",
|
||||
"模型倍率": "model rate",
|
||||
"分组倍率": "group rate",
|
||||
"新密码已复制到剪贴板:": "New password has been copied to the clipboard:",
|
||||
"密码重置确认": "Password reset confirmation",
|
||||
"邮箱地址": "Email address",
|
||||
"新密码": "New password",
|
||||
"密码已复制到剪贴板:": "Password has been copied to the clipboard:",
|
||||
"密码重置完成": "Password reset complete",
|
||||
"提交": "Submit",
|
||||
"返回登录": "Return to login",
|
||||
"请稍后重试,浏览器环境检查未通过": "Please try again later, browser environment check failed",
|
||||
"重置邮件发送成功,请检查邮箱!": "Reset email sent successfully, please check your email!",
|
||||
"密码重置": "Password reset",
|
||||
"重试": "Retry",
|
||||
"组": "Group",
|
||||
"令牌已重置并已复制到剪贴板": "Token has been reset and copied to the clipboard",
|
||||
"邀请链接已复制到剪切板": "Invitation link has been copied to the clipboard",
|
||||
"系统令牌已复制到剪切板": "System token has been copied to the clipboard",
|
||||
"请输入你的账户名以确认删除!": "Please enter your account name to confirm deletion!",
|
||||
"账户已删除!": "Account has been deleted!",
|
||||
"微信账户绑定成功!": "WeChat account binding successful!",
|
||||
"请稍后几秒重试,Turnstile 正在检查用户环境!": "Please try again in a few seconds, Turnstile is checking the user environment!",
|
||||
"验证码发送成功,请检查邮箱!": "Verification code sent successfully, please check your email!",
|
||||
"邮箱账户绑定成功!": "Email account binding successful!",
|
||||
"个人信息": "Personal information",
|
||||
"编辑个人信息": "Edit personal information",
|
||||
"生成系统访问令牌": "Generate system access token",
|
||||
"复制邀请链接": "Copy invitation link",
|
||||
"删除个人帐户": "Delete personal account",
|
||||
"普通用户": "Regular user",
|
||||
"管理员": "Administrator",
|
||||
"超级管理员": "Super administrator",
|
||||
"显示名称": "Display name",
|
||||
"GitHub 账号": "GitHub account",
|
||||
"微信账号": "WeChat account",
|
||||
"修改个人信息只允许在电脑端进行。生成的令牌用于系统管理,而非用于请求 OpenAI 相关的服务,请知悉。": "Modifying personal information is only allowed on a computer. The generated token is for system management, not for requesting OpenAI related services. Please be aware.",
|
||||
"可用模型": "Available models",
|
||||
"账号绑定": "Account binding",
|
||||
"绑定微信": "Bind WeChat",
|
||||
"绑定 GitHub": "Bind GitHub",
|
||||
"绑定邮箱": "Bind Email",
|
||||
"绑定": "Bind",
|
||||
"绑定邮箱地址": "Bind email address",
|
||||
"输入邮箱地址": "Enter email address",
|
||||
"重新发送": "Resend",
|
||||
"获取验证码": "Get verification code",
|
||||
"确认绑定": "Confirm binding",
|
||||
"取消": "Cancel",
|
||||
"危险操作": "Dangerous operation",
|
||||
"您正在删除自己的帐户,将清空所有数据且不可恢复": "You are deleting your own account, all data will be cleared and cannot be recovered",
|
||||
"输入你的账户名": "Enter your account name",
|
||||
"以确认删除": "To confirm deletion",
|
||||
"确认删除": "Confirm deletion",
|
||||
"未使用": "Not used",
|
||||
"已禁用": "Disabled",
|
||||
"已使用": "Used",
|
||||
"未知状态": "Unknown status",
|
||||
"操作成功完成!": "Operation successfully completed!",
|
||||
"搜索兑换码的 ID 和名称 ...": "Search for the ID and name of the redemption code ...",
|
||||
"名称": "Name",
|
||||
"状态": "Status",
|
||||
"额度": "Quota",
|
||||
"创建时间": "Creation time",
|
||||
"兑换时间": "Redemption time",
|
||||
"操作": "Operation",
|
||||
"尚未兑换": "Not yet redeemed",
|
||||
"已复制到剪贴板!": "Copied to clipboard!",
|
||||
"无法复制到剪贴板,请手动复制,已将兑换码填入搜索框。": "Unable to copy to clipboard, please copy manually. The redemption code has been filled in the search box.",
|
||||
"复制": "Copy",
|
||||
"删除": "Delete",
|
||||
"禁用": "Disable",
|
||||
"启用": "Enable",
|
||||
"编辑": "Edit",
|
||||
"添加新的兑换码": "Add new redemption code",
|
||||
"密码长度不得小于 8 位!": "Password length must not be less than 8 characters!",
|
||||
"两次输入的密码不一致": "The two passwords entered do not match",
|
||||
"注册成功!": "Registration successful!",
|
||||
"请填写注册邮箱!": "Please fill in the registration email!",
|
||||
"请在${verificationTimeout}秒后再试": "Please try again after ${verificationTimeout} seconds",
|
||||
"验证码发送成功,请检查你的邮箱!": "Verification code sent successfully, please check your email!",
|
||||
"已有账户?": "Already have an account?",
|
||||
"请输入用户名(最长 12 位)": "Please enter a username (up to 12 characters)",
|
||||
"请输入密码(最短 8 位,最长 20 位)": "Please enter a password (minimum 8 characters, maximum 20 characters)",
|
||||
"请再次输入密码": "Please enter the password again",
|
||||
"请输入邮箱地址": "Please enter an email address",
|
||||
"秒后可重发": "Can be resent after seconds",
|
||||
"请输入邮箱验证码": "Please enter the email verification code",
|
||||
"已过期": "Expired",
|
||||
"已启用": "Enabled",
|
||||
"已耗尽": "Exhausted",
|
||||
"无": "None",
|
||||
"令牌密钥": "API Key",
|
||||
"令牌状态": "Key status",
|
||||
"已用额度": "Used quota",
|
||||
"剩余额度": "Remaining quota",
|
||||
"过期时间": "Expiration time",
|
||||
"你确定要删除这个令牌吗?": "Are you sure you want to delete this key?",
|
||||
"无法复制到剪贴板,请手动复制,已将令牌密钥填入搜索框": "Unable to copy to clipboard, please copy manually. The key key has been filled in the search box.",
|
||||
"无限制": "Unlimited",
|
||||
"永不过期": "Never expires",
|
||||
"使用 API 访问令牌进行服务鉴权和计费。": "Use API Key for service authentication and billing.",
|
||||
"API 访问令牌关系到您的个人利益,请妥善留存,不要与其他人共享,也不要保存在客户端代码中。": "API Key is related to your personal interests. Please keep it properly. Do not share it with others or save it in client code.",
|
||||
"创建令牌": "Create Key",
|
||||
"什么都还没有,快去创建一个令牌开始使用吧!": "Nothing yet, go create a key to start using!",
|
||||
"你确定要删除该令牌吗": "Are you sure you want to delete this key",
|
||||
"导出令牌信息": "Export key information",
|
||||
"错误:未登录或登录已过期,请重新登录!": "Error: Not logged in or login has expired, please log in again!",
|
||||
"错误:请求次数过多,请稍后再试!": "Error: Too many requests, please try again later!",
|
||||
"错误:服务器内部错误,请联系管理员!": "Error: Server internal error, please contact the online customer service!",
|
||||
"本站仅作演示之用,无服务端!": "This site is for demonstration purposes only, no server!",
|
||||
"错误:": "Error:",
|
||||
"加载首页内容失败...": "Failed to load homepage content...",
|
||||
"系统状况": "System status",
|
||||
"系统信息": "System information",
|
||||
"系统信息总览": "System information overview",
|
||||
"名称:": "Name:",
|
||||
"版本:": "Version:",
|
||||
"源码:": "Source code:",
|
||||
"启动时间:": "Startup time:",
|
||||
"系统配置": "System configuration",
|
||||
"系统配置总览": "System configuration overview",
|
||||
"邮箱验证:": "Email verification:",
|
||||
"未启用": "Not enabled",
|
||||
"Turnstile 用户校验:": "Turnstile user verification:",
|
||||
"页面不存在": "Page does not exist",
|
||||
"请检查你的浏览器地址是否正确": "Please check if your browser address is correct",
|
||||
"个人设置": "Personal settings",
|
||||
"运营设置": "Operations settings",
|
||||
"系统设置": "System settings",
|
||||
"其他设置": "Other settings",
|
||||
"默认令牌": "Default key",
|
||||
"过期时间必须在当前时间之后!": "Expiration time must be after the current time!",
|
||||
"额度必须大于等于 0!": "Quota must be greater than or equal to 0!",
|
||||
"过期时间格式错误!": "Expiration time format error!",
|
||||
"创建令牌数量必须大于等于 1!": "The number of keys to create must be greater than or equal to 1!",
|
||||
"令牌修改成功": "API Key modification successful",
|
||||
"令牌创建成功": "API Key creation successful",
|
||||
"更新令牌信息": "Update key information",
|
||||
"创建新的令牌": "Create a new key",
|
||||
"请输入名称": "Please enter a name",
|
||||
"请输入过期时间,格式为 yyyy-MM-dd HH:mm:ss,-1 表示无限制": "Please enter the expiration time, the format is yyyy-MM-dd HH:mm:ss, -1 means unlimited",
|
||||
"无限额度": "Unlimited quota",
|
||||
"注意:启用无限额度后,已用额度将不再进行计算。": "Note: After enabling unlimited quota, the used quota will no longer be calculated.",
|
||||
"等于": "Equals",
|
||||
"请输入额度(单位:token)": "Please enter the quota (unit: token)",
|
||||
"创建令牌数量": "Create key quantity",
|
||||
"请输入令牌数量": "Please enter the number of keys",
|
||||
"注意:令牌的额度仅用于限制令牌本身的最大额度使用量,实际的使用受到账户的剩余额度限制。": "Note: The quota of the key is only used to limit the maximum quota usage of the key itself, and the actual usage is subject to the remaining quota of the account.",
|
||||
"我的令牌": "My keys",
|
||||
"请输入额度兑换码!": "Please enter the redeem code!",
|
||||
"充值成功!": "Recharge successful!",
|
||||
"请求失败": "Request failed",
|
||||
"超级管理员未设置充值链接!": "The super administrator did not set a recharge link!",
|
||||
"充值额度": "Recharge quota",
|
||||
"兑换中...": "Redeeming...",
|
||||
"请点击充值以获取额度兑换码。": "Please click recharge to get the quota redemption code.",
|
||||
"用户信息更新成功!": "User information updated successfully!",
|
||||
"更新用户信息": "Update user information",
|
||||
"请输入新的用户名": "Please enter a new username",
|
||||
"请输入新的密码,最短 8 位": "Please enter a new password, at least 8 characters",
|
||||
"请输入新的显示名称": "Please enter a new display name",
|
||||
"分组": "Group",
|
||||
"请选择分组": "Please select a group",
|
||||
"请在系统设置页面编辑分组倍率以添加新的分组:": "Please edit the group rate on the system settings page to add a new group:",
|
||||
"请输入新的剩余额度": "Please enter a new remaining quota",
|
||||
"已绑定的 GitHub 账户": "Bound GitHub account",
|
||||
"此项只读,需要用户通过个人设置页面的相关绑定按钮进行绑定,不可直接修改": "This item is read-only, users need to bind through the relevant binding button on the personal settings page, cannot be directly modified",
|
||||
"已绑定的微信账户": "Bound WeChat account",
|
||||
"已绑定的邮箱账户": "Bound email account",
|
||||
"新版本可用:${data.version},请使用快捷键 Shift + F5 刷新页面": "New version available: ${data.version}, please refresh the page using the shortcut key Shift + F5",
|
||||
"无法正常连接至服务器!": "Unable to connect to the server normally!",
|
||||
"提示:": "Input:",
|
||||
"补全:": "Output:",
|
||||
"搜索令牌名称": "Search key name",
|
||||
"测试所有渠道": "Test all channels",
|
||||
"更新已启用渠道余额": "Update the balance of enabled channels"
|
||||
}
|
||||
|
main.go (68 lines changed)
@@ -3,87 +3,87 @@ package main
|
||||
import (
|
||||
"embed"
|
||||
"fmt"
|
||||
"one-api/common"
|
||||
"one-api/controller"
|
||||
"one-api/middleware"
|
||||
"one-api/model"
|
||||
"one-api/router"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/gin-contrib/sessions"
|
||||
"github.com/gin-contrib/sessions/cookie"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"github.com/songquanpeng/one-api/controller"
|
||||
"github.com/songquanpeng/one-api/middleware"
|
||||
"github.com/songquanpeng/one-api/model"
|
||||
"github.com/songquanpeng/one-api/relay/channel/openai"
|
||||
"github.com/songquanpeng/one-api/router"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
//go:embed web/build
|
||||
//go:embed web/build/*
|
||||
var buildFS embed.FS
|
||||
|
||||
//go:embed web/build/index.html
|
||||
var indexPage []byte
|
||||
|
||||
func main() {
|
||||
common.SetupLogger()
|
||||
common.SysLog("One API " + common.Version + " started")
|
||||
logger.SetupLogger()
|
||||
logger.SysLog(fmt.Sprintf("One API %s started", common.Version))
|
||||
if os.Getenv("GIN_MODE") != "debug" {
|
||||
gin.SetMode(gin.ReleaseMode)
|
||||
}
|
||||
if common.DebugEnabled {
|
||||
common.SysLog("running in debug mode")
|
||||
if config.DebugEnabled {
|
||||
logger.SysLog("running in debug mode")
|
||||
}
|
||||
// Initialize SQL Database
|
||||
err := model.InitDB()
|
||||
if err != nil {
|
||||
common.FatalLog("failed to initialize database: " + err.Error())
|
||||
logger.FatalLog("failed to initialize database: " + err.Error())
|
||||
}
|
||||
defer func() {
|
||||
err := model.CloseDB()
|
||||
if err != nil {
|
||||
common.FatalLog("failed to close database: " + err.Error())
|
||||
logger.FatalLog("failed to close database: " + err.Error())
|
||||
}
|
||||
}()
|
||||
|
||||
// Initialize Redis
|
||||
err = common.InitRedisClient()
|
||||
if err != nil {
|
||||
common.FatalLog("failed to initialize Redis: " + err.Error())
|
||||
logger.FatalLog("failed to initialize Redis: " + err.Error())
|
||||
}
|
||||
|
||||
// Initialize options
|
||||
model.InitOptionMap()
|
||||
logger.SysLog(fmt.Sprintf("using theme %s", config.Theme))
|
||||
if common.RedisEnabled {
|
||||
// for compatibility with old versions
|
||||
common.MemoryCacheEnabled = true
|
||||
config.MemoryCacheEnabled = true
|
||||
}
|
||||
if common.MemoryCacheEnabled {
|
||||
common.SysLog("memory cache enabled")
|
||||
common.SysError(fmt.Sprintf("sync frequency: %d seconds", common.SyncFrequency))
|
||||
if config.MemoryCacheEnabled {
|
||||
logger.SysLog("memory cache enabled")
|
||||
logger.SysError(fmt.Sprintf("sync frequency: %d seconds", config.SyncFrequency))
|
||||
model.InitChannelCache()
|
||||
}
|
||||
if common.MemoryCacheEnabled {
|
||||
go model.SyncOptions(common.SyncFrequency)
|
||||
go model.SyncChannelCache(common.SyncFrequency)
|
||||
if config.MemoryCacheEnabled {
|
||||
go model.SyncOptions(config.SyncFrequency)
|
||||
go model.SyncChannelCache(config.SyncFrequency)
|
||||
}
|
||||
if os.Getenv("CHANNEL_UPDATE_FREQUENCY") != "" {
|
||||
frequency, err := strconv.Atoi(os.Getenv("CHANNEL_UPDATE_FREQUENCY"))
|
||||
if err != nil {
|
||||
common.FatalLog("failed to parse CHANNEL_UPDATE_FREQUENCY: " + err.Error())
|
||||
logger.FatalLog("failed to parse CHANNEL_UPDATE_FREQUENCY: " + err.Error())
|
||||
}
|
||||
go controller.AutomaticallyUpdateChannels(frequency)
|
||||
}
|
||||
if os.Getenv("CHANNEL_TEST_FREQUENCY") != "" {
|
||||
frequency, err := strconv.Atoi(os.Getenv("CHANNEL_TEST_FREQUENCY"))
|
||||
if err != nil {
|
||||
common.FatalLog("failed to parse CHANNEL_TEST_FREQUENCY: " + err.Error())
|
||||
logger.FatalLog("failed to parse CHANNEL_TEST_FREQUENCY: " + err.Error())
|
||||
}
|
||||
go controller.AutomaticallyTestChannels(frequency)
|
||||
}
|
||||
if os.Getenv("BATCH_UPDATE_ENABLED") == "true" {
|
||||
common.BatchUpdateEnabled = true
|
||||
common.SysLog("batch update enabled with interval " + strconv.Itoa(common.BatchUpdateInterval) + "s")
|
||||
config.BatchUpdateEnabled = true
|
||||
logger.SysLog("batch update enabled with interval " + strconv.Itoa(config.BatchUpdateInterval) + "s")
|
||||
model.InitBatchUpdater()
|
||||
}
|
||||
common.InitTokenEncoders()
|
||||
openai.InitTokenEncoders()
|
||||
|
||||
// Initialize HTTP server
|
||||
server := gin.New()
|
||||
@@ -93,16 +93,16 @@ func main() {
|
||||
server.Use(middleware.RequestId())
|
||||
middleware.SetUpLogger(server)
|
||||
// Initialize session store
|
||||
store := cookie.NewStore([]byte(common.SessionSecret))
|
||||
store := cookie.NewStore([]byte(config.SessionSecret))
|
||||
server.Use(sessions.Sessions("session", store))
|
||||
|
||||
router.SetRouter(server, buildFS, indexPage)
|
||||
router.SetRouter(server, buildFS)
|
||||
var port = os.Getenv("PORT")
|
||||
if port == "" {
|
||||
port = strconv.Itoa(*common.Port)
|
||||
}
|
||||
err = server.Run(":" + port)
|
||||
if err != nil {
|
||||
common.FatalLog("failed to start HTTP server: " + err.Error())
|
||||
logger.FatalLog("failed to start HTTP server: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
@@ -3,9 +3,9 @@ package middleware
|
||||
import (
|
||||
"github.com/gin-contrib/sessions"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/model"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@@ -108,7 +108,7 @@ func TokenAuth() func(c *gin.Context) {
|
||||
c.Set("token_name", token.Name)
|
||||
if len(parts) > 1 {
|
||||
if model.IsAdmin(token.UserId) {
|
||||
c.Set("channelId", parts[1])
|
||||
c.Set("specific_channel_id", parts[1])
|
||||
} else {
|
||||
abortWithMessage(c, http.StatusForbidden, "普通用户不支持指定渠道")
|
||||
return
|
||||
|
@@ -1,16 +1,112 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"one-api/model"
|
||||
"fmt"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"github.com/songquanpeng/one-api/model"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
type ModelRequest struct {
|
||||
Model string `json:"model"`
|
||||
}
|
||||
|
||||
func Distribute() func(c *gin.Context) {
|
||||
return func(c *gin.Context) {
|
||||
userId := c.GetInt("id")
|
||||
userGroup, _ := model.CacheGetUserGroup(userId)
|
||||
c.Set("group", userGroup)
|
||||
var requestModel string
|
||||
var channel *model.Channel
|
||||
channelId, ok := c.Get("specific_channel_id")
|
||||
if ok {
|
||||
id, err := strconv.Atoi(channelId.(string))
|
||||
if err != nil {
|
||||
abortWithMessage(c, http.StatusBadRequest, "无效的渠道 Id")
|
||||
return
|
||||
}
|
||||
channel, err = model.GetChannelById(id, true)
|
||||
if err != nil {
|
||||
abortWithMessage(c, http.StatusBadRequest, "无效的渠道 Id")
|
||||
return
|
||||
}
|
||||
if channel.Status != common.ChannelStatusEnabled {
|
||||
abortWithMessage(c, http.StatusForbidden, "该渠道已被禁用")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// Select a channel for the user
|
||||
var modelRequest ModelRequest
|
||||
err := common.UnmarshalBodyReusable(c, &modelRequest)
|
||||
if err != nil {
|
||||
abortWithMessage(c, http.StatusBadRequest, "无效的请求")
|
||||
return
|
||||
}
|
||||
if strings.HasPrefix(c.Request.URL.Path, "/v1/moderations") {
|
||||
if modelRequest.Model == "" {
|
||||
modelRequest.Model = "text-moderation-stable"
|
||||
}
|
||||
}
|
||||
if strings.HasSuffix(c.Request.URL.Path, "embeddings") {
|
||||
if modelRequest.Model == "" {
|
||||
modelRequest.Model = c.Param("model")
|
||||
}
|
||||
}
|
||||
if strings.HasPrefix(c.Request.URL.Path, "/v1/images/generations") {
|
||||
if modelRequest.Model == "" {
|
||||
modelRequest.Model = "dall-e-2"
|
||||
}
|
||||
}
|
||||
if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/transcriptions") || strings.HasPrefix(c.Request.URL.Path, "/v1/audio/translations") {
|
||||
if modelRequest.Model == "" {
|
||||
modelRequest.Model = "whisper-1"
|
||||
}
|
||||
}
|
||||
requestModel = modelRequest.Model
|
||||
channel, err = model.CacheGetRandomSatisfiedChannel(userGroup, modelRequest.Model, false)
|
||||
if err != nil {
|
||||
message := fmt.Sprintf("当前分组 %s 下对于模型 %s 无可用渠道", userGroup, modelRequest.Model)
|
||||
if channel != nil {
|
||||
logger.SysError(fmt.Sprintf("渠道不存在:%d", channel.Id))
|
||||
message = "数据库一致性已被破坏,请联系管理员"
|
||||
}
|
||||
abortWithMessage(c, http.StatusServiceUnavailable, message)
|
||||
return
|
||||
}
|
||||
}
|
||||
SetupContextForSelectedChannel(c, channel, requestModel)
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func SetupContextForSelectedChannel(c *gin.Context, channel *model.Channel, modelName string) {
|
||||
c.Set("channel", channel.Type)
|
||||
c.Set("channel_id", channel.Id)
|
||||
c.Set("channel_name", channel.Name)
|
||||
c.Set("model_mapping", channel.GetModelMapping())
|
||||
c.Set("original_model", modelName) // for retry
|
||||
c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
|
||||
c.Set("base_url", channel.GetBaseURL())
|
||||
// this is for backward compatibility
|
||||
switch channel.Type {
|
||||
case common.ChannelTypeAzure:
|
||||
c.Set(common.ConfigKeyAPIVersion, channel.Other)
|
||||
case common.ChannelTypeXunfei:
|
||||
c.Set(common.ConfigKeyAPIVersion, channel.Other)
|
||||
case common.ChannelTypeGemini:
|
||||
c.Set(common.ConfigKeyAPIVersion, channel.Other)
|
||||
case common.ChannelTypeAIProxyLibrary:
|
||||
c.Set(common.ConfigKeyLibraryID, channel.Other)
|
||||
case common.ChannelTypeAli:
|
||||
c.Set(common.ConfigKeyPlugin, channel.Other)
|
||||
}
|
||||
cfg, _ := channel.LoadConfig()
|
||||
for k, v := range cfg {
|
||||
c.Set(common.ConfigKeyPrefix+k, v)
|
||||
}
|
||||
}
|
||||
|
@@ -3,14 +3,14 @@ package middleware
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/gin-gonic/gin"
|
||||
"one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
)
|
||||
|
||||
func SetUpLogger(server *gin.Engine) {
|
||||
server.Use(gin.LoggerWithFormatter(func(param gin.LogFormatterParams) string {
|
||||
var requestID string
|
||||
if param.Keys != nil {
|
||||
requestID = param.Keys[common.RequestIdKey].(string)
|
||||
requestID = param.Keys[logger.RequestIdKey].(string)
|
||||
}
|
||||
return fmt.Sprintf("[GIN] %s | %s | %3d | %13v | %15s | %7s %s\n",
|
||||
param.TimeStamp.Format("2006/01/02 - 15:04:05"),
|
||||
|
@@ -4,8 +4,9 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -26,7 +27,7 @@ func redisRateLimiter(c *gin.Context, maxRequestNum int, duration int64, mark st
|
||||
}
|
||||
if listLength < int64(maxRequestNum) {
|
||||
rdb.LPush(ctx, key, time.Now().Format(timeFormat))
|
||||
rdb.Expire(ctx, key, common.RateLimitKeyExpirationDuration)
|
||||
rdb.Expire(ctx, key, config.RateLimitKeyExpirationDuration)
|
||||
} else {
|
||||
oldTimeStr, _ := rdb.LIndex(ctx, key, -1).Result()
|
||||
oldTime, err := time.Parse(timeFormat, oldTimeStr)
|
||||
@@ -47,14 +48,14 @@ func redisRateLimiter(c *gin.Context, maxRequestNum int, duration int64, mark st
|
||||
// time.Since will return negative number!
|
||||
// See: https://stackoverflow.com/questions/50970900/why-is-time-since-returning-negative-durations-on-windows
|
||||
if int64(nowTime.Sub(oldTime).Seconds()) < duration {
|
||||
rdb.Expire(ctx, key, common.RateLimitKeyExpirationDuration)
|
||||
rdb.Expire(ctx, key, config.RateLimitKeyExpirationDuration)
|
||||
c.Status(http.StatusTooManyRequests)
|
||||
c.Abort()
|
||||
return
|
||||
} else {
|
||||
rdb.LPush(ctx, key, time.Now().Format(timeFormat))
|
||||
rdb.LTrim(ctx, key, 0, int64(maxRequestNum-1))
|
||||
rdb.Expire(ctx, key, common.RateLimitKeyExpirationDuration)
|
||||
rdb.Expire(ctx, key, config.RateLimitKeyExpirationDuration)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -75,7 +76,7 @@ func rateLimitFactory(maxRequestNum int, duration int64, mark string) func(c *gi
|
||||
}
|
||||
} else {
|
||||
// It's safe to call multi times.
|
||||
inMemoryRateLimiter.Init(common.RateLimitKeyExpirationDuration)
|
||||
inMemoryRateLimiter.Init(config.RateLimitKeyExpirationDuration)
|
||||
return func(c *gin.Context) {
|
||||
memoryRateLimiter(c, maxRequestNum, duration, mark)
|
||||
}
|
||||
@@ -83,21 +84,21 @@ func rateLimitFactory(maxRequestNum int, duration int64, mark string) func(c *gi
|
||||
}
|
||||
|
||||
func GlobalWebRateLimit() func(c *gin.Context) {
|
||||
return rateLimitFactory(common.GlobalWebRateLimitNum, common.GlobalWebRateLimitDuration, "GW")
|
||||
return rateLimitFactory(config.GlobalWebRateLimitNum, config.GlobalWebRateLimitDuration, "GW")
|
||||
}
|
||||
|
||||
func GlobalAPIRateLimit() func(c *gin.Context) {
|
||||
return rateLimitFactory(common.GlobalApiRateLimitNum, common.GlobalApiRateLimitDuration, "GA")
|
||||
return rateLimitFactory(config.GlobalApiRateLimitNum, config.GlobalApiRateLimitDuration, "GA")
|
||||
}
|
||||
|
||||
func CriticalRateLimit() func(c *gin.Context) {
|
||||
return rateLimitFactory(common.CriticalRateLimitNum, common.CriticalRateLimitDuration, "CT")
|
||||
return rateLimitFactory(config.CriticalRateLimitNum, config.CriticalRateLimitDuration, "CT")
|
||||
}
|
||||
|
||||
func DownloadRateLimit() func(c *gin.Context) {
|
||||
return rateLimitFactory(common.DownloadRateLimitNum, common.DownloadRateLimitDuration, "DW")
|
||||
return rateLimitFactory(config.DownloadRateLimitNum, config.DownloadRateLimitDuration, "DW")
|
||||
}
|
||||
|
||||
func UploadRateLimit() func(c *gin.Context) {
|
||||
return rateLimitFactory(common.UploadRateLimitNum, common.UploadRateLimitDuration, "UP")
|
||||
return rateLimitFactory(config.UploadRateLimitNum, config.UploadRateLimitDuration, "UP")
|
||||
}
|
||||
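The Redis-backed limiter in the hunks above keeps one list of request timestamps per key and lets a request through while the list is shorter than maxRequestNum, or once the oldest entry has aged out of the window. The in-memory sketch below only illustrates that sliding-window idea; it is not code from the repository and does not reproduce the Redis LPush/LTrim details.

package main

import (
	"fmt"
	"time"
)

// slidingWindow is an illustrative in-memory counterpart of the limiter:
// it keeps the timestamps of accepted requests and admits a new one when
// fewer than max requests happened within the last duration.
type slidingWindow struct {
	max      int
	duration time.Duration
	stamps   []time.Time
}

func (w *slidingWindow) allow(now time.Time) bool {
	// drop entries that have fallen outside the window
	kept := w.stamps[:0]
	for _, t := range w.stamps {
		if now.Sub(t) < w.duration {
			kept = append(kept, t)
		}
	}
	w.stamps = kept
	if len(w.stamps) >= w.max {
		return false
	}
	w.stamps = append(w.stamps, now)
	return true
}

func main() {
	w := &slidingWindow{max: 2, duration: time.Minute}
	now := time.Now()
	fmt.Println(w.allow(now))                      // true
	fmt.Println(w.allow(now.Add(time.Second)))     // true
	fmt.Println(w.allow(now.Add(2 * time.Second))) // false, window is full
	fmt.Println(w.allow(now.Add(2 * time.Minute))) // true, old entries expired
}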
|
@@ -3,15 +3,17 @@ package middleware
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
func RelayPanicRecover() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
common.SysError(fmt.Sprintf("panic detected: %v", err))
|
||||
logger.SysError(fmt.Sprintf("panic detected: %v", err))
|
||||
logger.SysError(fmt.Sprintf("stacktrace from panic: %s", string(debug.Stack())))
|
||||
c.JSON(http.StatusInternalServerError, gin.H{
|
||||
"error": gin.H{
|
||||
"message": fmt.Sprintf("Panic detected, error: %v. Please submit a issue here: https://github.com/songquanpeng/one-api", err),
|
||||
|
@@ -3,16 +3,17 @@ package middleware
|
||||
import (
|
||||
"context"
|
||||
"github.com/gin-gonic/gin"
|
||||
"one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/helper"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
)
|
||||
|
||||
func RequestId() func(c *gin.Context) {
|
||||
return func(c *gin.Context) {
|
||||
id := common.GetTimeString() + common.GetRandomString(8)
|
||||
c.Set(common.RequestIdKey, id)
|
||||
ctx := context.WithValue(c.Request.Context(), common.RequestIdKey, id)
|
||||
id := helper.GetTimeString() + helper.GetRandomNumberString(8)
|
||||
c.Set(logger.RequestIdKey, id)
|
||||
ctx := context.WithValue(c.Request.Context(), logger.RequestIdKey, id)
|
||||
c.Request = c.Request.WithContext(ctx)
|
||||
c.Header(common.RequestIdKey, id)
|
||||
c.Header(logger.RequestIdKey, id)
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
@@ -4,9 +4,10 @@ import (
|
||||
"encoding/json"
|
||||
"github.com/gin-contrib/sessions"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"one-api/common"
|
||||
)
|
||||
|
||||
type turnstileCheckResponse struct {
|
||||
@@ -15,7 +16,7 @@ type turnstileCheckResponse struct {
|
||||
|
||||
func TurnstileCheck() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
if common.TurnstileCheckEnabled {
|
||||
if config.TurnstileCheckEnabled {
|
||||
session := sessions.Default(c)
|
||||
turnstileChecked := session.Get("turnstile")
|
||||
if turnstileChecked != nil {
|
||||
@@ -32,12 +33,12 @@ func TurnstileCheck() gin.HandlerFunc {
|
||||
return
|
||||
}
|
||||
rawRes, err := http.PostForm("https://challenges.cloudflare.com/turnstile/v0/siteverify", url.Values{
|
||||
"secret": {common.TurnstileSecretKey},
|
||||
"secret": {config.TurnstileSecretKey},
|
||||
"response": {response},
|
||||
"remoteip": {c.ClientIP()},
|
||||
})
|
||||
if err != nil {
|
||||
common.SysError(err.Error())
|
||||
logger.SysError(err.Error())
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
"message": err.Error(),
|
||||
@@ -49,7 +50,7 @@ func TurnstileCheck() gin.HandlerFunc {
|
||||
var res turnstileCheckResponse
|
||||
err = json.NewDecoder(rawRes.Body).Decode(&res)
|
||||
if err != nil {
|
||||
common.SysError(err.Error())
|
||||
logger.SysError(err.Error())
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
"message": err.Error(),
|
||||
|
@@ -2,16 +2,17 @@ package middleware
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/helper"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
)
|
||||
|
||||
func abortWithMessage(c *gin.Context, statusCode int, message string) {
|
||||
c.JSON(statusCode, gin.H{
|
||||
"error": gin.H{
|
||||
"message": common.MessageWithRequestId(message, c.GetString(common.RequestIdKey)),
|
||||
"message": helper.MessageWithRequestId(message, c.GetString(logger.RequestIdKey)),
|
||||
"type": "one_api_error",
|
||||
},
|
||||
})
|
||||
c.Abort()
|
||||
common.LogError(c.Request.Context(), message)
|
||||
logger.Error(c.Request.Context(), message)
|
||||
}
|
||||
|
@@ -1,7 +1,7 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"one-api/common"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"strings"
|
||||
)
|
||||
|
||||
|
@@ -4,8 +4,10 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"math/rand"
|
||||
"one-api/common"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -14,10 +16,10 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
TokenCacheSeconds = common.SyncFrequency
|
||||
UserId2GroupCacheSeconds = common.SyncFrequency
|
||||
UserId2QuotaCacheSeconds = common.SyncFrequency
|
||||
UserId2StatusCacheSeconds = common.SyncFrequency
|
||||
TokenCacheSeconds = config.SyncFrequency
|
||||
UserId2GroupCacheSeconds = config.SyncFrequency
|
||||
UserId2QuotaCacheSeconds = config.SyncFrequency
|
||||
UserId2StatusCacheSeconds = config.SyncFrequency
|
||||
)
|
||||
|
||||
func CacheGetTokenByKey(key string) (*Token, error) {
|
||||
@@ -42,7 +44,7 @@ func CacheGetTokenByKey(key string) (*Token, error) {
|
||||
}
|
||||
err = common.RedisSet(fmt.Sprintf("token:%s", key), string(jsonBytes), time.Duration(TokenCacheSeconds)*time.Second)
|
||||
if err != nil {
|
||||
common.SysError("Redis set token error: " + err.Error())
|
||||
logger.SysError("Redis set token error: " + err.Error())
|
||||
}
|
||||
return &token, nil
|
||||
}
|
||||
@@ -62,7 +64,7 @@ func CacheGetUserGroup(id int) (group string, err error) {
|
||||
}
|
||||
err = common.RedisSet(fmt.Sprintf("user_group:%d", id), group, time.Duration(UserId2GroupCacheSeconds)*time.Second)
|
||||
if err != nil {
|
||||
common.SysError("Redis set user group error: " + err.Error())
|
||||
logger.SysError("Redis set user group error: " + err.Error())
|
||||
}
|
||||
}
|
||||
return group, err
|
||||
@@ -80,7 +82,7 @@ func CacheGetUserQuota(id int) (quota int, err error) {
|
||||
}
|
||||
err = common.RedisSet(fmt.Sprintf("user_quota:%d", id), fmt.Sprintf("%d", quota), time.Duration(UserId2QuotaCacheSeconds)*time.Second)
|
||||
if err != nil {
|
||||
common.SysError("Redis set user quota error: " + err.Error())
|
||||
logger.SysError("Redis set user quota error: " + err.Error())
|
||||
}
|
||||
return quota, err
|
||||
}
|
||||
@@ -92,7 +94,7 @@ func CacheUpdateUserQuota(id int) error {
|
||||
if !common.RedisEnabled {
|
||||
return nil
|
||||
}
|
||||
quota, err := GetUserQuota(id)
|
||||
quota, err := CacheGetUserQuota(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -127,7 +129,7 @@ func CacheIsUserEnabled(userId int) (bool, error) {
|
||||
}
|
||||
err = common.RedisSet(fmt.Sprintf("user_enabled:%d", userId), enabled, time.Duration(UserId2StatusCacheSeconds)*time.Second)
|
||||
if err != nil {
|
||||
common.SysError("Redis set user enabled error: " + err.Error())
|
||||
logger.SysError("Redis set user enabled error: " + err.Error())
|
||||
}
|
||||
return userEnabled, err
|
||||
}
|
||||
@@ -178,19 +180,19 @@ func InitChannelCache() {
|
||||
channelSyncLock.Lock()
|
||||
group2model2channels = newGroup2model2channels
|
||||
channelSyncLock.Unlock()
|
||||
common.SysLog("channels synced from database")
|
||||
logger.SysLog("channels synced from database")
|
||||
}
|
||||
|
||||
func SyncChannelCache(frequency int) {
|
||||
for {
|
||||
time.Sleep(time.Duration(frequency) * time.Second)
|
||||
common.SysLog("syncing channels from database")
|
||||
logger.SysLog("syncing channels from database")
|
||||
InitChannelCache()
|
||||
}
|
||||
}
|
||||

func CacheGetRandomSatisfiedChannel(group string, model string) (*Channel, error) {
	if !common.MemoryCacheEnabled {
func CacheGetRandomSatisfiedChannel(group string, model string, ignoreFirstPriority bool) (*Channel, error) {
	if !config.MemoryCacheEnabled {
		return GetRandomSatisfiedChannel(group, model)
	}
	channelSyncLock.RLock()
@@ -211,5 +213,10 @@ func CacheGetRandomSatisfiedChannel(group string, model string) (*Channel, error
		}
	}
	idx := rand.Intn(endIdx)
	if ignoreFirstPriority {
		if endIdx < len(channels) { // which means there are more than one priority
			idx = common.RandRange(endIdx, len(channels))
		}
	}
	return channels[idx], nil
}
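In the new signature above, the cached channel slice is ordered so that the highest-priority channels occupy [0, endIdx); a retry passes ignoreFirstPriority=true to skip that group when lower-priority channels exist. The following is a rough sketch of that selection, under the assumption that common.RandRange(min, max) returns a uniform integer in [min, max); the helper and sample channel names below are not from the repository.

package main

import (
	"fmt"
	"math/rand"
)

// randRange stands in for the assumed behaviour of common.RandRange:
// a uniform integer in [min, max).
func randRange(min, max int) int {
	return min + rand.Intn(max-min)
}

// pick chooses a channel name; [0, endIdx) holds the highest-priority channels.
func pick(channels []string, endIdx int, ignoreFirstPriority bool) string {
	idx := rand.Intn(endIdx)
	if ignoreFirstPriority && endIdx < len(channels) {
		// a retry skips the priority group that just failed
		idx = randRange(endIdx, len(channels))
	}
	return channels[idx]
}

func main() {
	channels := []string{"primary-1", "primary-2", "backup-1"}
	fmt.Println(pick(channels, 2, false)) // first attempt: one of the primaries
	fmt.Println(pick(channels, 2, true))  // retry: always "backup-1" here
}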
|
@@ -1,8 +1,12 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"one-api/common"
|
||||
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/helper"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
@@ -17,7 +21,7 @@ type Channel struct {
|
||||
TestTime int64 `json:"test_time" gorm:"bigint"`
|
||||
ResponseTime int `json:"response_time"` // in milliseconds
|
||||
BaseURL *string `json:"base_url" gorm:"column:base_url;default:''"`
|
||||
Other string `json:"other"`
|
||||
Other string `json:"other"` // DEPRECATED: please save config to field Config
|
||||
Balance float64 `json:"balance"` // in USD
|
||||
BalanceUpdatedTime int64 `json:"balance_updated_time" gorm:"bigint"`
|
||||
Models string `json:"models"`
|
||||
@@ -25,6 +29,7 @@ type Channel struct {
|
||||
UsedQuota int64 `json:"used_quota" gorm:"bigint;default:0"`
|
||||
ModelMapping *string `json:"model_mapping" gorm:"type:varchar(1024);default:''"`
|
||||
Priority *int64 `json:"priority" gorm:"bigint;default:0"`
|
||||
Config string `json:"config"`
|
||||
}
|
||||
|
||||
func GetAllChannels(startIdx int, num int, selectAll bool) ([]*Channel, error) {
|
||||
@@ -43,7 +48,7 @@ func SearchChannels(keyword string) (channels []*Channel, err error) {
|
||||
if common.UsingPostgreSQL {
|
||||
keyCol = `"key"`
|
||||
}
|
||||
err = DB.Omit("key").Where("id = ? or name LIKE ? or "+keyCol+" = ?", common.String2Int(keyword), keyword+"%", keyword).Find(&channels).Error
|
||||
err = DB.Omit("key").Where("id = ? or name LIKE ? or "+keyCol+" = ?", helper.String2Int(keyword), keyword+"%", keyword).Find(&channels).Error
|
||||
return channels, err
|
||||
}
|
||||
|
||||
@@ -87,11 +92,17 @@ func (channel *Channel) GetBaseURL() string {
	return *channel.BaseURL
}

func (channel *Channel) GetModelMapping() string {
	if channel.ModelMapping == nil {
		return ""
func (channel *Channel) GetModelMapping() map[string]string {
	if channel.ModelMapping == nil || *channel.ModelMapping == "" || *channel.ModelMapping == "{}" {
		return nil
	}
	return *channel.ModelMapping
	modelMapping := make(map[string]string)
	err := json.Unmarshal([]byte(*channel.ModelMapping), &modelMapping)
	if err != nil {
		logger.SysError(fmt.Sprintf("failed to unmarshal model mapping for channel %d, error: %s", channel.Id, err.Error()))
		return nil
	}
	return modelMapping
}
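GetModelMapping now parses the channel's model_mapping column into a map that relay handlers can use to rewrite the requested model name before calling the upstream. The value below is a made-up example of such a mapping, not data from the repository; the sketch simply shows the shape of the JSON and how it decodes.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical model_mapping value stored on a channel.
	raw := `{"gpt-4": "gpt-4-0613", "gpt-3.5-turbo": "gpt-3.5-turbo-1106"}`

	mapping := make(map[string]string)
	if err := json.Unmarshal([]byte(raw), &mapping); err != nil {
		fmt.Println("invalid model mapping:", err)
		return
	}
	if mapped, ok := mapping["gpt-4"]; ok {
		fmt.Println("gpt-4 is rewritten to", mapped) // gpt-4 is rewritten to gpt-4-0613
	}
}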
|
||||
func (channel *Channel) Insert() error {
|
||||
@@ -117,21 +128,21 @@ func (channel *Channel) Update() error {
|
||||
|
||||
func (channel *Channel) UpdateResponseTime(responseTime int64) {
|
||||
err := DB.Model(channel).Select("response_time", "test_time").Updates(Channel{
|
||||
TestTime: common.GetTimestamp(),
|
||||
TestTime: helper.GetTimestamp(),
|
||||
ResponseTime: int(responseTime),
|
||||
}).Error
|
||||
if err != nil {
|
||||
common.SysError("failed to update response time: " + err.Error())
|
||||
logger.SysError("failed to update response time: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (channel *Channel) UpdateBalance(balance float64) {
|
||||
err := DB.Model(channel).Select("balance_updated_time", "balance").Updates(Channel{
|
||||
BalanceUpdatedTime: common.GetTimestamp(),
|
||||
BalanceUpdatedTime: helper.GetTimestamp(),
|
||||
Balance: balance,
|
||||
}).Error
|
||||
if err != nil {
|
||||
common.SysError("failed to update balance: " + err.Error())
|
||||
logger.SysError("failed to update balance: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -145,19 +156,31 @@ func (channel *Channel) Delete() error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (channel *Channel) LoadConfig() (map[string]string, error) {
|
||||
if channel.Config == "" {
|
||||
return nil, nil
|
||||
}
|
||||
cfg := make(map[string]string)
|
||||
err := json.Unmarshal([]byte(channel.Config), &cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func UpdateChannelStatusById(id int, status int) {
|
||||
err := UpdateAbilityStatus(id, status == common.ChannelStatusEnabled)
|
||||
if err != nil {
|
||||
common.SysError("failed to update ability status: " + err.Error())
|
||||
logger.SysError("failed to update ability status: " + err.Error())
|
||||
}
|
||||
err = DB.Model(&Channel{}).Where("id = ?", id).Update("status", status).Error
|
||||
if err != nil {
|
||||
common.SysError("failed to update channel status: " + err.Error())
|
||||
logger.SysError("failed to update channel status: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func UpdateChannelUsedQuota(id int, quota int) {
|
||||
if common.BatchUpdateEnabled {
|
||||
if config.BatchUpdateEnabled {
|
||||
addNewRecord(BatchUpdateTypeChannelUsedQuota, id, quota)
|
||||
return
|
||||
}
|
||||
@@ -167,7 +190,7 @@ func UpdateChannelUsedQuota(id int, quota int) {
|
||||
func updateChannelUsedQuota(id int, quota int) {
|
||||
err := DB.Model(&Channel{}).Where("id = ?", id).Update("used_quota", gorm.Expr("used_quota + ?", quota)).Error
|
||||
if err != nil {
|
||||
common.SysError("failed to update channel used quota: " + err.Error())
|
||||
logger.SysError("failed to update channel used quota: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
|
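The GetModelMapping change above turns the channel's stored JSON string into a parsed map. A minimal sketch of how a caller might apply such a mapping to a requested model name; the mapping JSON and model names here are made-up examples, not values from the repository:

package main

import (
	"encoding/json"
	"fmt"
)

// resolveModel applies a model-mapping table of the kind GetModelMapping now
// returns: if the requested model has an entry, the mapped name is used,
// otherwise the request passes through unchanged.
func resolveModel(mapping map[string]string, requested string) string {
	if mapped, ok := mapping[requested]; ok && mapped != "" {
		return mapped
	}
	return requested
}

func main() {
	// Hypothetical mapping JSON as it might be stored on a channel.
	raw := `{"gpt-3.5-turbo": "gpt-3.5-turbo-1106"}`
	mapping := make(map[string]string)
	if err := json.Unmarshal([]byte(raw), &mapping); err != nil {
		fmt.Println("invalid model mapping:", err)
		return
	}
	fmt.Println(resolveModel(mapping, "gpt-3.5-turbo")) // gpt-3.5-turbo-1106
	fmt.Println(resolveModel(mapping, "gpt-4"))         // gpt-4 (no mapping entry)
}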
71 model/log.go
@@ -3,15 +3,18 @@ package model
import (
"context"
"fmt"
"one-api/common"
"github.com/songquanpeng/one-api/common"
"github.com/songquanpeng/one-api/common/config"
"github.com/songquanpeng/one-api/common/helper"
"github.com/songquanpeng/one-api/common/logger"

"gorm.io/gorm"
)

type Log struct {
Id int `json:"id;index:idx_created_at_id,priority:1"`
Id int `json:"id"`
UserId int `json:"user_id" gorm:"index"`
CreatedAt int64 `json:"created_at" gorm:"bigint;index:idx_created_at_id,priority:2;index:idx_created_at_type"`
CreatedAt int64 `json:"created_at" gorm:"bigint;index:idx_created_at_type"`
Type int `json:"type" gorm:"index:idx_created_at_type"`
Content string `json:"content"`
Username string `json:"username" gorm:"index:index_username_model_name,priority:2;default:''"`
@@ -23,15 +26,6 @@ type Log struct {
ChannelId int `json:"channel" gorm:"index"`
}

type LogStatistic struct {
Day string `gorm:"column:day"`
ModelName string `gorm:"column:model_name"`
RequestCount int `gorm:"column:request_count"`
Quota int `gorm:"column:quota"`
PromptTokens int `gorm:"column:prompt_tokens"`
CompletionTokens int `gorm:"column:completion_tokens"`
}

const (
LogTypeUnknown = iota
LogTypeTopup
@@ -41,31 +35,31 @@ const (
)

func RecordLog(userId int, logType int, content string) {
if logType == LogTypeConsume && !common.LogConsumeEnabled {
if logType == LogTypeConsume && !config.LogConsumeEnabled {
return
}
log := &Log{
UserId: userId,
Username: GetUsernameById(userId),
CreatedAt: common.GetTimestamp(),
CreatedAt: helper.GetTimestamp(),
Type: logType,
Content: content,
}
err := DB.Create(log).Error
if err != nil {
common.SysError("failed to record log: " + err.Error())
logger.SysError("failed to record log: " + err.Error())
}
}

func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptTokens int, completionTokens int, modelName string, tokenName string, quota int, content string) {
common.LogInfo(ctx, fmt.Sprintf("record consume log: userId=%d, channelId=%d, promptTokens=%d, completionTokens=%d, modelName=%s, tokenName=%s, quota=%d, content=%s", userId, channelId, promptTokens, completionTokens, modelName, tokenName, quota, content))
if !common.LogConsumeEnabled {
logger.Info(ctx, fmt.Sprintf("record consume log: userId=%d, channelId=%d, promptTokens=%d, completionTokens=%d, modelName=%s, tokenName=%s, quota=%d, content=%s", userId, channelId, promptTokens, completionTokens, modelName, tokenName, quota, content))
if !config.LogConsumeEnabled {
return
}
log := &Log{
UserId: userId,
Username: GetUsernameById(userId),
CreatedAt: common.GetTimestamp(),
CreatedAt: helper.GetTimestamp(),
Type: LogTypeConsume,
Content: content,
PromptTokens: promptTokens,
@@ -77,7 +71,7 @@ func RecordConsumeLog(ctx context.Context, userId int, channelId int, promptToke
}
err := DB.Create(log).Error
if err != nil {
common.LogError(ctx, "failed to record log: "+err.Error())
logger.Error(ctx, "failed to record log: "+err.Error())
}
}

@@ -134,17 +128,17 @@ func GetUserLogs(userId int, logType int, startTimestamp int64, endTimestamp int
}

func SearchAllLogs(keyword string) (logs []*Log, err error) {
err = DB.Where("type = ? or content LIKE ?", keyword, keyword+"%").Order("id desc").Limit(common.MaxRecentItems).Find(&logs).Error
err = DB.Where("type = ? or content LIKE ?", keyword, keyword+"%").Order("id desc").Limit(config.MaxRecentItems).Find(&logs).Error
return logs, err
}

func SearchUserLogs(userId int, keyword string) (logs []*Log, err error) {
err = DB.Where("user_id = ? and type = ?", userId, keyword).Order("id desc").Limit(common.MaxRecentItems).Omit("id").Find(&logs).Error
err = DB.Where("user_id = ? and type = ?", userId, keyword).Order("id desc").Limit(config.MaxRecentItems).Omit("id").Find(&logs).Error
return logs, err
}

func SumUsedQuota(logType int, startTimestamp int64, endTimestamp int64, modelName string, username string, tokenName string, channel int) (quota int) {
tx := DB.Table("logs").Select(assembleSumSelectStr("quota"))
tx := DB.Table("logs").Select("ifnull(sum(quota),0)")
if username != "" {
tx = tx.Where("username = ?", username)
}
@@ -168,7 +162,7 @@ func SumUsedQuota(logType int, startTimestamp int64, endTimestamp int64, modelNa
}

func SumUsedToken(logType int, startTimestamp int64, endTimestamp int64, modelName string, username string, tokenName string) (token int) {
tx := DB.Table("logs").Select(assembleSumSelectStr("prompt_tokens") + " + " + assembleSumSelectStr("completion_tokens"))
tx := DB.Table("logs").Select("ifnull(sum(prompt_tokens),0) + ifnull(sum(completion_tokens),0)")
if username != "" {
tx = tx.Where("username = ?", username)
}
@@ -193,13 +187,26 @@ func DeleteOldLog(targetTimestamp int64) (int64, error) {
return result.RowsAffected, result.Error
}

func SearchLogsByDayAndModel(user_id, start, end int) (LogStatistics []*LogStatistic, err error) {
type LogStatistic struct {
Day string `gorm:"column:day"`
ModelName string `gorm:"column:model_name"`
RequestCount int `gorm:"column:request_count"`
Quota int `gorm:"column:quota"`
PromptTokens int `gorm:"column:prompt_tokens"`
CompletionTokens int `gorm:"column:completion_tokens"`
}

func SearchLogsByDayAndModel(userId, start, end int) (LogStatistics []*LogStatistic, err error) {
groupSelect := "DATE_FORMAT(FROM_UNIXTIME(created_at), '%Y-%m-%d') as day"

if common.UsingPostgreSQL {
groupSelect = "TO_CHAR(date_trunc('day', to_timestamp(created_at)), 'YYYY-MM-DD') as day"
}

if common.UsingSQLite {
groupSelect = "strftime('%Y-%m-%d', datetime(created_at, 'unixepoch')) as day"
}

err = DB.Raw(`
SELECT `+groupSelect+`,
model_name, count(1) as request_count,
@@ -212,21 +219,7 @@ func SearchLogsByDayAndModel(user_id, start, end int) (LogStatistics []*LogStati
AND created_at BETWEEN ? AND ?
GROUP BY day, model_name
ORDER BY day, model_name
`, user_id, start, end).Scan(&LogStatistics).Error

fmt.Println(user_id, start, end)
`, userId, start, end).Scan(&LogStatistics).Error

return LogStatistics, err
}

func assembleSumSelectStr(selectStr string) string {
sumSelectStr := "%s(sum(%s),0)"
nullfunc := "ifnull"
if common.UsingPostgreSQL {
nullfunc = "coalesce"
}

sumSelectStr = fmt.Sprintf(sumSelectStr, nullfunc, selectStr)

return sumSelectStr
}

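SearchLogsByDayAndModel above picks a different day-bucket SQL expression per database dialect. A small sketch of that selection logic in isolation; the two boolean flags are stand-ins for the common.UsingPostgreSQL and common.UsingSQLite globals used in the diff:

package main

import "fmt"

// dayGroupExpr returns the SQL expression that buckets a unix timestamp
// column into a YYYY-MM-DD day string, matching the three variants used
// in SearchLogsByDayAndModel.
func dayGroupExpr(usingPostgreSQL, usingSQLite bool) string {
	// MySQL is the default.
	expr := "DATE_FORMAT(FROM_UNIXTIME(created_at), '%Y-%m-%d') as day"
	if usingPostgreSQL {
		expr = "TO_CHAR(date_trunc('day', to_timestamp(created_at)), 'YYYY-MM-DD') as day"
	}
	if usingSQLite {
		expr = "strftime('%Y-%m-%d', datetime(created_at, 'unixepoch')) as day"
	}
	return expr
}

func main() {
	fmt.Println(dayGroupExpr(false, false)) // MySQL
	fmt.Println(dayGroupExpr(true, false))  // PostgreSQL
	fmt.Println(dayGroupExpr(false, true))  // SQLite
}

The removed assembleSumSelectStr helper handled the ifnull versus coalesce difference with the same kind of per-dialect switch.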
@@ -2,11 +2,14 @@ package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/helper"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"gorm.io/driver/mysql"
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/driver/sqlite"
|
||||
"gorm.io/gorm"
|
||||
"one-api/common"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -16,9 +19,9 @@ var DB *gorm.DB
|
||||
|
||||
func createRootAccountIfNeed() error {
|
||||
var user User
|
||||
//if user.Status != common.UserStatusEnabled {
|
||||
//if user.Status != util.UserStatusEnabled {
|
||||
if err := DB.First(&user).Error; err != nil {
|
||||
common.SysLog("no user exists, create a root user for you: username is root, password is 123456")
|
||||
logger.SysLog("no user exists, create a root user for you: username is root, password is 123456")
|
||||
hashedPassword, err := common.Password2Hash("123456")
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -29,7 +32,7 @@ func createRootAccountIfNeed() error {
|
||||
Role: common.RoleRootUser,
|
||||
Status: common.UserStatusEnabled,
|
||||
DisplayName: "Root User",
|
||||
AccessToken: common.GetUUID(),
|
||||
AccessToken: helper.GetUUID(),
|
||||
Quota: 100000000,
|
||||
}
|
||||
DB.Create(&rootUser)
|
||||
@@ -42,7 +45,7 @@ func chooseDB() (*gorm.DB, error) {
|
||||
dsn := os.Getenv("SQL_DSN")
|
||||
if strings.HasPrefix(dsn, "postgres://") {
|
||||
// Use PostgreSQL
|
||||
common.SysLog("using PostgreSQL as database")
|
||||
logger.SysLog("using PostgreSQL as database")
|
||||
common.UsingPostgreSQL = true
|
||||
return gorm.Open(postgres.New(postgres.Config{
|
||||
DSN: dsn,
|
||||
@@ -52,13 +55,13 @@ func chooseDB() (*gorm.DB, error) {
|
||||
})
|
||||
}
|
||||
// Use MySQL
|
||||
common.SysLog("using MySQL as database")
|
||||
logger.SysLog("using MySQL as database")
|
||||
return gorm.Open(mysql.Open(dsn), &gorm.Config{
|
||||
PrepareStmt: true, // precompile SQL
|
||||
})
|
||||
}
|
||||
// Use SQLite
|
||||
common.SysLog("SQL_DSN not set, using SQLite as database")
|
||||
logger.SysLog("SQL_DSN not set, using SQLite as database")
|
||||
common.UsingSQLite = true
|
||||
config := fmt.Sprintf("?_busy_timeout=%d", common.SQLiteBusyTimeout)
|
||||
return gorm.Open(sqlite.Open(common.SQLitePath+config), &gorm.Config{
|
||||
@@ -69,7 +72,7 @@ func chooseDB() (*gorm.DB, error) {
|
||||
func InitDB() (err error) {
|
||||
db, err := chooseDB()
|
||||
if err == nil {
|
||||
if common.DebugEnabled {
|
||||
if config.DebugEnabled {
|
||||
db = db.Debug()
|
||||
}
|
||||
DB = db
|
||||
@@ -77,14 +80,14 @@ func InitDB() (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sqlDB.SetMaxIdleConns(common.GetOrDefault("SQL_MAX_IDLE_CONNS", 100))
|
||||
sqlDB.SetMaxOpenConns(common.GetOrDefault("SQL_MAX_OPEN_CONNS", 1000))
|
||||
sqlDB.SetConnMaxLifetime(time.Second * time.Duration(common.GetOrDefault("SQL_MAX_LIFETIME", 60)))
|
||||
sqlDB.SetMaxIdleConns(helper.GetOrDefaultEnvInt("SQL_MAX_IDLE_CONNS", 100))
|
||||
sqlDB.SetMaxOpenConns(helper.GetOrDefaultEnvInt("SQL_MAX_OPEN_CONNS", 1000))
|
||||
sqlDB.SetConnMaxLifetime(time.Second * time.Duration(helper.GetOrDefaultEnvInt("SQL_MAX_LIFETIME", 60)))
|
||||
|
||||
if !common.IsMasterNode {
|
||||
if !config.IsMasterNode {
|
||||
return nil
|
||||
}
|
||||
common.SysLog("database migration started")
|
||||
logger.SysLog("database migration started")
|
||||
err = db.AutoMigrate(&Channel{})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -113,11 +116,11 @@ func InitDB() (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
common.SysLog("database migrated")
|
||||
logger.SysLog("database migrated")
|
||||
err = createRootAccountIfNeed()
|
||||
return err
|
||||
} else {
|
||||
common.FatalLog(err)
|
||||
logger.FatalLog(err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
217 model/option.go
@@ -1,7 +1,9 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"one-api/common"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -20,59 +22,57 @@ func AllOption() ([]*Option, error) {
|
||||
}
|
||||
|
||||
func InitOptionMap() {
|
||||
common.OptionMapRWMutex.Lock()
|
||||
common.OptionMap = make(map[string]string)
|
||||
common.OptionMap["FileUploadPermission"] = strconv.Itoa(common.FileUploadPermission)
|
||||
common.OptionMap["FileDownloadPermission"] = strconv.Itoa(common.FileDownloadPermission)
|
||||
common.OptionMap["ImageUploadPermission"] = strconv.Itoa(common.ImageUploadPermission)
|
||||
common.OptionMap["ImageDownloadPermission"] = strconv.Itoa(common.ImageDownloadPermission)
|
||||
common.OptionMap["PasswordLoginEnabled"] = strconv.FormatBool(common.PasswordLoginEnabled)
|
||||
common.OptionMap["PasswordRegisterEnabled"] = strconv.FormatBool(common.PasswordRegisterEnabled)
|
||||
common.OptionMap["EmailVerificationEnabled"] = strconv.FormatBool(common.EmailVerificationEnabled)
|
||||
common.OptionMap["GitHubOAuthEnabled"] = strconv.FormatBool(common.GitHubOAuthEnabled)
|
||||
common.OptionMap["WeChatAuthEnabled"] = strconv.FormatBool(common.WeChatAuthEnabled)
|
||||
common.OptionMap["TurnstileCheckEnabled"] = strconv.FormatBool(common.TurnstileCheckEnabled)
|
||||
common.OptionMap["RegisterEnabled"] = strconv.FormatBool(common.RegisterEnabled)
|
||||
common.OptionMap["AutomaticDisableChannelEnabled"] = strconv.FormatBool(common.AutomaticDisableChannelEnabled)
|
||||
common.OptionMap["AutomaticEnableChannelEnabled"] = strconv.FormatBool(common.AutomaticEnableChannelEnabled)
|
||||
common.OptionMap["ApproximateTokenEnabled"] = strconv.FormatBool(common.ApproximateTokenEnabled)
|
||||
common.OptionMap["LogConsumeEnabled"] = strconv.FormatBool(common.LogConsumeEnabled)
|
||||
common.OptionMap["DisplayInCurrencyEnabled"] = strconv.FormatBool(common.DisplayInCurrencyEnabled)
|
||||
common.OptionMap["DisplayTokenStatEnabled"] = strconv.FormatBool(common.DisplayTokenStatEnabled)
|
||||
common.OptionMap["ChannelDisableThreshold"] = strconv.FormatFloat(common.ChannelDisableThreshold, 'f', -1, 64)
|
||||
common.OptionMap["EmailDomainRestrictionEnabled"] = strconv.FormatBool(common.EmailDomainRestrictionEnabled)
|
||||
common.OptionMap["EmailDomainWhitelist"] = strings.Join(common.EmailDomainWhitelist, ",")
|
||||
common.OptionMap["SMTPServer"] = ""
|
||||
common.OptionMap["SMTPFrom"] = ""
|
||||
common.OptionMap["SMTPPort"] = strconv.Itoa(common.SMTPPort)
|
||||
common.OptionMap["SMTPAccount"] = ""
|
||||
common.OptionMap["SMTPToken"] = ""
|
||||
common.OptionMap["Notice"] = ""
|
||||
common.OptionMap["About"] = ""
|
||||
common.OptionMap["HomePageContent"] = ""
|
||||
common.OptionMap["Footer"] = common.Footer
|
||||
common.OptionMap["SystemName"] = common.SystemName
|
||||
common.OptionMap["Logo"] = common.Logo
|
||||
common.OptionMap["ServerAddress"] = ""
|
||||
common.OptionMap["GitHubClientId"] = ""
|
||||
common.OptionMap["GitHubClientSecret"] = ""
|
||||
common.OptionMap["WeChatServerAddress"] = ""
|
||||
common.OptionMap["WeChatServerToken"] = ""
|
||||
common.OptionMap["WeChatAccountQRCodeImageURL"] = ""
|
||||
common.OptionMap["TurnstileSiteKey"] = ""
|
||||
common.OptionMap["TurnstileSecretKey"] = ""
|
||||
common.OptionMap["QuotaForNewUser"] = strconv.Itoa(common.QuotaForNewUser)
|
||||
common.OptionMap["QuotaForInviter"] = strconv.Itoa(common.QuotaForInviter)
|
||||
common.OptionMap["QuotaForInvitee"] = strconv.Itoa(common.QuotaForInvitee)
|
||||
common.OptionMap["QuotaRemindThreshold"] = strconv.Itoa(common.QuotaRemindThreshold)
|
||||
common.OptionMap["PreConsumedQuota"] = strconv.Itoa(common.PreConsumedQuota)
|
||||
common.OptionMap["ModelRatio"] = common.ModelRatio2JSONString()
|
||||
common.OptionMap["GroupRatio"] = common.GroupRatio2JSONString()
|
||||
common.OptionMap["TopUpLink"] = common.TopUpLink
|
||||
common.OptionMap["ChatLink"] = common.ChatLink
|
||||
common.OptionMap["QuotaPerUnit"] = strconv.FormatFloat(common.QuotaPerUnit, 'f', -1, 64)
|
||||
common.OptionMap["RetryTimes"] = strconv.Itoa(common.RetryTimes)
|
||||
common.OptionMapRWMutex.Unlock()
|
||||
config.OptionMapRWMutex.Lock()
|
||||
config.OptionMap = make(map[string]string)
|
||||
config.OptionMap["PasswordLoginEnabled"] = strconv.FormatBool(config.PasswordLoginEnabled)
|
||||
config.OptionMap["PasswordRegisterEnabled"] = strconv.FormatBool(config.PasswordRegisterEnabled)
|
||||
config.OptionMap["EmailVerificationEnabled"] = strconv.FormatBool(config.EmailVerificationEnabled)
|
||||
config.OptionMap["GitHubOAuthEnabled"] = strconv.FormatBool(config.GitHubOAuthEnabled)
|
||||
config.OptionMap["WeChatAuthEnabled"] = strconv.FormatBool(config.WeChatAuthEnabled)
|
||||
config.OptionMap["TurnstileCheckEnabled"] = strconv.FormatBool(config.TurnstileCheckEnabled)
|
||||
config.OptionMap["RegisterEnabled"] = strconv.FormatBool(config.RegisterEnabled)
|
||||
config.OptionMap["AutomaticDisableChannelEnabled"] = strconv.FormatBool(config.AutomaticDisableChannelEnabled)
|
||||
config.OptionMap["AutomaticEnableChannelEnabled"] = strconv.FormatBool(config.AutomaticEnableChannelEnabled)
|
||||
config.OptionMap["ApproximateTokenEnabled"] = strconv.FormatBool(config.ApproximateTokenEnabled)
|
||||
config.OptionMap["LogConsumeEnabled"] = strconv.FormatBool(config.LogConsumeEnabled)
|
||||
config.OptionMap["DisplayInCurrencyEnabled"] = strconv.FormatBool(config.DisplayInCurrencyEnabled)
|
||||
config.OptionMap["DisplayTokenStatEnabled"] = strconv.FormatBool(config.DisplayTokenStatEnabled)
|
||||
config.OptionMap["ChannelDisableThreshold"] = strconv.FormatFloat(config.ChannelDisableThreshold, 'f', -1, 64)
|
||||
config.OptionMap["EmailDomainRestrictionEnabled"] = strconv.FormatBool(config.EmailDomainRestrictionEnabled)
|
||||
config.OptionMap["EmailDomainWhitelist"] = strings.Join(config.EmailDomainWhitelist, ",")
|
||||
config.OptionMap["SMTPServer"] = ""
|
||||
config.OptionMap["SMTPFrom"] = ""
|
||||
config.OptionMap["SMTPPort"] = strconv.Itoa(config.SMTPPort)
|
||||
config.OptionMap["SMTPAccount"] = ""
|
||||
config.OptionMap["SMTPToken"] = ""
|
||||
config.OptionMap["Notice"] = ""
|
||||
config.OptionMap["About"] = ""
|
||||
config.OptionMap["HomePageContent"] = ""
|
||||
config.OptionMap["Footer"] = config.Footer
|
||||
config.OptionMap["SystemName"] = config.SystemName
|
||||
config.OptionMap["Logo"] = config.Logo
|
||||
config.OptionMap["ServerAddress"] = ""
|
||||
config.OptionMap["GitHubClientId"] = ""
|
||||
config.OptionMap["GitHubClientSecret"] = ""
|
||||
config.OptionMap["WeChatServerAddress"] = ""
|
||||
config.OptionMap["WeChatServerToken"] = ""
|
||||
config.OptionMap["WeChatAccountQRCodeImageURL"] = ""
|
||||
config.OptionMap["TurnstileSiteKey"] = ""
|
||||
config.OptionMap["TurnstileSecretKey"] = ""
|
||||
config.OptionMap["QuotaForNewUser"] = strconv.Itoa(config.QuotaForNewUser)
|
||||
config.OptionMap["QuotaForInviter"] = strconv.Itoa(config.QuotaForInviter)
|
||||
config.OptionMap["QuotaForInvitee"] = strconv.Itoa(config.QuotaForInvitee)
|
||||
config.OptionMap["QuotaRemindThreshold"] = strconv.Itoa(config.QuotaRemindThreshold)
|
||||
config.OptionMap["PreConsumedQuota"] = strconv.Itoa(config.PreConsumedQuota)
|
||||
config.OptionMap["ModelRatio"] = common.ModelRatio2JSONString()
|
||||
config.OptionMap["GroupRatio"] = common.GroupRatio2JSONString()
|
||||
config.OptionMap["CompletionRatio"] = common.CompletionRatio2JSONString()
|
||||
config.OptionMap["TopUpLink"] = config.TopUpLink
|
||||
config.OptionMap["ChatLink"] = config.ChatLink
|
||||
config.OptionMap["QuotaPerUnit"] = strconv.FormatFloat(config.QuotaPerUnit, 'f', -1, 64)
|
||||
config.OptionMap["RetryTimes"] = strconv.Itoa(config.RetryTimes)
|
||||
config.OptionMap["Theme"] = config.Theme
|
||||
config.OptionMapRWMutex.Unlock()
|
||||
loadOptionsFromDatabase()
|
||||
}
|
||||
|
||||
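InitOptionMap above fills config.OptionMap while holding the write side of OptionMapRWMutex; readers are expected to take the matching read lock. A minimal sketch of that access pattern, with a local map and mutex standing in for the package globals:

package main

import (
	"fmt"
	"sync"
)

var (
	optionMap        = map[string]string{"SystemName": "One API"}
	optionMapRWMutex sync.RWMutex
)

// getOption reads a value from the shared option map under a read lock,
// mirroring how config.OptionMap is meant to be consumed.
func getOption(key string) (string, bool) {
	optionMapRWMutex.RLock()
	defer optionMapRWMutex.RUnlock()
	value, ok := optionMap[key]
	return value, ok
}

// setOption updates the map under the write lock, as InitOptionMap and
// updateOptionMap do for the real globals.
func setOption(key, value string) {
	optionMapRWMutex.Lock()
	defer optionMapRWMutex.Unlock()
	optionMap[key] = value
}

func main() {
	setOption("Theme", "default")
	if v, ok := getOption("Theme"); ok {
		fmt.Println("Theme =", v)
	}
}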
@@ -81,7 +81,7 @@ func loadOptionsFromDatabase() {
|
||||
for _, option := range options {
|
||||
err := updateOptionMap(option.Key, option.Value)
|
||||
if err != nil {
|
||||
common.SysError("failed to update option map: " + err.Error())
|
||||
logger.SysError("failed to update option map: " + err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -89,7 +89,7 @@ func loadOptionsFromDatabase() {
|
||||
func SyncOptions(frequency int) {
|
||||
for {
|
||||
time.Sleep(time.Duration(frequency) * time.Second)
|
||||
common.SysLog("syncing options from database")
|
||||
logger.SysLog("syncing options from database")
|
||||
loadOptionsFromDatabase()
|
||||
}
|
||||
}
|
||||
@@ -111,115 +111,106 @@ func UpdateOption(key string, value string) error {
|
||||
}
|
||||
|
||||
func updateOptionMap(key string, value string) (err error) {
|
||||
common.OptionMapRWMutex.Lock()
|
||||
defer common.OptionMapRWMutex.Unlock()
|
||||
common.OptionMap[key] = value
|
||||
if strings.HasSuffix(key, "Permission") {
|
||||
intValue, _ := strconv.Atoi(value)
|
||||
switch key {
|
||||
case "FileUploadPermission":
|
||||
common.FileUploadPermission = intValue
|
||||
case "FileDownloadPermission":
|
||||
common.FileDownloadPermission = intValue
|
||||
case "ImageUploadPermission":
|
||||
common.ImageUploadPermission = intValue
|
||||
case "ImageDownloadPermission":
|
||||
common.ImageDownloadPermission = intValue
|
||||
}
|
||||
}
|
||||
config.OptionMapRWMutex.Lock()
|
||||
defer config.OptionMapRWMutex.Unlock()
|
||||
config.OptionMap[key] = value
|
||||
if strings.HasSuffix(key, "Enabled") {
|
||||
boolValue := value == "true"
|
||||
switch key {
|
||||
case "PasswordRegisterEnabled":
|
||||
common.PasswordRegisterEnabled = boolValue
|
||||
config.PasswordRegisterEnabled = boolValue
|
||||
case "PasswordLoginEnabled":
|
||||
common.PasswordLoginEnabled = boolValue
|
||||
config.PasswordLoginEnabled = boolValue
|
||||
case "EmailVerificationEnabled":
|
||||
common.EmailVerificationEnabled = boolValue
|
||||
config.EmailVerificationEnabled = boolValue
|
||||
case "GitHubOAuthEnabled":
|
||||
common.GitHubOAuthEnabled = boolValue
|
||||
config.GitHubOAuthEnabled = boolValue
|
||||
case "WeChatAuthEnabled":
|
||||
common.WeChatAuthEnabled = boolValue
|
||||
config.WeChatAuthEnabled = boolValue
|
||||
case "TurnstileCheckEnabled":
|
||||
common.TurnstileCheckEnabled = boolValue
|
||||
config.TurnstileCheckEnabled = boolValue
|
||||
case "RegisterEnabled":
|
||||
common.RegisterEnabled = boolValue
|
||||
config.RegisterEnabled = boolValue
|
||||
case "EmailDomainRestrictionEnabled":
|
||||
common.EmailDomainRestrictionEnabled = boolValue
|
||||
config.EmailDomainRestrictionEnabled = boolValue
|
||||
case "AutomaticDisableChannelEnabled":
|
||||
common.AutomaticDisableChannelEnabled = boolValue
|
||||
config.AutomaticDisableChannelEnabled = boolValue
|
||||
case "AutomaticEnableChannelEnabled":
|
||||
common.AutomaticEnableChannelEnabled = boolValue
|
||||
config.AutomaticEnableChannelEnabled = boolValue
|
||||
case "ApproximateTokenEnabled":
|
||||
common.ApproximateTokenEnabled = boolValue
|
||||
config.ApproximateTokenEnabled = boolValue
|
||||
case "LogConsumeEnabled":
|
||||
common.LogConsumeEnabled = boolValue
|
||||
config.LogConsumeEnabled = boolValue
|
||||
case "DisplayInCurrencyEnabled":
|
||||
common.DisplayInCurrencyEnabled = boolValue
|
||||
config.DisplayInCurrencyEnabled = boolValue
|
||||
case "DisplayTokenStatEnabled":
|
||||
common.DisplayTokenStatEnabled = boolValue
|
||||
config.DisplayTokenStatEnabled = boolValue
|
||||
}
|
||||
}
|
||||
switch key {
|
||||
case "EmailDomainWhitelist":
|
||||
common.EmailDomainWhitelist = strings.Split(value, ",")
|
||||
config.EmailDomainWhitelist = strings.Split(value, ",")
|
||||
case "SMTPServer":
|
||||
common.SMTPServer = value
|
||||
config.SMTPServer = value
|
||||
case "SMTPPort":
|
||||
intValue, _ := strconv.Atoi(value)
|
||||
common.SMTPPort = intValue
|
||||
config.SMTPPort = intValue
|
||||
case "SMTPAccount":
|
||||
common.SMTPAccount = value
|
||||
config.SMTPAccount = value
|
||||
case "SMTPFrom":
|
||||
common.SMTPFrom = value
|
||||
config.SMTPFrom = value
|
||||
case "SMTPToken":
|
||||
common.SMTPToken = value
|
||||
config.SMTPToken = value
|
||||
case "ServerAddress":
|
||||
common.ServerAddress = value
|
||||
config.ServerAddress = value
|
||||
case "GitHubClientId":
|
||||
common.GitHubClientId = value
|
||||
config.GitHubClientId = value
|
||||
case "GitHubClientSecret":
|
||||
common.GitHubClientSecret = value
|
||||
config.GitHubClientSecret = value
|
||||
case "Footer":
|
||||
common.Footer = value
|
||||
config.Footer = value
|
||||
case "SystemName":
|
||||
common.SystemName = value
|
||||
config.SystemName = value
|
||||
case "Logo":
|
||||
common.Logo = value
|
||||
config.Logo = value
|
||||
case "WeChatServerAddress":
|
||||
common.WeChatServerAddress = value
|
||||
config.WeChatServerAddress = value
|
||||
case "WeChatServerToken":
|
||||
common.WeChatServerToken = value
|
||||
config.WeChatServerToken = value
|
||||
case "WeChatAccountQRCodeImageURL":
|
||||
common.WeChatAccountQRCodeImageURL = value
|
||||
config.WeChatAccountQRCodeImageURL = value
|
||||
case "TurnstileSiteKey":
|
||||
common.TurnstileSiteKey = value
|
||||
config.TurnstileSiteKey = value
|
||||
case "TurnstileSecretKey":
|
||||
common.TurnstileSecretKey = value
|
||||
config.TurnstileSecretKey = value
|
||||
case "QuotaForNewUser":
|
||||
common.QuotaForNewUser, _ = strconv.Atoi(value)
|
||||
config.QuotaForNewUser, _ = strconv.Atoi(value)
|
||||
case "QuotaForInviter":
|
||||
common.QuotaForInviter, _ = strconv.Atoi(value)
|
||||
config.QuotaForInviter, _ = strconv.Atoi(value)
|
||||
case "QuotaForInvitee":
|
||||
common.QuotaForInvitee, _ = strconv.Atoi(value)
|
||||
config.QuotaForInvitee, _ = strconv.Atoi(value)
|
||||
case "QuotaRemindThreshold":
|
||||
common.QuotaRemindThreshold, _ = strconv.Atoi(value)
|
||||
config.QuotaRemindThreshold, _ = strconv.Atoi(value)
|
||||
case "PreConsumedQuota":
|
||||
common.PreConsumedQuota, _ = strconv.Atoi(value)
|
||||
config.PreConsumedQuota, _ = strconv.Atoi(value)
|
||||
case "RetryTimes":
|
||||
common.RetryTimes, _ = strconv.Atoi(value)
|
||||
config.RetryTimes, _ = strconv.Atoi(value)
|
||||
case "ModelRatio":
|
||||
err = common.UpdateModelRatioByJSONString(value)
|
||||
case "GroupRatio":
|
||||
err = common.UpdateGroupRatioByJSONString(value)
|
||||
case "CompletionRatio":
|
||||
err = common.UpdateCompletionRatioByJSONString(value)
|
||||
case "TopUpLink":
|
||||
common.TopUpLink = value
|
||||
config.TopUpLink = value
|
||||
case "ChatLink":
|
||||
common.ChatLink = value
|
||||
config.ChatLink = value
|
||||
case "ChannelDisableThreshold":
|
||||
common.ChannelDisableThreshold, _ = strconv.ParseFloat(value, 64)
|
||||
config.ChannelDisableThreshold, _ = strconv.ParseFloat(value, 64)
|
||||
case "QuotaPerUnit":
|
||||
common.QuotaPerUnit, _ = strconv.ParseFloat(value, 64)
|
||||
config.QuotaPerUnit, _ = strconv.ParseFloat(value, 64)
|
||||
case "Theme":
|
||||
config.Theme = value
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@@ -3,8 +3,8 @@ package model
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"one-api/common"
|
||||
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/helper"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
@@ -28,7 +28,7 @@ func GetAllRedemptions(startIdx int, num int) ([]*Redemption, error) {
|
||||
}
|
||||
|
||||
func SearchRedemptions(keyword string) (redemptions []*Redemption, err error) {
|
||||
err = DB.Where("id = ? or name LIKE ?", common.String2Int(keyword), keyword+"%").Find(&redemptions).Error
|
||||
err = DB.Where("id = ? or name LIKE ?", keyword, keyword+"%").Find(&redemptions).Error
|
||||
return redemptions, err
|
||||
}
|
||||
|
||||
@@ -68,7 +68,7 @@ func Redeem(key string, userId int) (quota int, err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
redemption.RedeemedTime = common.GetTimestamp()
|
||||
redemption.RedeemedTime = helper.GetTimestamp()
|
||||
redemption.Status = common.RedemptionCodeStatusUsed
|
||||
err = tx.Save(redemption).Error
|
||||
return err
|
||||
|
@@ -3,8 +3,11 @@ package model
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/helper"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"gorm.io/gorm"
|
||||
"one-api/common"
|
||||
)
|
||||
|
||||
type Token struct {
|
||||
@@ -38,39 +41,43 @@ func ValidateUserToken(key string) (token *Token, err error) {
|
||||
return nil, errors.New("未提供令牌")
|
||||
}
|
||||
token, err = CacheGetTokenByKey(key)
|
||||
if err == nil {
|
||||
if token.Status == common.TokenStatusExhausted {
|
||||
return nil, errors.New("该令牌额度已用尽")
|
||||
} else if token.Status == common.TokenStatusExpired {
|
||||
return nil, errors.New("该令牌已过期")
|
||||
if err != nil {
|
||||
logger.SysError("CacheGetTokenByKey failed: " + err.Error())
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, errors.New("无效的令牌")
|
||||
}
|
||||
if token.Status != common.TokenStatusEnabled {
|
||||
return nil, errors.New("该令牌状态不可用")
|
||||
}
|
||||
if token.ExpiredTime != -1 && token.ExpiredTime < common.GetTimestamp() {
|
||||
if !common.RedisEnabled {
|
||||
token.Status = common.TokenStatusExpired
|
||||
err := token.SelectUpdate()
|
||||
if err != nil {
|
||||
common.SysError("failed to update token status" + err.Error())
|
||||
}
|
||||
}
|
||||
return nil, errors.New("该令牌已过期")
|
||||
}
|
||||
if !token.UnlimitedQuota && token.RemainQuota <= 0 {
|
||||
if !common.RedisEnabled {
|
||||
// in this case, we can make sure the token is exhausted
|
||||
token.Status = common.TokenStatusExhausted
|
||||
err := token.SelectUpdate()
|
||||
if err != nil {
|
||||
common.SysError("failed to update token status" + err.Error())
|
||||
}
|
||||
}
|
||||
return nil, errors.New("该令牌额度已用尽")
|
||||
}
|
||||
return token, nil
|
||||
return nil, errors.New("令牌验证失败")
|
||||
}
|
||||
return nil, errors.New("无效的令牌")
|
||||
if token.Status == common.TokenStatusExhausted {
|
||||
return nil, errors.New("该令牌额度已用尽")
|
||||
} else if token.Status == common.TokenStatusExpired {
|
||||
return nil, errors.New("该令牌已过期")
|
||||
}
|
||||
if token.Status != common.TokenStatusEnabled {
|
||||
return nil, errors.New("该令牌状态不可用")
|
||||
}
|
||||
if token.ExpiredTime != -1 && token.ExpiredTime < helper.GetTimestamp() {
|
||||
if !common.RedisEnabled {
|
||||
token.Status = common.TokenStatusExpired
|
||||
err := token.SelectUpdate()
|
||||
if err != nil {
|
||||
logger.SysError("failed to update token status" + err.Error())
|
||||
}
|
||||
}
|
||||
return nil, errors.New("该令牌已过期")
|
||||
}
|
||||
if !token.UnlimitedQuota && token.RemainQuota <= 0 {
|
||||
if !common.RedisEnabled {
|
||||
// in this case, we can make sure the token is exhausted
|
||||
token.Status = common.TokenStatusExhausted
|
||||
err := token.SelectUpdate()
|
||||
if err != nil {
|
||||
logger.SysError("failed to update token status" + err.Error())
|
||||
}
|
||||
}
|
||||
return nil, errors.New("该令牌额度已用尽")
|
||||
}
|
||||
return token, nil
|
||||
}
|
||||
|
||||
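The rewritten ValidateUserToken above checks expiry and remaining quota, and only persists the status change when Redis caching is disabled, since the cached copy may be stale otherwise. A compact sketch of that decision, with plain fields standing in for the Token model and its status constants:

package main

import (
	"errors"
	"fmt"
	"time"
)

// token is a stripped-down stand-in for model.Token.
type token struct {
	ExpiredTime    int64 // -1 means the token never expires
	UnlimitedQuota bool
	RemainQuota    int
}

// validate mirrors the expiry and quota checks from ValidateUserToken;
// persistStatus stands in for the SelectUpdate call that is skipped when
// Redis is enabled.
func validate(t *token, redisEnabled bool, persistStatus func(status string)) error {
	now := time.Now().Unix()
	if t.ExpiredTime != -1 && t.ExpiredTime < now {
		if !redisEnabled {
			persistStatus("expired")
		}
		return errors.New("token expired")
	}
	if !t.UnlimitedQuota && t.RemainQuota <= 0 {
		if !redisEnabled {
			persistStatus("exhausted")
		}
		return errors.New("token quota exhausted")
	}
	return nil
}

func main() {
	t := &token{ExpiredTime: -1, RemainQuota: 0}
	err := validate(t, false, func(status string) { fmt.Println("persisting status:", status) })
	fmt.Println(err)
}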
func GetTokenByIds(id int, userId int) (*Token, error) {
|
||||
@@ -134,7 +141,7 @@ func IncreaseTokenQuota(id int, quota int) (err error) {
|
||||
if quota < 0 {
|
||||
return errors.New("quota 不能为负数!")
|
||||
}
|
||||
if common.BatchUpdateEnabled {
|
||||
if config.BatchUpdateEnabled {
|
||||
addNewRecord(BatchUpdateTypeTokenQuota, id, quota)
|
||||
return nil
|
||||
}
|
||||
@@ -146,7 +153,7 @@ func increaseTokenQuota(id int, quota int) (err error) {
|
||||
map[string]interface{}{
|
||||
"remain_quota": gorm.Expr("remain_quota + ?", quota),
|
||||
"used_quota": gorm.Expr("used_quota - ?", quota),
|
||||
"accessed_time": common.GetTimestamp(),
|
||||
"accessed_time": helper.GetTimestamp(),
|
||||
},
|
||||
).Error
|
||||
return err
|
||||
@@ -156,7 +163,7 @@ func DecreaseTokenQuota(id int, quota int) (err error) {
|
||||
if quota < 0 {
|
||||
return errors.New("quota 不能为负数!")
|
||||
}
|
||||
if common.BatchUpdateEnabled {
|
||||
if config.BatchUpdateEnabled {
|
||||
addNewRecord(BatchUpdateTypeTokenQuota, id, -quota)
|
||||
return nil
|
||||
}
|
||||
@@ -168,7 +175,7 @@ func decreaseTokenQuota(id int, quota int) (err error) {
|
||||
map[string]interface{}{
|
||||
"remain_quota": gorm.Expr("remain_quota - ?", quota),
|
||||
"used_quota": gorm.Expr("used_quota + ?", quota),
|
||||
"accessed_time": common.GetTimestamp(),
|
||||
"accessed_time": helper.GetTimestamp(),
|
||||
},
|
||||
).Error
|
||||
return err
|
||||
@@ -192,24 +199,24 @@ func PreConsumeTokenQuota(tokenId int, quota int) (err error) {
|
||||
if userQuota < quota {
|
||||
return errors.New("用户额度不足")
|
||||
}
|
||||
quotaTooLow := userQuota >= common.QuotaRemindThreshold && userQuota-quota < common.QuotaRemindThreshold
|
||||
quotaTooLow := userQuota >= config.QuotaRemindThreshold && userQuota-quota < config.QuotaRemindThreshold
|
||||
noMoreQuota := userQuota-quota <= 0
|
||||
if quotaTooLow || noMoreQuota {
|
||||
go func() {
|
||||
email, err := GetUserEmail(token.UserId)
|
||||
if err != nil {
|
||||
common.SysError("failed to fetch user email: " + err.Error())
|
||||
logger.SysError("failed to fetch user email: " + err.Error())
|
||||
}
|
||||
prompt := "您的额度即将用尽"
|
||||
if noMoreQuota {
|
||||
prompt = "您的额度已用尽"
|
||||
}
|
||||
if email != "" {
|
||||
topUpLink := fmt.Sprintf("%s/topup", common.ServerAddress)
|
||||
topUpLink := fmt.Sprintf("%s/topup", config.ServerAddress)
|
||||
err = common.SendEmail(prompt, email,
|
||||
fmt.Sprintf("%s,当前剩余额度为 %d,为了不影响您的使用,请及时充值。<br/>充值链接:<a href='%s'>%s</a>", prompt, userQuota, topUpLink, topUpLink))
|
||||
if err != nil {
|
||||
common.SysError("failed to send email" + err.Error())
|
||||
logger.SysError("failed to send email" + err.Error())
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@@ -3,10 +3,12 @@ package model
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"one-api/common"
|
||||
"strings"
|
||||
|
||||
"github.com/songquanpeng/one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/helper"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"gorm.io/gorm"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// User if you add sensitive fields, don't forget to clean them in setupLogin function.
|
||||
@@ -16,7 +18,7 @@ type User struct {
|
||||
Username string `json:"username" gorm:"unique;index" validate:"max=12"`
|
||||
Password string `json:"password" gorm:"not null;" validate:"min=8,max=20"`
|
||||
DisplayName string `json:"display_name" gorm:"index" validate:"max=20"`
|
||||
Role int `json:"role" gorm:"type:int;default:1"` // admin, common
|
||||
Role int `json:"role" gorm:"type:int;default:1"` // admin, util
|
||||
Status int `json:"status" gorm:"type:int;default:1"` // enabled, disabled
|
||||
Email string `json:"email" gorm:"index" validate:"max=50"`
|
||||
GitHubId string `json:"github_id" gorm:"column:github_id;index"`
|
||||
@@ -43,8 +45,11 @@ func GetAllUsers(startIdx int, num int) (users []*User, err error) {
|
||||
}
|
||||
|
||||
func SearchUsers(keyword string) (users []*User, err error) {
|
||||
err = DB.Omit("password").Where("id = ? or username LIKE ? or email LIKE ? or display_name LIKE ?", common.String2Int(keyword), keyword+"%", keyword+"%", keyword+"%").Find(&users).Error
|
||||
|
||||
if !common.UsingPostgreSQL {
|
||||
err = DB.Omit("password").Where("id = ? or username LIKE ? or email LIKE ? or display_name LIKE ?", keyword, keyword+"%", keyword+"%", keyword+"%").Find(&users).Error
|
||||
} else {
|
||||
err = DB.Omit("password").Where("username LIKE ? or email LIKE ? or display_name LIKE ?", keyword+"%", keyword+"%", keyword+"%").Find(&users).Error
|
||||
}
|
||||
return users, err
|
||||
}
|
||||
|
||||
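The SearchUsers change above drops the id = ? clause on PostgreSQL, where comparing an integer column against a non-numeric keyword raises an error instead of simply matching nothing. A small sketch of building the two WHERE variants; the flag is a stand-in for common.UsingPostgreSQL:

package main

import "fmt"

// userSearchWhere returns the WHERE clause and arguments used to search
// users by keyword, following the branch introduced in the diff.
func userSearchWhere(usingPostgreSQL bool, keyword string) (string, []interface{}) {
	like := keyword + "%"
	if usingPostgreSQL {
		// PostgreSQL: skip the id comparison, a text keyword would not cast to integer.
		return "username LIKE ? or email LIKE ? or display_name LIKE ?",
			[]interface{}{like, like, like}
	}
	// MySQL and SQLite tolerate comparing the integer id column with a string keyword.
	return "id = ? or username LIKE ? or email LIKE ? or display_name LIKE ?",
		[]interface{}{keyword, like, like, like}
}

func main() {
	where, args := userSearchWhere(true, "alice")
	fmt.Println(where, args)
}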
@@ -87,24 +92,24 @@ func (user *User) Insert(inviterId int) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
user.Quota = common.QuotaForNewUser
|
||||
user.AccessToken = common.GetUUID()
|
||||
user.AffCode = common.GetRandomString(4)
|
||||
user.Quota = config.QuotaForNewUser
|
||||
user.AccessToken = helper.GetUUID()
|
||||
user.AffCode = helper.GetRandomString(4)
|
||||
result := DB.Create(user)
|
||||
if result.Error != nil {
|
||||
return result.Error
|
||||
}
|
||||
if common.QuotaForNewUser > 0 {
|
||||
RecordLog(user.Id, LogTypeSystem, fmt.Sprintf("新用户注册赠送 %s", common.LogQuota(common.QuotaForNewUser)))
|
||||
if config.QuotaForNewUser > 0 {
|
||||
RecordLog(user.Id, LogTypeSystem, fmt.Sprintf("新用户注册赠送 %s", common.LogQuota(config.QuotaForNewUser)))
|
||||
}
|
||||
if inviterId != 0 {
|
||||
if common.QuotaForInvitee > 0 {
|
||||
_ = IncreaseUserQuota(user.Id, common.QuotaForInvitee)
|
||||
RecordLog(user.Id, LogTypeSystem, fmt.Sprintf("使用邀请码赠送 %s", common.LogQuota(common.QuotaForInvitee)))
|
||||
if config.QuotaForInvitee > 0 {
|
||||
_ = IncreaseUserQuota(user.Id, config.QuotaForInvitee)
|
||||
RecordLog(user.Id, LogTypeSystem, fmt.Sprintf("使用邀请码赠送 %s", common.LogQuota(config.QuotaForInvitee)))
|
||||
}
|
||||
if common.QuotaForInviter > 0 {
|
||||
_ = IncreaseUserQuota(inviterId, common.QuotaForInviter)
|
||||
RecordLog(inviterId, LogTypeSystem, fmt.Sprintf("邀请用户赠送 %s", common.LogQuota(common.QuotaForInviter)))
|
||||
if config.QuotaForInviter > 0 {
|
||||
_ = IncreaseUserQuota(inviterId, config.QuotaForInviter)
|
||||
RecordLog(inviterId, LogTypeSystem, fmt.Sprintf("邀请用户赠送 %s", common.LogQuota(config.QuotaForInviter)))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -139,7 +144,15 @@ func (user *User) ValidateAndFill() (err error) {
|
||||
if user.Username == "" || password == "" {
|
||||
return errors.New("用户名或密码为空")
|
||||
}
|
||||
DB.Where(User{Username: user.Username}).First(user)
|
||||
err = DB.Where("username = ?", user.Username).First(user).Error
|
||||
if err != nil {
|
||||
// we must make sure check username firstly
|
||||
// consider this case: a malicious user set his username as other's email
|
||||
err := DB.Where("email = ?", user.Username).First(user).Error
|
||||
if err != nil {
|
||||
return errors.New("用户名或密码错误,或用户已被封禁")
|
||||
}
|
||||
}
|
||||
okay := common.ValidatePasswordAndHash(password, user.Password)
|
||||
if !okay || user.Status != common.UserStatusEnabled {
|
||||
return errors.New("用户名或密码错误,或用户已被封禁")
|
||||
@@ -222,7 +235,7 @@ func IsAdmin(userId int) bool {
|
||||
var user User
|
||||
err := DB.Where("id = ?", userId).Select("role").Find(&user).Error
|
||||
if err != nil {
|
||||
common.SysError("no such user " + err.Error())
|
||||
logger.SysError("no such user " + err.Error())
|
||||
return false
|
||||
}
|
||||
return user.Role >= common.RoleAdminUser
|
||||
@@ -281,7 +294,7 @@ func IncreaseUserQuota(id int, quota int) (err error) {
|
||||
if quota < 0 {
|
||||
return errors.New("quota 不能为负数!")
|
||||
}
|
||||
if common.BatchUpdateEnabled {
|
||||
if config.BatchUpdateEnabled {
|
||||
addNewRecord(BatchUpdateTypeUserQuota, id, quota)
|
||||
return nil
|
||||
}
|
||||
@@ -297,7 +310,7 @@ func DecreaseUserQuota(id int, quota int) (err error) {
|
||||
if quota < 0 {
|
||||
return errors.New("quota 不能为负数!")
|
||||
}
|
||||
if common.BatchUpdateEnabled {
|
||||
if config.BatchUpdateEnabled {
|
||||
addNewRecord(BatchUpdateTypeUserQuota, id, -quota)
|
||||
return nil
|
||||
}
|
||||
@@ -315,7 +328,7 @@ func GetRootUserEmail() (email string) {
|
||||
}
|
||||
|
||||
func UpdateUserUsedQuotaAndRequestCount(id int, quota int) {
|
||||
if common.BatchUpdateEnabled {
|
||||
if config.BatchUpdateEnabled {
|
||||
addNewRecord(BatchUpdateTypeUsedQuota, id, quota)
|
||||
addNewRecord(BatchUpdateTypeRequestCount, id, 1)
|
||||
return
|
||||
@@ -331,7 +344,7 @@ func updateUserUsedQuotaAndRequestCount(id int, quota int, count int) {
|
||||
},
|
||||
).Error
|
||||
if err != nil {
|
||||
common.SysError("failed to update user used quota and request count: " + err.Error())
|
||||
logger.SysError("failed to update user used quota and request count: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -342,14 +355,14 @@ func updateUserUsedQuota(id int, quota int) {
|
||||
},
|
||||
).Error
|
||||
if err != nil {
|
||||
common.SysError("failed to update user used quota: " + err.Error())
|
||||
logger.SysError("failed to update user used quota: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func updateUserRequestCount(id int, count int) {
|
||||
err := DB.Model(&User{}).Where("id = ?", id).Update("request_count", gorm.Expr("request_count + ?", count)).Error
|
||||
if err != nil {
|
||||
common.SysError("failed to update user request count: " + err.Error())
|
||||
logger.SysError("failed to update user request count: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1,7 +1,8 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"one-api/common"
|
||||
"github.com/songquanpeng/one-api/common/config"
|
||||
"github.com/songquanpeng/one-api/common/logger"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
@@ -28,7 +29,7 @@ func init() {
|
||||
func InitBatchUpdater() {
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(time.Duration(common.BatchUpdateInterval) * time.Second)
|
||||
time.Sleep(time.Duration(config.BatchUpdateInterval) * time.Second)
|
||||
batchUpdate()
|
||||
}
|
||||
}()
|
||||
@@ -45,7 +46,7 @@ func addNewRecord(type_ int, id int, value int) {
|
||||
}
|
||||
|
||||
func batchUpdate() {
|
||||
common.SysLog("batch update started")
|
||||
logger.SysLog("batch update started")
|
||||
for i := 0; i < BatchUpdateTypeCount; i++ {
|
||||
batchUpdateLocks[i].Lock()
|
||||
store := batchUpdateStores[i]
|
||||
@@ -57,12 +58,12 @@ func batchUpdate() {
|
||||
case BatchUpdateTypeUserQuota:
|
||||
err := increaseUserQuota(key, value)
|
||||
if err != nil {
|
||||
common.SysError("failed to batch update user quota: " + err.Error())
|
||||
logger.SysError("failed to batch update user quota: " + err.Error())
|
||||
}
|
||||
case BatchUpdateTypeTokenQuota:
|
||||
err := increaseTokenQuota(key, value)
|
||||
if err != nil {
|
||||
common.SysError("failed to batch update token quota: " + err.Error())
|
||||
logger.SysError("failed to batch update token quota: " + err.Error())
|
||||
}
|
||||
case BatchUpdateTypeUsedQuota:
|
||||
updateUserUsedQuota(key, value)
|
||||
@@ -73,5 +74,5 @@ func batchUpdate() {
|
||||
}
|
||||
}
|
||||
}
|
||||
common.SysLog("batch update finished")
|
||||
logger.SysLog("batch update finished")
|
||||
}
|
||||
|
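The batch updater shown above accumulates per-id deltas in in-memory maps guarded by per-type locks (addNewRecord filling batchUpdateStores) and flushes them on a timer. A minimal sketch of the accumulate-then-flush idea with a single store; the names are illustrative, not the repository's:

package main

import (
	"fmt"
	"sync"
	"time"
)

// deltaStore accumulates additive updates keyed by record id.
type deltaStore struct {
	mu    sync.Mutex
	store map[int]int
}

func (s *deltaStore) add(id, value int) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.store == nil {
		s.store = make(map[int]int)
	}
	s.store[id] += value
}

// flush swaps the map out under the lock and applies each aggregated delta
// once, so many small increments become one database write per id.
func (s *deltaStore) flush(apply func(id, delta int)) {
	s.mu.Lock()
	pending := s.store
	s.store = make(map[int]int)
	s.mu.Unlock()
	for id, delta := range pending {
		apply(id, delta)
	}
}

func main() {
	var quotas deltaStore
	quotas.add(1, 10)
	quotas.add(1, 5)
	quotas.add(2, 3)
	time.Sleep(10 * time.Millisecond) // stands in for the BatchUpdateInterval timer
	quotas.flush(func(id, delta int) { fmt.Printf("id %d: +%d\n", id, delta) })
}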
@@ -1,30 +0,0 @@
|
||||
package aigc2d
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"one-api/providers/base"
|
||||
)
|
||||
|
||||
func (p *Aigc2dProvider) Balance(channel *model.Channel) (float64, error) {
|
||||
fullRequestURL := p.GetFullRequestURL("/dashboard/billing/credit_grants", "")
|
||||
headers := p.GetRequestHeaders()
|
||||
|
||||
client := common.NewClient()
|
||||
req, err := client.NewRequest("GET", fullRequestURL, common.WithHeader(headers))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// 发送请求
|
||||
var response base.BalanceResponse
|
||||
_, errWithCode := common.SendRequest(req, &response, false)
|
||||
if errWithCode != nil {
|
||||
return 0, errors.New(errWithCode.OpenAIError.Message)
|
||||
}
|
||||
|
||||
channel.UpdateBalance(response.TotalAvailable)
|
||||
|
||||
return response.TotalAvailable, nil
|
||||
}
|
@@ -1,20 +0,0 @@
|
||||
package aigc2d
|
||||
|
||||
import (
|
||||
"one-api/providers/base"
|
||||
"one-api/providers/openai"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
type Aigc2dProviderFactory struct{}
|
||||
|
||||
func (f Aigc2dProviderFactory) Create(c *gin.Context) base.ProviderInterface {
|
||||
return &Aigc2dProvider{
|
||||
OpenAIProvider: openai.CreateOpenAIProvider(c, "https://api.aigc2d.com"),
|
||||
}
|
||||
}
|
||||
|
||||
type Aigc2dProvider struct {
|
||||
*openai.OpenAIProvider
|
||||
}
|
@@ -1,35 +0,0 @@
|
||||
package aiproxy
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
)
|
||||
|
||||
func (p *AIProxyProvider) Balance(channel *model.Channel) (float64, error) {
|
||||
fullRequestURL := "https://aiproxy.io/api/report/getUserOverview"
|
||||
headers := make(map[string]string)
|
||||
headers["Api-Key"] = channel.Key
|
||||
|
||||
client := common.NewClient()
|
||||
req, err := client.NewRequest("GET", fullRequestURL, common.WithHeader(headers))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// 发送请求
|
||||
var response AIProxyUserOverviewResponse
|
||||
_, errWithCode := common.SendRequest(req, &response, false)
|
||||
if errWithCode != nil {
|
||||
return 0, errors.New(errWithCode.OpenAIError.Message)
|
||||
}
|
||||
|
||||
if !response.Success {
|
||||
return 0, fmt.Errorf("code: %d, message: %s", response.ErrorCode, response.Message)
|
||||
}
|
||||
|
||||
channel.UpdateBalance(response.Data.TotalPoints)
|
||||
|
||||
return response.Data.TotalPoints, nil
|
||||
}
|
@@ -1,20 +0,0 @@
|
||||
package aiproxy
|
||||
|
||||
import (
|
||||
"one-api/providers/base"
|
||||
"one-api/providers/openai"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
type AIProxyProviderFactory struct{}
|
||||
|
||||
func (f AIProxyProviderFactory) Create(c *gin.Context) base.ProviderInterface {
|
||||
return &AIProxyProvider{
|
||||
OpenAIProvider: openai.CreateOpenAIProvider(c, "https://api.aiproxy.io"),
|
||||
}
|
||||
}
|
||||
|
||||
type AIProxyProvider struct {
|
||||
*openai.OpenAIProvider
|
||||
}
|
@@ -1,10 +0,0 @@
|
||||
package aiproxy
|
||||
|
||||
type AIProxyUserOverviewResponse struct {
|
||||
Success bool `json:"success"`
|
||||
Message string `json:"message"`
|
||||
ErrorCode int `json:"error_code"`
|
||||
Data struct {
|
||||
TotalPoints float64 `json:"totalPoints"`
|
||||
} `json:"data"`
|
||||
}
|
@@ -1,41 +0,0 @@
|
||||
package ali
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"one-api/providers/base"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// 定义供应商工厂
|
||||
type AliProviderFactory struct{}
|
||||
|
||||
// 创建 AliProvider
|
||||
// https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation
|
||||
func (f AliProviderFactory) Create(c *gin.Context) base.ProviderInterface {
|
||||
return &AliProvider{
|
||||
BaseProvider: base.BaseProvider{
|
||||
BaseURL: "https://dashscope.aliyuncs.com",
|
||||
ChatCompletions: "/api/v1/services/aigc/text-generation/generation",
|
||||
Embeddings: "/api/v1/services/embeddings/text-embedding/text-embedding",
|
||||
Context: c,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type AliProvider struct {
|
||||
base.BaseProvider
|
||||
}
|
||||
|
||||
// 获取请求头
|
||||
func (p *AliProvider) GetRequestHeaders() (headers map[string]string) {
|
||||
headers = make(map[string]string)
|
||||
p.CommonRequestHeaders(headers)
|
||||
headers["Authorization"] = fmt.Sprintf("Bearer %s", p.Context.GetString("api_key"))
|
||||
if p.Context.GetString("plugin") != "" {
|
||||
headers["X-DashScope-Plugin"] = p.Context.GetString("plugin")
|
||||
}
|
||||
|
||||
return headers
|
||||
}
|
@@ -1,216 +0,0 @@
|
||||
package ali
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/types"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// 阿里云响应处理
|
||||
func (aliResponse *AliChatResponse) ResponseHandler(resp *http.Response) (OpenAIResponse any, errWithCode *types.OpenAIErrorWithStatusCode) {
|
||||
if aliResponse.Code != "" {
|
||||
errWithCode = &types.OpenAIErrorWithStatusCode{
|
||||
OpenAIError: types.OpenAIError{
|
||||
Message: aliResponse.Message,
|
||||
Type: aliResponse.Code,
|
||||
Param: aliResponse.RequestId,
|
||||
Code: aliResponse.Code,
|
||||
},
|
||||
StatusCode: resp.StatusCode,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
choice := types.ChatCompletionChoice{
|
||||
Index: 0,
|
||||
Message: types.ChatCompletionMessage{
|
||||
Role: "assistant",
|
||||
Content: aliResponse.Output.Text,
|
||||
},
|
||||
FinishReason: aliResponse.Output.FinishReason,
|
||||
}
|
||||
|
||||
OpenAIResponse = types.ChatCompletionResponse{
|
||||
ID: aliResponse.RequestId,
|
||||
Object: "chat.completion",
|
||||
Created: common.GetTimestamp(),
|
||||
Choices: []types.ChatCompletionChoice{choice},
|
||||
Usage: &types.Usage{
|
||||
PromptTokens: aliResponse.Usage.InputTokens,
|
||||
CompletionTokens: aliResponse.Usage.OutputTokens,
|
||||
TotalTokens: aliResponse.Usage.InputTokens + aliResponse.Usage.OutputTokens,
|
||||
},
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// 获取聊天请求体
|
||||
func (p *AliProvider) getChatRequestBody(request *types.ChatCompletionRequest) *AliChatRequest {
|
||||
messages := make([]AliMessage, 0, len(request.Messages))
|
||||
for i := 0; i < len(request.Messages); i++ {
|
||||
message := request.Messages[i]
|
||||
messages = append(messages, AliMessage{
|
||||
Content: message.StringContent(),
|
||||
Role: strings.ToLower(message.Role),
|
||||
})
|
||||
}
|
||||
return &AliChatRequest{
|
||||
Model: request.Model,
|
||||
Input: AliInput{
|
||||
Messages: messages,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// 聊天
|
||||
func (p *AliProvider) ChatAction(request *types.ChatCompletionRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {
|
||||
|
||||
requestBody := p.getChatRequestBody(request)
|
||||
fullRequestURL := p.GetFullRequestURL(p.ChatCompletions, request.Model)
|
||||
headers := p.GetRequestHeaders()
|
||||
if request.Stream {
|
||||
headers["Accept"] = "text/event-stream"
|
||||
headers["X-DashScope-SSE"] = "enable"
|
||||
}
|
||||
|
||||
client := common.NewClient()
|
||||
req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
|
||||
if err != nil {
|
||||
return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
if request.Stream {
|
||||
usage, errWithCode = p.sendStreamRequest(req)
|
||||
if errWithCode != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if usage == nil {
|
||||
usage = &types.Usage{
|
||||
PromptTokens: 0,
|
||||
CompletionTokens: 0,
|
||||
TotalTokens: 0,
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
aliResponse := &AliChatResponse{}
|
||||
errWithCode = p.SendRequest(req, aliResponse, false)
|
||||
if errWithCode != nil {
|
||||
return
|
||||
}
|
||||
|
||||
usage = &types.Usage{
|
||||
PromptTokens: aliResponse.Usage.InputTokens,
|
||||
CompletionTokens: aliResponse.Usage.OutputTokens,
|
||||
TotalTokens: aliResponse.Usage.InputTokens + aliResponse.Usage.OutputTokens,
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// 阿里云响应转OpenAI响应
|
||||
func (p *AliProvider) streamResponseAli2OpenAI(aliResponse *AliChatResponse) *types.ChatCompletionStreamResponse {
|
||||
var choice types.ChatCompletionStreamChoice
|
||||
choice.Delta.Content = aliResponse.Output.Text
|
||||
if aliResponse.Output.FinishReason != "null" {
|
||||
finishReason := aliResponse.Output.FinishReason
|
||||
choice.FinishReason = &finishReason
|
||||
}
|
||||
|
||||
response := types.ChatCompletionStreamResponse{
|
||||
ID: aliResponse.RequestId,
|
||||
Object: "chat.completion.chunk",
|
||||
Created: common.GetTimestamp(),
|
||||
Model: "ernie-bot",
|
||||
Choices: []types.ChatCompletionStreamChoice{choice},
|
||||
}
|
||||
return &response
|
||||
}
|
||||
|
||||
// 发送流请求
|
||||
func (p *AliProvider) sendStreamRequest(req *http.Request) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {
|
||||
defer req.Body.Close()
|
||||
|
||||
usage = &types.Usage{}
|
||||
// 发送请求
|
||||
resp, err := common.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, common.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
if common.IsFailureStatusCode(resp) {
|
||||
return nil, common.HandleErrorResp(resp)
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
|
||||
if atEOF && len(data) == 0 {
|
||||
return 0, nil, nil
|
||||
}
|
||||
if i := strings.Index(string(data), "\n"); i >= 0 {
|
||||
return i + 1, data[0:i], nil
|
||||
}
|
||||
if atEOF {
|
||||
return len(data), data, nil
|
||||
}
|
||||
return 0, nil, nil
|
||||
})
|
||||
dataChan := make(chan string)
|
||||
stopChan := make(chan bool)
|
||||
go func() {
|
||||
for scanner.Scan() {
|
||||
data := scanner.Text()
|
||||
if len(data) < 5 { // ignore blank line or wrong format
|
||||
continue
|
||||
}
|
||||
if data[:5] != "data:" {
|
||||
continue
|
||||
}
|
||||
data = data[5:]
|
||||
dataChan <- data
|
||||
}
|
||||
stopChan <- true
|
||||
}()
|
||||
common.SetEventStreamHeaders(p.Context)
|
||||
lastResponseText := ""
|
||||
p.Context.Stream(func(w io.Writer) bool {
|
||||
select {
|
||||
case data := <-dataChan:
|
||||
var aliResponse AliChatResponse
|
||||
err := json.Unmarshal([]byte(data), &aliResponse)
|
||||
if err != nil {
|
||||
common.SysError("error unmarshalling stream response: " + err.Error())
|
||||
return true
|
||||
}
|
||||
if aliResponse.Usage.OutputTokens != 0 {
|
||||
usage.PromptTokens = aliResponse.Usage.InputTokens
|
||||
usage.CompletionTokens = aliResponse.Usage.OutputTokens
|
||||
usage.TotalTokens = aliResponse.Usage.InputTokens + aliResponse.Usage.OutputTokens
|
||||
}
|
||||
response := p.streamResponseAli2OpenAI(&aliResponse)
|
||||
response.Choices[0].Delta.Content = strings.TrimPrefix(response.Choices[0].Delta.Content, lastResponseText)
|
||||
lastResponseText = aliResponse.Output.Text
|
||||
jsonResponse, err := json.Marshal(response)
|
||||
if err != nil {
|
||||
common.SysError("error marshalling stream response: " + err.Error())
|
||||
return true
|
||||
}
|
||||
p.Context.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
|
||||
return true
|
||||
case <-stopChan:
|
||||
p.Context.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
|
||||
return false
|
||||
}
|
||||
})
|
||||
|
||||
return
|
||||
}
|
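The removed ali chat handler above reads the upstream SSE stream with a custom bufio.Scanner split function and strips the "data:" prefix from each line before decoding it. A self-contained sketch of that line splitting over an in-memory stream, with no HTTP involved:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// Stand-in for the upstream response body.
	body := "data: {\"output\":{\"text\":\"Hello\"}}\ndata: [DONE]\n"
	scanner := bufio.NewScanner(strings.NewReader(body))
	// Same split rule as the removed sendStreamRequest: cut on '\n',
	// return the remainder at EOF.
	scanner.Split(func(data []byte, atEOF bool) (int, []byte, error) {
		if atEOF && len(data) == 0 {
			return 0, nil, nil
		}
		if i := strings.Index(string(data), "\n"); i >= 0 {
			return i + 1, data[0:i], nil
		}
		if atEOF {
			return len(data), data, nil
		}
		return 0, nil, nil
	})
	for scanner.Scan() {
		line := scanner.Text()
		if len(line) < 5 || line[:5] != "data:" {
			continue // ignore blank lines or unexpected formats
		}
		payload := strings.TrimSpace(line[5:])
		fmt.Println("event payload:", payload)
	}
}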
@@ -1,73 +0,0 @@
|
||||
package ali
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/types"
|
||||
)
|
||||
|
||||
// 嵌入请求处理
|
||||
func (aliResponse *AliEmbeddingResponse) ResponseHandler(resp *http.Response) (any, *types.OpenAIErrorWithStatusCode) {
|
||||
if aliResponse.Code != "" {
|
||||
return nil, &types.OpenAIErrorWithStatusCode{
|
||||
OpenAIError: types.OpenAIError{
|
||||
Message: aliResponse.Message,
|
||||
Type: aliResponse.Code,
|
||||
Param: aliResponse.RequestId,
|
||||
Code: aliResponse.Code,
|
||||
},
|
||||
StatusCode: resp.StatusCode,
|
||||
}
|
||||
}
|
||||
|
||||
openAIEmbeddingResponse := &types.EmbeddingResponse{
|
||||
Object: "list",
|
||||
Data: make([]types.Embedding, 0, len(aliResponse.Output.Embeddings)),
|
||||
Model: "text-embedding-v1",
|
||||
Usage: &types.Usage{TotalTokens: aliResponse.Usage.TotalTokens},
|
||||
}
|
||||
|
||||
for _, item := range aliResponse.Output.Embeddings {
|
||||
openAIEmbeddingResponse.Data = append(openAIEmbeddingResponse.Data, types.Embedding{
|
||||
Object: `embedding`,
|
||||
Index: item.TextIndex,
|
||||
Embedding: item.Embedding,
|
||||
})
|
||||
}
|
||||
|
||||
return openAIEmbeddingResponse, nil
|
||||
}
|
||||
|
||||
// 获取嵌入请求体
|
||||
func (p *AliProvider) getEmbeddingsRequestBody(request *types.EmbeddingRequest) *AliEmbeddingRequest {
|
||||
return &AliEmbeddingRequest{
|
||||
Model: "text-embedding-v1",
|
||||
Input: struct {
|
||||
Texts []string `json:"texts"`
|
||||
}{
|
||||
Texts: request.ParseInput(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (p *AliProvider) EmbeddingsAction(request *types.EmbeddingRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {
|
||||
|
||||
requestBody := p.getEmbeddingsRequestBody(request)
|
||||
fullRequestURL := p.GetFullRequestURL(p.Embeddings, request.Model)
|
||||
headers := p.GetRequestHeaders()
|
||||
|
||||
client := common.NewClient()
|
||||
req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
|
||||
if err != nil {
|
||||
return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
aliEmbeddingResponse := &AliEmbeddingResponse{}
|
||||
errWithCode = p.SendRequest(req, aliEmbeddingResponse, false)
|
||||
if errWithCode != nil {
|
||||
return
|
||||
}
|
||||
usage = &types.Usage{TotalTokens: aliEmbeddingResponse.Usage.TotalTokens}
|
||||
|
||||
return usage, nil
|
||||
}
|
@@ -1,70 +0,0 @@
package ali

type AliError struct {
    Code      string `json:"code"`
    Message   string `json:"message"`
    RequestId string `json:"request_id"`
}

type AliUsage struct {
    InputTokens  int `json:"input_tokens"`
    OutputTokens int `json:"output_tokens"`
    TotalTokens  int `json:"total_tokens"`
}

type AliMessage struct {
    Content string `json:"content"`
    Role    string `json:"role"`
}

type AliInput struct {
    // Prompt string `json:"prompt"`
    Messages []AliMessage `json:"messages"`
}

type AliParameters struct {
    TopP         float64 `json:"top_p,omitempty"`
    TopK         int     `json:"top_k,omitempty"`
    Seed         uint64  `json:"seed,omitempty"`
    EnableSearch bool    `json:"enable_search,omitempty"`
}

type AliChatRequest struct {
    Model      string        `json:"model"`
    Input      AliInput      `json:"input"`
    Parameters AliParameters `json:"parameters,omitempty"`
}

type AliOutput struct {
    Text         string `json:"text"`
    FinishReason string `json:"finish_reason"`
}

type AliChatResponse struct {
    Output AliOutput `json:"output"`
    Usage  AliUsage  `json:"usage"`
    AliError
}

type AliEmbeddingRequest struct {
    Model string `json:"model"`
    Input struct {
        Texts []string `json:"texts"`
    } `json:"input"`
    Parameters *struct {
        TextType string `json:"text_type,omitempty"`
    } `json:"parameters,omitempty"`
}

type AliEmbedding struct {
    Embedding []float64 `json:"embedding"`
    TextIndex int       `json:"text_index"`
}

type AliEmbeddingResponse struct {
    Output struct {
        Embeddings []AliEmbedding `json:"embeddings"`
    } `json:"output"`
    Usage AliUsage `json:"usage"`
    AliError
}
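
For orientation, here is a minimal sketch (not part of this diff) of the JSON wire format the struct tags above imply for an AliChatRequest; the model name and message values are illustrative assumptions.

package ali

import (
    "encoding/json"
    "fmt"
)

// ExampleChatRequestJSON marshals an AliChatRequest to show the JSON shape
// implied by the struct tags above. Field values are examples only.
func ExampleChatRequestJSON() {
    req := AliChatRequest{
        Model: "qwen-turbo", // illustrative model name
        Input: AliInput{
            Messages: []AliMessage{
                {Role: "user", Content: "Hello"},
            },
        },
        Parameters: AliParameters{TopP: 0.8, EnableSearch: true},
    }
    b, _ := json.MarshalIndent(req, "", "  ")
    fmt.Println(string(b))
    // Expected shape:
    // {"model":"qwen-turbo","input":{"messages":[{"content":"Hello","role":"user"}]},
    //  "parameters":{"top_p":0.8,"enable_search":true}}
}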
@@ -1,30 +0,0 @@
package api2d

import (
    "errors"
    "one-api/common"
    "one-api/model"
    "one-api/providers/base"
)

func (p *Api2dProvider) Balance(channel *model.Channel) (float64, error) {
    fullRequestURL := p.GetFullRequestURL("/dashboard/billing/credit_grants", "")
    headers := p.GetRequestHeaders()

    client := common.NewClient()
    req, err := client.NewRequest("GET", fullRequestURL, common.WithHeader(headers))
    if err != nil {
        return 0, err
    }

    // Send the request
    var response base.BalanceResponse
    _, errWithCode := common.SendRequest(req, &response, false)
    if errWithCode != nil {
        return 0, errors.New(errWithCode.OpenAIError.Message)
    }

    channel.UpdateBalance(response.TotalAvailable)

    return response.TotalAvailable, nil
}
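
As an aside (an assumption, not something stated in this diff), base.BalanceResponse presumably mirrors the legacy OpenAI-style credit_grants payload; a rough sketch of that shape:

package api2d

// Assumed shape of the /dashboard/billing/credit_grants payload that
// base.BalanceResponse is expected to map; these field names are assumptions.
type creditGrantsSketch struct {
    TotalGranted   float64 `json:"total_granted"`
    TotalUsed      float64 `json:"total_used"`
    TotalAvailable float64 `json:"total_available"`
}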
@@ -1,21 +0,0 @@
package api2d

import (
    "one-api/providers/base"
    "one-api/providers/openai"

    "github.com/gin-gonic/gin"
)

type Api2dProviderFactory struct{}

// Create an Api2dProvider
func (f Api2dProviderFactory) Create(c *gin.Context) base.ProviderInterface {
    return &Api2dProvider{
        OpenAIProvider: openai.CreateOpenAIProvider(c, "https://oa.api2d.net"),
    }
}

type Api2dProvider struct {
    *openai.OpenAIProvider
}
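
The factory above simply wraps the OpenAI provider with a different base URL; below is a hypothetical sketch (not taken from this diff) of how such factories might be registered and resolved per channel type. The registry, the channel-type key, and the stand-in interfaces are illustrative assumptions.

package providers

import (
    "fmt"

    "github.com/gin-gonic/gin"
)

// Minimal stand-ins for one-api's base.ProviderInterface and factory contract.
type ProviderInterface interface{}

type ProviderFactory interface {
    Create(c *gin.Context) ProviderInterface
}

// providerFactories maps a channel type to its factory (hypothetical registry).
var providerFactories = map[int]ProviderFactory{}

// RegisterFactory associates a channel type with its factory.
func RegisterFactory(channelType int, factory ProviderFactory) {
    providerFactories[channelType] = factory
}

// GetProvider resolves the factory for a channel type and builds a provider
// bound to the current gin request context.
func GetProvider(channelType int, c *gin.Context) (ProviderInterface, error) {
    factory, ok := providerFactories[channelType]
    if !ok {
        return nil, fmt.Errorf("no provider factory registered for channel type %d", channelType)
    }
    return factory.Create(c), nil
}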
@@ -1,30 +0,0 @@
package api2gpt

import (
    "errors"
    "one-api/common"
    "one-api/model"
    "one-api/providers/base"
)

func (p *Api2gptProvider) Balance(channel *model.Channel) (float64, error) {
    fullRequestURL := p.GetFullRequestURL("/dashboard/billing/credit_grants", "")
    headers := p.GetRequestHeaders()

    client := common.NewClient()
    req, err := client.NewRequest("GET", fullRequestURL, common.WithHeader(headers))
    if err != nil {
        return 0, err
    }

    // Send the request
    var response base.BalanceResponse
    _, errWithCode := common.SendRequest(req, &response, false)
    if errWithCode != nil {
        return 0, errors.New(errWithCode.OpenAIError.Message)
    }

    channel.UpdateBalance(response.TotalAvailable)

    return response.TotalRemaining, nil
}
@@ -1,20 +0,0 @@
package api2gpt

import (
    "one-api/providers/base"
    "one-api/providers/openai"

    "github.com/gin-gonic/gin"
)

type Api2gptProviderFactory struct{}

func (f Api2gptProviderFactory) Create(c *gin.Context) base.ProviderInterface {
    return &Api2gptProvider{
        OpenAIProvider: openai.CreateOpenAIProvider(c, "https://api.api2gpt.com"),
    }
}

type Api2gptProvider struct {
    *openai.OpenAIProvider
}
@@ -1,36 +0,0 @@
package azure

import (
    "one-api/providers/base"
    "one-api/providers/openai"

    "github.com/gin-gonic/gin"
)

type AzureProviderFactory struct{}

// Create an AzureProvider
func (f AzureProviderFactory) Create(c *gin.Context) base.ProviderInterface {
    return &AzureProvider{
        OpenAIProvider: openai.OpenAIProvider{
            BaseProvider: base.BaseProvider{
                BaseURL:             "",
                Completions:         "/completions",
                ChatCompletions:     "/chat/completions",
                Embeddings:          "/embeddings",
                AudioTranscriptions: "/audio/transcriptions",
                AudioTranslations:   "/audio/translations",
                ImagesGenerations:   "/images/generations",
                // ImagesEdit:       "/images/edit",
                // ImagesVariations: "/images/variations",
                Context: c,
                // AudioSpeech: "/audio/speech",
            },
            IsAzure: true,
        },
    }
}

type AzureProvider struct {
    openai.OpenAIProvider
}
@@ -1,102 +0,0 @@
package azure

import (
    "errors"
    "fmt"
    "net/http"
    "one-api/common"
    "one-api/providers/openai"
    "one-api/types"
    "time"
)

func (c *ImageAzureResponse) ResponseHandler(resp *http.Response) (OpenAIResponse any, errWithCode *types.OpenAIErrorWithStatusCode) {
    if c.Status == "canceled" || c.Status == "failed" {
        errWithCode = &types.OpenAIErrorWithStatusCode{
            OpenAIError: types.OpenAIError{
                Message: c.Error.Message,
                Type:    "one_api_error",
                Code:    c.Error.Code,
            },
            StatusCode: resp.StatusCode,
        }
        return
    }

    operation_location := resp.Header.Get("operation-location")
    if operation_location == "" {
        return nil, common.ErrorWrapper(errors.New("image url is empty"), "get_images_url_failed", http.StatusInternalServerError)
    }

    client := common.NewClient()
    req, err := client.NewRequest("GET", operation_location, common.WithHeader(c.Header))
    if err != nil {
        return nil, common.ErrorWrapper(err, "get_images_request_failed", http.StatusInternalServerError)
    }

    getImageAzureResponse := ImageAzureResponse{}
    for i := 0; i < 3; i++ {
        // Sleep for 2 seconds
        time.Sleep(2 * time.Second)
        _, errWithCode = common.SendRequest(req, &getImageAzureResponse, false)
        fmt.Println("getImageAzureResponse", getImageAzureResponse)
        if errWithCode != nil {
            return
        }

        if getImageAzureResponse.Status == "canceled" || getImageAzureResponse.Status == "failed" {
            return nil, &types.OpenAIErrorWithStatusCode{
                OpenAIError: types.OpenAIError{
                    Message: c.Error.Message,
                    Type:    "get_images_request_failed",
                    Code:    c.Error.Code,
                },
                StatusCode: resp.StatusCode,
            }
        }
        if getImageAzureResponse.Status == "succeeded" {
            return getImageAzureResponse.Result, nil
        }
    }

    return nil, common.ErrorWrapper(errors.New("get image Timeout"), "get_images_url_failed", http.StatusInternalServerError)
}

func (p *AzureProvider) ImageGenerationsAction(request *types.ImageRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {

    requestBody, err := p.GetRequestBody(&request, isModelMapped)
    if err != nil {
        return nil, common.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
    }

    fullRequestURL := p.GetFullRequestURL(p.ImagesGenerations, request.Model)
    headers := p.GetRequestHeaders()

    client := common.NewClient()
    req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
    if err != nil {
        return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
    }

    if request.Model == "dall-e-2" {
        imageAzureResponse := &ImageAzureResponse{
            Header: headers,
        }
        errWithCode = p.SendRequest(req, imageAzureResponse, false)
    } else {
        openAIProviderImageResponseResponse := &openai.OpenAIProviderImageResponseResponse{}
        errWithCode = p.SendRequest(req, openAIProviderImageResponseResponse, true)
    }

    if errWithCode != nil {
        return
    }

    usage = &types.Usage{
        PromptTokens:     promptTokens,
        CompletionTokens: 0,
        TotalTokens:      promptTokens,
    }

    return
}
@@ -1,21 +0,0 @@
package azure

import "one-api/types"

type ImageAzureResponse struct {
    ID      string              `json:"id,omitempty"`
    Created int64               `json:"created,omitempty"`
    Expires int64               `json:"expires,omitempty"`
    Result  types.ImageResponse `json:"result,omitempty"`
    Status  string              `json:"status,omitempty"`
    Error   ImageAzureError     `json:"error,omitempty"`
    Header  map[string]string   `json:"header,omitempty"`
}

type ImageAzureError struct {
    Code       string   `json:"code,omitempty"`
    Target     string   `json:"target,omitempty"`
    Message    string   `json:"message,omitempty"`
    Details    []string `json:"details,omitempty"`
    InnerError any      `json:"innererror,omitempty"`
}
@@ -1,36 +0,0 @@
package azureSpeech

import (
    "one-api/providers/base"

    "github.com/gin-gonic/gin"
)

// Define the provider factory
type AzureSpeechProviderFactory struct{}

// Create an AzureSpeechProvider
func (f AzureSpeechProviderFactory) Create(c *gin.Context) base.ProviderInterface {
    return &AzureSpeechProvider{
        BaseProvider: base.BaseProvider{
            BaseURL:     "",
            AudioSpeech: "/cognitiveservices/v1",
            Context:     c,
        },
    }
}

type AzureSpeechProvider struct {
    base.BaseProvider
}

// Get the request headers
func (p *AzureSpeechProvider) GetRequestHeaders() (headers map[string]string) {
    headers = make(map[string]string)
    headers["Ocp-Apim-Subscription-Key"] = p.Context.GetString("api_key")
    headers["Content-Type"] = "application/ssml+xml"
    headers["User-Agent"] = "OneAPI"
    // headers["X-Microsoft-OutputFormat"] = "audio-16khz-128kbitrate-mono-mp3"

    return headers
}
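
Since the headers above declare an application/ssml+xml body, here is a minimal sketch (an assumption, not part of this diff) of building the SSML payload such a speech request would carry; the voice name and language are illustrative.

package azureSpeech

import "fmt"

// buildSSML is a hypothetical helper that wraps text in the SSML envelope
// Azure's text-to-speech endpoint expects; values are illustrative assumptions.
// Example: buildSSML("en-US-JennyNeural", "Hello, world.")
func buildSSML(voice, text string) string {
    return fmt.Sprintf(
        "<speak version='1.0' xml:lang='en-US'><voice name='%s'>%s</voice></speak>",
        voice, text,
    )
}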
Some files were not shown because too many files have changed in this diff.