Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-09-20 02:26:38 +08:00)
docs: update README for clarity and fix translation issues; improve token update descriptions
parent b5a242da15
commit 388f0ef6aa
README.md (46 changes)
@@ -49,24 +49,24 @@ oneapi:
    options:
      max-size: "10m"
  environment:
    # (optional) SESSION_SECRET set a fixed session secret so that user sessions won't be invalidated after server restart
    SESSION_SECRET: xxxxxxx
    # (optional) DEBUG enable debug mode
    DEBUG: "true"
    # (optional) DEBUG_SQL display SQL logs
    DEBUG_SQL: "true"
    # (optional) ENFORCE_INCLUDE_USAGE require upstream API responses to include usage field
    ENFORCE_INCLUDE_USAGE: "true"
    # (optional) GLOBAL_API_RATE_LIMIT maximum API requests per IP within three minutes, default is 1000
    GLOBAL_API_RATE_LIMIT: 1000
    # (optional) GLOBAL_WEB_RATE_LIMIT maximum web page requests per IP within three minutes, default is 1000
    GLOBAL_WEB_RATE_LIMIT: 1000
    # (optional) REDIS_CONN_STRING set REDIS cache connection
    REDIS_CONN_STRING: redis://100.122.41.16:6379/1
    # (optional) FRONTEND_BASE_URL redirect page requests to specified address, server-side setting only
    FRONTEND_BASE_URL: https://oneapi.laisky.com
    # (optional) OPENROUTER_PROVIDER_SORT set sorting method for OpenRouter Providers, default is throughput
    OPENROUTER_PROVIDER_SORT: throughput
  volumes:
    - /var/lib/oneapi:/data
  ports:
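These settings are ordinary environment variables read by the server at startup. As a rough illustration of the "default is 1000" behaviour described for the rate-limit options (a generic Go sketch, not the project's actual configuration loader; `envInt` is a hypothetical helper):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// envInt reads an integer environment variable, falling back to def
// when the variable is unset or not a valid number.
func envInt(key string, def int) int {
	raw, ok := os.LookupEnv(key)
	if !ok {
		return def
	}
	n, err := strconv.Atoi(raw)
	if err != nil {
		return def
	}
	return n
}

func main() {
	// GLOBAL_API_RATE_LIMIT: maximum API requests per IP within three minutes.
	limit := envInt("GLOBAL_API_RATE_LIMIT", 1000)
	fmt.Println("global API rate limit:", limit)
}
```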
@@ -213,10 +213,12 @@ Supports two URL parameters: `thinking` and `reasoning_format`.
## Bug fix

- [BUGFIX: Several issues when updating tokens #1933](https://github.com/songquanpeng/one-api/pull/1933)
- [feat(audio): count whisper-1 quota by audio duration #2022](https://github.com/songquanpeng/one-api/pull/2022)
- [fix: Fix issue where high-quota users using low-quota tokens aren't pre-charged, causing large token deficits under high concurrency #25](https://github.com/Laisky/one-api/pull/25)
- [fix: channel test false negative #2065](https://github.com/songquanpeng/one-api/pull/2065)
- [fix: resolve "bufio.Scanner: token too long" error by increasing buffer size #2128](https://github.com/songquanpeng/one-api/pull/2128) (see the sketch after this list)
- [feat: Enhance VolcEngine channel support with bot model #2131](https://github.com/songquanpeng/one-api/pull/2131)
- [fix: models API returns models in deactivated channels #2150](https://github.com/songquanpeng/one-api/pull/2150)
- [fix: Automatically close channel when connection fails](https://github.com/Laisky/one-api/pull/34)
- [fix: update EmailDomainWhitelist submission logic #33](https://github.com/Laisky/one-api/pull/33)
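PR #2128 above concerns the standard `bufio.Scanner` limitation: tokens above the default 64 KiB cap abort scanning with "token too long". A minimal sketch of the underlying technique of enlarging the scanner's buffer (the 1 MiB cap here is an arbitrary illustrative value, not necessarily the size used in the actual fix):

```go
package main

import (
	"bufio"
	"strings"
)

func main() {
	r := strings.NewReader("data: {\"choices\":[]}\n")
	scanner := bufio.NewScanner(r)

	// bufio.Scanner rejects tokens larger than 64 KiB by default and
	// reports "bufio.Scanner: token too long". Supplying a larger buffer
	// raises that ceiling.
	buf := make([]byte, 0, 1024*1024)
	scanner.Buffer(buf, 1024*1024)

	for scanner.Scan() {
		_ = scanner.Text() // process each streamed line
	}
}
```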
@@ -49,6 +49,8 @@ func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.E
	for scanner.Scan() {
		data := NormalizeDataLine(scanner.Text())

		// logger.Debugf(c.Request.Context(), "stream response: %s", data)

		// Skip lines that don't match expected format
		if len(data) < dataPrefixLength {
			continue // Ignore blank line or wrong format
		}
@@ -74,7 +76,7 @@ func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.E
		// Parse the JSON response
		err := json.Unmarshal([]byte(data[dataPrefixLength:]), &streamResponse)
		if err != nil {
-			logger.SysError("error unmarshalling stream response: " + err.Error())
+			logger.Errorf(c.Request.Context(), "unmarshalling stream data %q got %+v", data, err)
			render.StringData(c, data) // Pass raw data to client if parsing fails
			continue
		}