Compare commits


12 Commits

Author SHA1 Message Date
Laisky.Cai
1c6b044a91 Merge c44928b3a7 into c936198ac8 2024-07-24 02:42:23 +00:00
Laisky.Cai
c44928b3a7 Merge remote-tracking branch 'origin/upstream/main' into patch/images-edits 2024-07-24 02:42:16 +00:00
Laisky.Cai
c936198ac8 feat: add Proxy channel type and relay mode (#1678)
Add the Proxy channel type and relay mode to support proxying requests to custom upstream services.
2024-07-22 22:51:19 +08:00
TAKO
296ab013b8 feat: support gpt-4o mini (#1665)
* feat: support gpt-4o mini

* feat: fix gpt-4o mini image price
2024-07-22 22:44:08 +08:00
zijiren
5f03c856b4 feat: fast build linux/arm64 frontend (#1663)
* feat: fast build linux/arm64 frontend

* fix: replace Dockerfile "as" with "AS"

* fix: trim space
2024-07-22 22:39:22 +08:00
igophper
39383e5532 fix: support embedding models for doubao (#1662)
Fixes #1594
2024-07-22 22:38:50 +08:00
Laisky.Cai
02da017791 Merge remote-tracking branch 'origin/upstream/main' into patch/images-edits 2024-07-13 13:42:49 +00:00
Laisky.Cai
7ef45ee28f fix: remove unused imports in logger and middleware files 2024-06-27 06:11:51 +00:00
Laisky.Cai
1430152a90 Merge branch 'main' into patch/images-edits 2024-06-27 10:54:52 +08:00
Laisky.Cai
dd73f5b6b1 Merge branch 'main' into patch/images-edits 2024-05-28 11:11:03 +08:00
JustSong
1264ddcef5 Merge branch 'main' into patch/images-edits 2024-04-27 16:18:01 +08:00
Laisky.Cai
f71e4ef151 feat: support openai images edits api 2024-04-25 03:09:26 +00:00
24 changed files with 143 additions and 139 deletions

View File

@@ -1,61 +0,0 @@
name: Publish Docker image (amd64)
on:
  push:
    tags:
      - 'v*.*.*'
  workflow_dispatch:
    inputs:
      name:
        description: 'reason'
        required: false
jobs:
  push_to_registries:
    name: Push Docker image to multiple registries
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read
    steps:
      - name: Check out the repo
        uses: actions/checkout@v3
      - name: Check repository URL
        run: |
          REPO_URL=$(git config --get remote.origin.url)
          if [[ $REPO_URL == *"pro" ]]; then
            exit 1
          fi
      - name: Save version info
        run: |
          git describe --tags > VERSION
      - name: Log in to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Log in to the Container registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: |
            justsong/one-api
            ghcr.io/${{ github.repository }}
      - name: Build and push Docker images
        uses: docker/build-push-action@v3
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

View File

@@ -1,4 +1,4 @@
name: Publish Docker image (amd64, English)
name: Publish Docker image (English)
on:
  push:
@@ -34,6 +34,13 @@ jobs:
      - name: Translate
        run: |
          python ./i18n/translate.py --repository_path . --json_file_path ./i18n/en.json
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Log in to Docker Hub
        uses: docker/login-action@v2
        with:
@@ -51,6 +58,7 @@ jobs:
        uses: docker/build-push-action@v3
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

View File

@@ -1,10 +1,9 @@
name: Publish Docker image (arm64)
name: Publish Docker image
on:
  push:
    tags:
      - 'v*.*.*'
      - '!*-alpha*'
  workflow_dispatch:
    inputs:
      name:

View File

@@ -1,4 +1,4 @@
FROM node:16 as builder
FROM --platform=$BUILDPLATFORM node:16 AS builder
WORKDIR /web
COPY ./VERSION .

View File

@@ -3,6 +3,7 @@ package ctxkey
const (
    Config    = "config"
    Id        = "id"
    RequestId = "X-Oneapi-Request-Id"
    Username  = "username"
    Role      = "role"
    Status    = "status"
@@ -15,6 +16,7 @@ const (
    Group        = "group"
    ModelMapping = "model_mapping"
    ChannelName  = "channel_name"
    ContentType  = "content_type"
    TokenId      = "token_id"
    TokenName    = "token_name"
    BaseURL      = "base_url"

View File

@@ -2,11 +2,11 @@ package common
import (
    "bytes"
    "encoding/json"
    "github.com/gin-gonic/gin"
    "github.com/songquanpeng/one-api/common/ctxkey"
    "io"
    "strings"
    "github.com/gin-gonic/gin"
    "github.com/pkg/errors"
    "github.com/songquanpeng/one-api/common/ctxkey"
)
func GetRequestBody(c *gin.Context) ([]byte, error) {
@@ -28,18 +28,16 @@ func UnmarshalBodyReusable(c *gin.Context, v any) error {
    if err != nil {
        return err
    }
    contentType := c.Request.Header.Get("Content-Type")
    if strings.HasPrefix(contentType, "application/json") {
        err = json.Unmarshal(requestBody, &v)
    } else {
        // skip for now
        // TODO: if non-JSON requests ever carry variant models, implement this
    }
    if err != nil {
        return err
    }
    // Reset request body
    c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
    defer func() {
        c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
    }()
    if err = c.Bind(v); err != nil {
        return errors.Wrap(err, "bind request body failed")
    }
    return nil
}
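The rewritten UnmarshalBodyReusable reads the body once, lets c.Bind consume it, and restores a fresh copy in a defer so downstream readers can parse the same body again. A self-contained sketch of that re-readable-body pattern (hypothetical helper names, assuming gin):

package main

import (
    "bytes"
    "io"

    "github.com/gin-gonic/gin"
)

// rewindBody reads the whole request body and immediately puts a fresh
// copy back, so the next reader of c.Request.Body starts from the top.
func rewindBody(c *gin.Context) ([]byte, error) {
    body, err := io.ReadAll(c.Request.Body)
    if err != nil {
        return nil, err
    }
    c.Request.Body = io.NopCloser(bytes.NewBuffer(body))
    return body, nil
}

// bindReusable binds the body into v, then restores the body once more so
// later middleware can bind it again, mirroring the defer in the diff.
func bindReusable(c *gin.Context, v any) error {
    body, err := rewindBody(c)
    if err != nil {
        return err
    }
    defer func() {
        c.Request.Body = io.NopCloser(bytes.NewBuffer(body))
    }()
    return c.Bind(v)
}

func main() {
    r := gin.Default()
    r.POST("/echo", func(c *gin.Context) {
        var first, second map[string]any
        _ = bindReusable(c, &first)
        _ = bindReusable(c, &second) // works: the body was restored after the first bind
        c.JSON(200, second)
    })
    _ = r.Run(":8080")
}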

View File

@@ -26,7 +26,8 @@ import (
func relayHelper(c *gin.Context, relayMode int) *model.ErrorWithStatusCode {
    var err *model.ErrorWithStatusCode
    switch relayMode {
    case relaymode.ImagesGenerations:
    case relaymode.ImagesGenerations,
        relaymode.ImagesEdits:
        err = controller.RelayImageHelper(c, relayMode)
    case relaymode.AudioSpeech:
        fallthrough
@@ -45,10 +46,6 @@ func relayHelper(c *gin.Context, relayMode int) *model.ErrorWithStatusCode {
func Relay(c *gin.Context) {
    ctx := c.Request.Context()
    relayMode := relaymode.GetByPath(c.Request.URL.Path)
    if config.DebugEnabled {
        requestBody, _ := common.GetRequestBody(c)
        logger.Debugf(ctx, "request body: %s", string(requestBody))
    }
    channelId := c.GetInt(ctxkey.ChannelId)
    userId := c.GetInt(ctxkey.Id)
    bizErr := relayHelper(c, relayMode)
@@ -60,6 +57,8 @@ func Relay(c *gin.Context) {
    channelName := c.GetString(ctxkey.ChannelName)
    group := c.GetString(ctxkey.Group)
    originalModel := c.GetString(ctxkey.OriginalModel)
    // BUG: bizErr is shared; this function should not run in a goroutine, to avoid a data race
    go processChannelRelayError(ctx, userId, channelId, channelName, bizErr)
    requestId := c.GetString(helper.RequestIdKey)
    retryTimes := config.RetryTimes
@@ -90,6 +89,7 @@ func Relay(c *gin.Context) {
        // BUG: bizErr is subject to the same race condition
        go processChannelRelayError(ctx, userId, channelId, channelName, bizErr)
    }
    if bizErr != nil {
        if bizErr.StatusCode == http.StatusTooManyRequests {
            bizErr.Error.Message = "当前分组上游负载已饱和,请稍后再试" // "the upstream for the current group is saturated, please try again later"
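Both BUG comments flag the same hazard: bizErr is a pointer shared with the goroutine while the handler may still mutate it (for example, rewriting Error.Message above). A minimal sketch of the conventional fix, handing the goroutine its own copy; relayError here is a stand-in for model.ErrorWithStatusCode:

package main

import (
    "fmt"
    "sync"
)

// relayError stands in for model.ErrorWithStatusCode.
type relayError struct {
    StatusCode int
    Message    string
}

func main() {
    var wg sync.WaitGroup
    bizErr := &relayError{StatusCode: 429, Message: "upstream saturated"}

    // Copy the value before starting the goroutine: the handler keeps
    // mutating bizErr afterwards, and sharing the pointer would race.
    errCopy := *bizErr
    wg.Add(1)
    go func() {
        defer wg.Done()
        fmt.Println(errCopy.StatusCode, errCopy.Message) // reads the private copy
    }()

    bizErr.Message = "rewritten for the client" // safe: no longer shared
    wg.Wait()
}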

View File

@@ -12,7 +12,7 @@ import (
)
type ModelRequest struct {
    Model string `json:"model"`
    Model string `json:"model" form:"model"`
}
func Distribute() func(c *gin.Context) {
@@ -61,6 +61,7 @@ func SetupContextForSelectedChannel(c *gin.Context, channel *model.Channel, mode
    c.Set(ctxkey.Channel, channel.Type)
    c.Set(ctxkey.ChannelId, channel.Id)
    c.Set(ctxkey.ChannelName, channel.Name)
    c.Set(ctxkey.ContentType, c.Request.Header.Get("Content-Type"))
    c.Set(ctxkey.ModelMapping, channel.GetModelMapping())
    c.Set(ctxkey.OriginalModel, modelName) // for retry
    c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))

View File

@@ -2,6 +2,7 @@ package middleware
import (
    "fmt"
    "github.com/gin-gonic/gin"
    "github.com/songquanpeng/one-api/common/helper"
)

View File

@@ -2,6 +2,7 @@ package middleware
import (
    "context"
    "github.com/gin-gonic/gin"
    "github.com/songquanpeng/one-api/common/helper"
)

View File

@@ -2,11 +2,12 @@ package middleware
import (
    "fmt"
    "strings"
    "github.com/gin-gonic/gin"
    "github.com/songquanpeng/one-api/common"
    "github.com/songquanpeng/one-api/common/helper"
    "github.com/songquanpeng/one-api/common/logger"
    "strings"
)
func abortWithMessage(c *gin.Context, statusCode int, message string) {

View File

@@ -3,11 +3,13 @@ package adaptor
import (
    "errors"
    "fmt"
    "github.com/gin-gonic/gin"
    "github.com/songquanpeng/one-api/common/client"
    "github.com/songquanpeng/one-api/relay/meta"
    "io"
    "net/http"
    "github.com/gin-gonic/gin"
    "github.com/songquanpeng/one-api/common/client"
    "github.com/songquanpeng/one-api/common/ctxkey"
    "github.com/songquanpeng/one-api/relay/meta"
)
func SetupCommonRequestHeader(c *gin.Context, req *http.Request, meta *meta.Meta) {
@@ -27,6 +29,9 @@ func DoRequestHelper(a Adaptor, c *gin.Context, meta *meta.Meta, requestBody io.
    if err != nil {
        return nil, fmt.Errorf("new request failed: %w", err)
    }
    req.Header.Set("Content-Type", c.GetString(ctxkey.ContentType))
    err = a.SetupRequestHeader(c, req, meta)
    if err != nil {
        return nil, fmt.Errorf("setup request header failed: %w", err)

View File

@@ -7,8 +7,12 @@ import (
)
func GetRequestURL(meta *meta.Meta) (string, error) {
    if meta.Mode == relaymode.ChatCompletions {
    switch meta.Mode {
    case relaymode.ChatCompletions:
        return fmt.Sprintf("%s/api/v3/chat/completions", meta.BaseURL), nil
    case relaymode.Embeddings:
        return fmt.Sprintf("%s/api/v3/embeddings", meta.BaseURL), nil
    default:
    }
    return "", fmt.Errorf("unsupported relay mode %d for doubao", meta.Mode)
}
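A small illustration of the routing the new switch enables; buildDoubaoURL and the string modes are stand-ins for the real meta/relaymode types, and the base URL is a placeholder:

package main

import "fmt"

// buildDoubaoURL mirrors the switch above for the two supported modes.
func buildDoubaoURL(baseURL, mode string) (string, error) {
    switch mode {
    case "chat":
        return fmt.Sprintf("%s/api/v3/chat/completions", baseURL), nil
    case "embeddings":
        return fmt.Sprintf("%s/api/v3/embeddings", baseURL), nil
    }
    return "", fmt.Errorf("unsupported relay mode %q for doubao", mode)
}

func main() {
    u, _ := buildDoubaoURL("https://ark.example.com", "embeddings")
    fmt.Println(u) // https://ark.example.com/api/v3/embeddings
}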

View File

@@ -104,10 +104,13 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *meta.Met
        switch meta.Mode {
        case relaymode.ImagesGenerations:
            err, _ = ImageHandler(c, resp)
        case relaymode.ImagesEdits:
            err, _ = ImagesEditsHandler(c, resp)
        default:
            err, usage = Handler(c, resp, meta.PromptTokens, meta.ActualModelName)
        }
    }
    return
}

View File

@@ -8,6 +8,7 @@ var ModelList = []string{
    "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613",
    "gpt-4-turbo-preview", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
    "gpt-4o", "gpt-4o-2024-05-13",
    "gpt-4o-mini", "gpt-4o-mini-2024-07-18",
    "gpt-4-vision-preview",
    "text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large",
    "text-curie-001", "text-babbage-001", "text-ada-001", "text-davinci-002", "text-davinci-003",

View File

@@ -3,12 +3,30 @@ package openai
import (
    "bytes"
    "encoding/json"
    "github.com/gin-gonic/gin"
    "github.com/songquanpeng/one-api/relay/model"
    "io"
    "net/http"
    "github.com/gin-gonic/gin"
    "github.com/songquanpeng/one-api/relay/model"
)
// ImagesEditsHandler simply copies the upstream response to the client
//
// https://platform.openai.com/docs/api-reference/images/createEdit
func ImagesEditsHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
    c.Writer.WriteHeader(resp.StatusCode)
    for k, v := range resp.Header {
        c.Writer.Header().Set(k, v[0])
    }
    if _, err := io.Copy(c.Writer, resp.Body); err != nil {
        return ErrorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError), nil
    }
    defer resp.Body.Close()
    return nil, nil
}
func ImageHandler(c *gin.Context, resp *http.Response) (*model.ErrorWithStatusCode, *model.Usage) {
    var imageResponse ImageResponse
    responseBody, err := io.ReadAll(resp.Body)
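One note on ordering: with plain net/http, headers added after WriteHeader are dropped; gin's ResponseWriter only flushes headers on the first body write, which is why the handler above works. A sketch of the same copy-through written in the order raw net/http would require (route and upstream URL are placeholders, not part of this diff):

package main

import (
    "io"
    "net/http"

    "github.com/gin-gonic/gin"
)

// copyUpstreamResponse forwards an upstream response verbatim. Headers are
// set before WriteHeader commits the status line, and the body is closed
// via defer so it is released even if io.Copy fails midway.
func copyUpstreamResponse(c *gin.Context, resp *http.Response) error {
    defer resp.Body.Close()
    for k, v := range resp.Header {
        c.Writer.Header().Set(k, v[0]) // headers first...
    }
    c.Writer.WriteHeader(resp.StatusCode) // ...then the status line
    _, err := io.Copy(c.Writer, resp.Body)
    return err
}

func main() {
    r := gin.Default()
    r.POST("/v1/images/edits", func(c *gin.Context) {
        resp, err := http.Get("https://httpbin.org/json") // stand-in upstream call
        if err != nil {
            c.AbortWithStatus(http.StatusBadGateway)
            return
        }
        _ = copyUpstreamResponse(c, resp)
    })
    _ = r.Run(":8080")
}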

View File

@@ -110,7 +110,7 @@ func CountTokenMessages(messages []model.Message, model string) int {
                if imageUrl["detail"] != nil {
                    detail = imageUrl["detail"].(string)
                }
                imageTokens, err := countImageTokens(url, detail)
                imageTokens, err := countImageTokens(url, detail, model)
                if err != nil {
                    logger.SysError("error counting image tokens: " + err.Error())
                } else {
@@ -134,11 +134,15 @@ const (
    lowDetailCost         = 85
    highDetailCostPerTile = 170
    additionalCost        = 85
    // gpt-4o-mini image tokens cost more than those of other models
    gpt4oMiniLowDetailCost  = 2833
    gpt4oMiniHighDetailCost = 5667
    gpt4oMiniAdditionalCost = 2833
)
// https://platform.openai.com/docs/guides/vision/calculating-costs
// https://github.com/openai/openai-cookbook/blob/05e3f9be4c7a2ae7ecf029a7c32065b024730ebe/examples/How_to_count_tokens_with_tiktoken.ipynb
func countImageTokens(url string, detail string) (_ int, err error) {
func countImageTokens(url string, detail string, model string) (_ int, err error) {
    var fetchSize = true
    var width, height int
    // Reference: https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding
@@ -172,6 +176,9 @@ func countImageTokens(url string, detail string) (_ int, err error) {
    }
    switch detail {
    case "low":
        if strings.HasPrefix(model, "gpt-4o-mini") {
            return gpt4oMiniLowDetailCost, nil
        }
        return lowDetailCost, nil
    case "high":
        if fetchSize {
@@ -191,6 +198,9 @@ func countImageTokens(url string, detail string) (_ int, err error) {
            height = int(float64(height) * ratio)
        }
        numSquares := int(math.Ceil(float64(width)/512) * math.Ceil(float64(height)/512))
        if strings.HasPrefix(model, "gpt-4o-mini") {
            return numSquares*gpt4oMiniHighDetailCost + gpt4oMiniAdditionalCost, nil
        }
        result := numSquares*highDetailCostPerTile + additionalCost
        return result, nil
    default:
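To make the tile arithmetic concrete: per OpenAI's published rules, a 1024x1024 high-detail image is scaled so its shortest side is 768, giving 768x768, which is 2x2 tiles of 512x512. A standalone sketch of the formula above:

package main

import (
    "fmt"
    "math"
)

// highDetailTokens mirrors the tile formula above: the image is cut into
// 512x512 squares, each square costs perTile tokens, plus a flat base cost.
func highDetailTokens(width, height, perTile, base int) int {
    numSquares := int(math.Ceil(float64(width)/512) * math.Ceil(float64(height)/512))
    return numSquares*perTile + base
}

func main() {
    // 1024x1024 scaled to 768x768 -> 4 tiles
    fmt.Println(highDetailTokens(768, 768, 170, 85))    // 765 with the standard costs
    fmt.Println(highDetailTokens(768, 768, 5667, 2833)) // 25501 with the gpt-4o-mini costs
}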

View File

@@ -28,15 +28,17 @@ var ModelRatio = map[string]float64{
    "gpt-4-32k":      30,
    "gpt-4-32k-0314": 30,
    "gpt-4-32k-0613": 30,
    "gpt-4-1106-preview":     5,    // $0.01 / 1K tokens
    "gpt-4-0125-preview":     5,    // $0.01 / 1K tokens
    "gpt-4-turbo-preview":    5,    // $0.01 / 1K tokens
    "gpt-4-turbo":            5,    // $0.01 / 1K tokens
    "gpt-4-turbo-2024-04-09": 5,    // $0.01 / 1K tokens
    "gpt-4o":                 2.5,  // $0.005 / 1K tokens
    "gpt-4o-2024-05-13":      2.5,  // $0.005 / 1K tokens
    "gpt-4-vision-preview":   5,    // $0.01 / 1K tokens
    "gpt-3.5-turbo":          0.25, // $0.0005 / 1K tokens
    "gpt-4-1106-preview":     5,     // $0.01 / 1K tokens
    "gpt-4-0125-preview":     5,     // $0.01 / 1K tokens
    "gpt-4-turbo-preview":    5,     // $0.01 / 1K tokens
    "gpt-4-turbo":            5,     // $0.01 / 1K tokens
    "gpt-4-turbo-2024-04-09": 5,     // $0.01 / 1K tokens
    "gpt-4o":                 2.5,   // $0.005 / 1K tokens
    "gpt-4o-2024-05-13":      2.5,   // $0.005 / 1K tokens
    "gpt-4o-mini":            0.075, // $0.00015 / 1K tokens
    "gpt-4o-mini-2024-07-18": 0.075, // $0.00015 / 1K tokens
    "gpt-4-vision-preview":   5,     // $0.01 / 1K tokens
    "gpt-3.5-turbo":          0.25,  // $0.0005 / 1K tokens
    "gpt-3.5-turbo-0301":     0.75,
    "gpt-3.5-turbo-0613":     0.75,
    "gpt-3.5-turbo-16k":      1.5, // $0.003 / 1K tokens
@@ -308,6 +310,9 @@ func GetCompletionRatio(name string, channelType int) float64 {
        return 4.0 / 3.0
    }
    if strings.HasPrefix(name, "gpt-4") {
        if strings.HasPrefix(name, "gpt-4o-mini") {
            return 4
        }
        if strings.HasPrefix(name, "gpt-4-turbo") ||
            strings.HasPrefix(name, "gpt-4o") ||
            strings.HasSuffix(name, "preview") {
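For context on the numbers: one-api's convention is that a model ratio of 1 equals $0.002 per 1K tokens, so gpt-4o-mini's $0.15 per 1M input tokens becomes 0.075, and the completion ratio of 4 matches the $0.60 / $0.15 output-to-input price split. A quick check, assuming that convention:

package main

import "fmt"

func main() {
    const usdPerRatioUnit = 0.002 // one-api convention: ratio 1 == $0.002 / 1K tokens

    const gpt4oMiniRatio = 0.075
    fmt.Println(gpt4oMiniRatio * usdPerRatioUnit) // ~0.00015 USD per 1K input tokens

    // Output is priced at $0.60 per 1M tokens vs $0.15 per 1M for input,
    // which is where GetCompletionRatio's 4 for gpt-4o-mini comes from.
    fmt.Println(0.60 / 0.15) // ~4
}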

View File

@@ -8,6 +8,7 @@ import (
    "fmt"
    "io"
    "net/http"
    "strings"
    "github.com/gin-gonic/gin"
    "github.com/songquanpeng/one-api/common"
@@ -134,7 +135,8 @@ func RelayImageHelper(c *gin.Context, relayMode int) *relaymodel.ErrorWithStatus
    c.Set("response_format", imageRequest.ResponseFormat)
    var requestBody io.Reader
    if isModelMapped || meta.ChannelType == channeltype.Azure { // make Azure channel request body
    if strings.ToLower(c.GetString(ctxkey.ContentType)) == "application/json" &&
        isModelMapped || meta.ChannelType == channeltype.Azure { // make Azure channel request body
        jsonStr, err := json.Marshal(imageRequest)
        if err != nil {
            return openai.ErrorWrapper(err, "marshal_image_request_failed", http.StatusInternalServerError)

View File

@@ -10,6 +10,7 @@ import (
    "github.com/gin-gonic/gin"
    "github.com/songquanpeng/one-api/common/logger"
    "github.com/songquanpeng/one-api/relay"
    "github.com/songquanpeng/one-api/relay/adaptor"
    "github.com/songquanpeng/one-api/relay/adaptor/openai"
    "github.com/songquanpeng/one-api/relay/apitype"
    "github.com/songquanpeng/one-api/relay/billing"
@@ -31,9 +32,8 @@ func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode {
    meta.IsStream = textRequest.Stream
    // map model name
    var isModelMapped bool
    meta.OriginModelName = textRequest.Model
    textRequest.Model, isModelMapped = getMappedModelName(textRequest.Model, meta.ModelMapping)
    textRequest.Model, _ = getMappedModelName(textRequest.Model, meta.ModelMapping)
    meta.ActualModelName = textRequest.Model
    // get model ratio & group ratio
    modelRatio := billingratio.GetModelRatio(textRequest.Model, meta.ChannelType)
@@ -55,30 +55,9 @@ func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode {
    adaptor.Init(meta)
    // get request body
    var requestBody io.Reader
    if meta.APIType == apitype.OpenAI {
        // no need to convert the request for openai
        shouldResetRequestBody := isModelMapped || meta.ChannelType == channeltype.Baichuan // frequency_penalty 0 is not acceptable for baichuan
        if shouldResetRequestBody {
            jsonStr, err := json.Marshal(textRequest)
            if err != nil {
                return openai.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
            }
            requestBody = bytes.NewBuffer(jsonStr)
        } else {
            requestBody = c.Request.Body
        }
    } else {
        convertedRequest, err := adaptor.ConvertRequest(c, meta.Mode, textRequest)
        if err != nil {
            return openai.ErrorWrapper(err, "convert_request_failed", http.StatusInternalServerError)
        }
        jsonData, err := json.Marshal(convertedRequest)
        if err != nil {
            return openai.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
        }
        logger.Debugf(ctx, "converted request: \n%s", string(jsonData))
        requestBody = bytes.NewBuffer(jsonData)
    requestBody, err := getRequestBody(c, meta, textRequest, adaptor)
    if err != nil {
        return openai.ErrorWrapper(err, "convert_request_failed", http.StatusInternalServerError)
    }
    // do request
@@ -103,3 +82,26 @@ func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode {
    go postConsumeQuota(ctx, usage, meta, textRequest, ratio, preConsumedQuota, modelRatio, groupRatio)
    return nil
}
func getRequestBody(c *gin.Context, meta *meta.Meta, textRequest *model.GeneralOpenAIRequest, adaptor adaptor.Adaptor) (io.Reader, error) {
    if meta.APIType == apitype.OpenAI && meta.OriginModelName == meta.ActualModelName && meta.ChannelType != channeltype.Baichuan {
        // no need to convert the request for openai
        return c.Request.Body, nil
    }
    // get request body
    var requestBody io.Reader
    convertedRequest, err := adaptor.ConvertRequest(c, meta.Mode, textRequest)
    if err != nil {
        logger.Debugf(c.Request.Context(), "convert request failed: %s\n", err.Error())
        return nil, err
    }
    jsonData, err := json.Marshal(convertedRequest)
    if err != nil {
        logger.Debugf(c.Request.Context(), "marshal converted request failed: %s\n", err.Error())
        return nil, err
    }
    logger.Debugf(c.Request.Context(), "converted request: \n%s", string(jsonData))
    requestBody = bytes.NewBuffer(jsonData)
    return requestBody, nil
}

View File

@@ -1,12 +1,12 @@
package model
type ImageRequest struct {
    Model          string `json:"model"`
    Prompt         string `json:"prompt" binding:"required"`
    N              int    `json:"n,omitempty"`
    Size           string `json:"size,omitempty"`
    Quality        string `json:"quality,omitempty"`
    ResponseFormat string `json:"response_format,omitempty"`
    Style          string `json:"style,omitempty"`
    User           string `json:"user,omitempty"`
    Model          string `json:"model" form:"model"`
    Prompt         string `json:"prompt" binding:"required" form:"prompt"`
    N              int    `json:"n,omitempty" form:"n"`
    Size           string `json:"size,omitempty" form:"size"`
    Quality        string `json:"quality,omitempty" form:"quality"`
    ResponseFormat string `json:"response_format,omitempty" form:"response_format"`
    Style          string `json:"style,omitempty" form:"style"`
    User           string `json:"user,omitempty" form:"user"`
}
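The added form tags are what let gin bind the multipart form of an images-edits request into the same struct that JSON bodies use: c.Bind selects the binder from the Content-Type header. A minimal sketch with a hypothetical route and struct:

package main

import "github.com/gin-gonic/gin"

// imageEditForm mirrors the pattern above: json tags bind JSON bodies,
// form tags bind multipart/form-data fields of the same request type.
type imageEditForm struct {
    Model  string `json:"model" form:"model"`
    Prompt string `json:"prompt" binding:"required" form:"prompt"`
}

func main() {
    r := gin.Default()
    r.POST("/v1/images/edits", func(c *gin.Context) {
        var req imageEditForm
        // c.Bind picks the binder from Content-Type:
        // application/json -> JSON binding, multipart/form-data -> form binding.
        if err := c.Bind(&req); err != nil {
            return // gin has already written a 400 response
        }
        c.JSON(200, gin.H{"model": req.Model, "prompt": req.Prompt})
    })
    _ = r.Run(":8080")
}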

View File

@@ -11,6 +11,7 @@ const (
    AudioSpeech
    AudioTranscription
    AudioTranslation
    ImagesEdits
    // Proxy is a special relay mode for proxying requests to custom upstream
    Proxy
)

View File

@@ -24,8 +24,11 @@ func GetByPath(path string) int {
        relayMode = AudioTranscription
    } else if strings.HasPrefix(path, "/v1/audio/translations") {
        relayMode = AudioTranslation
    } else if strings.HasPrefix(path, "/v1/images/edits") {
        relayMode = ImagesEdits
    } else if strings.HasPrefix(path, "/v1/oneapi/proxy") {
        relayMode = Proxy
    }
    return relayMode
}
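GetByPath is plain prefix matching, so /v1/images/edits and any subpath now resolve to ImagesEdits, and /v1/oneapi/proxy to Proxy. A tiny self-contained version of that dispatch (the mode constants are stand-ins):

package main

import (
    "fmt"
    "strings"
)

const (
    modeUnknown = iota
    modeImagesEdits
    modeProxy
)

// getByPath mirrors the prefix dispatch above for the two new routes.
func getByPath(path string) int {
    switch {
    case strings.HasPrefix(path, "/v1/images/edits"):
        return modeImagesEdits
    case strings.HasPrefix(path, "/v1/oneapi/proxy"):
        return modeProxy
    }
    return modeUnknown
}

func main() {
    fmt.Println(getByPath("/v1/images/edits"))   // 1
    fmt.Println(getByPath("/v1/oneapi/proxy/x")) // 2
}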

View File

@@ -24,7 +24,7 @@ func SetRelayRouter(router *gin.Engine) {
    relayV1Router.POST("/chat/completions", controller.Relay)
    relayV1Router.POST("/edits", controller.Relay)
    relayV1Router.POST("/images/generations", controller.Relay)
    relayV1Router.POST("/images/edits", controller.RelayNotImplemented)
    relayV1Router.POST("/images/edits", controller.Relay)
    relayV1Router.POST("/images/variations", controller.RelayNotImplemented)
    relayV1Router.POST("/embeddings", controller.Relay)
    relayV1Router.POST("/engines/:model/embeddings", controller.Relay)