Mirror of https://github.com/songquanpeng/one-api.git (synced 2025-09-17 17:16:38 +08:00)

Commit 082cfbe574: Merge remote-tracking branch 'origin/upstream/main'
@@ -86,6 +86,7 @@ docker image: `ppcelery/one-api:latest`
 + [x] [together.ai](https://www.together.ai/)
 + [x] [novita.ai](https://www.novita.ai/)
 + [x] [硅基流动 SiliconCloud](https://siliconflow.cn/siliconcloud)
++ [x] [xAI](https://x.ai/)
 2. Supports configuring mirror endpoints and many [third-party proxy services](https://iamazing.cn/page/openai-api-third-party-services).
 3. Supports accessing multiple channels via **load balancing**.
 4. Supports **stream mode**, which produces a typewriter effect through streaming responses.
@@ -395,6 +396,7 @@ graph LR
 26. `METRIC_SUCCESS_RATE_THRESHOLD`: request success-rate threshold, defaults to `0.8`.
 27. `INITIAL_ROOT_TOKEN`: if set, a root user token with this environment variable's value is created automatically on first startup.
 28. `INITIAL_ROOT_ACCESS_TOKEN`: if set, a system management (access) token for the root user, with this environment variable's value, is created automatically on first startup.
+29. `ENFORCE_INCLUDE_USAGE`: whether to force returning usage in stream mode; disabled by default, allowed values are `true` and `false`.
 
 ### Command-line arguments
 
 1. `--port <port_number>`: the port the server listens on, defaults to `3000`.
@@ -175,3 +175,5 @@ var OnlyOneLogFile = env.Bool("ONLY_ONE_LOG_FILE", false)
 var RelayProxy = env.String("RELAY_PROXY", "")
 var UserContentRequestProxy = env.String("USER_CONTENT_REQUEST_PROXY", "")
 var UserContentRequestTimeout = env.Int("USER_CONTENT_REQUEST_TIMEOUT", 30)
+
+var EnforceIncludeUsage = env.Bool("ENFORCE_INCLUDE_USAGE", false)
@@ -24,4 +24,5 @@ const (
 	BaseURL         = "base_url"
 	AvailableModels = "available_models"
 	KeyRequestBody  = "key_request_body"
+	SystemPrompt    = "system_prompt"
 )
@@ -138,3 +138,23 @@ func String2Int(str string) int {
 	}
 	return num
 }
+
+func Float64PtrMax(p *float64, maxValue float64) *float64 {
+	if p == nil {
+		return nil
+	}
+	if *p > maxValue {
+		return &maxValue
+	}
+	return p
+}
+
+func Float64PtrMin(p *float64, minValue float64) *float64 {
+	if p == nil {
+		return nil
+	}
+	if *p < minValue {
+		return &minValue
+	}
+	return p
+}
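Note on the two helpers above: they clamp through a pointer while preserving `nil`, so a parameter the client never set stays unset instead of being materialized as a number. A quick sanity check (example values are ours, not from the commit):

```go
package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/common/helper"
)

func main() {
	v := 1.7
	// Values above the cap are clamped down to the cap...
	fmt.Println(*helper.Float64PtrMax(&v, 0.9999)) // 0.9999
	// ...while nil (parameter not set) passes through untouched.
	fmt.Println(helper.Float64PtrMax(nil, 0.9999)) // <nil>
}
```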
@@ -85,6 +85,9 @@ func SetupContextForSelectedChannel(c *gin.Context, channel *model.Channel, mode
 	c.Set(ctxkey.ChannelId, channel.Id)
 	c.Set(ctxkey.ChannelName, channel.Name)
 	c.Set(ctxkey.ContentType, c.Request.Header.Get("Content-Type"))
+	if channel.SystemPrompt != nil && *channel.SystemPrompt != "" {
+		c.Set(ctxkey.SystemPrompt, *channel.SystemPrompt)
+	}
 	c.Set(ctxkey.ModelMapping, channel.GetModelMapping())
 	c.Set(ctxkey.OriginalModel, modelName) // for retry
 	c.Request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", channel.Key))
@@ -37,6 +37,7 @@ type Channel struct {
 	ModelMapping *string `json:"model_mapping" gorm:"type:varchar(1024);default:''"`
 	Priority     *int64  `json:"priority" gorm:"bigint;default:0"`
 	Config       string  `json:"config"`
+	SystemPrompt *string `json:"system_prompt" gorm:"type:text"`
 }
 
 type ChannelConfig struct {
@@ -36,9 +36,7 @@ func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
 		enableSearch = true
 		aliModel = strings.TrimSuffix(aliModel, EnableSearchModelSuffix)
 	}
-	if request.TopP >= 1 {
-		request.TopP = 0.9999
-	}
+	request.TopP = helper.Float64PtrMax(request.TopP, 0.9999)
 	return &ChatRequest{
 		Model: aliModel,
 		Input: Input{
@@ -16,13 +16,13 @@ type Input struct {
 }
 
 type Parameters struct {
-	TopP              float64      `json:"top_p,omitempty"`
+	TopP              *float64     `json:"top_p,omitempty"`
 	TopK              int          `json:"top_k,omitempty"`
 	Seed              uint64       `json:"seed,omitempty"`
 	EnableSearch      bool         `json:"enable_search,omitempty"`
 	IncrementalOutput bool         `json:"incremental_output,omitempty"`
 	MaxTokens         int          `json:"max_tokens,omitempty"`
-	Temperature       float64      `json:"temperature,omitempty"`
+	Temperature       *float64     `json:"temperature,omitempty"`
 	ResultFormat      string       `json:"result_format,omitempty"`
 	Tools             []model.Tool `json:"tools,omitempty"`
 }
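The `float64` to `*float64` change here (repeated across the adaptors below) is why the clamp helpers work on pointers: with a plain `float64` and `omitempty`, an explicit 0 is dropped from the JSON body and cannot be told apart from "not set". A minimal self-contained sketch with stand-in types (ours, not the project's):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type valueParams struct {
	Temperature float64 `json:"temperature,omitempty"`
}

type ptrParams struct {
	Temperature *float64 `json:"temperature,omitempty"`
}

func main() {
	// omitempty treats 0 as empty, so an explicit 0 disappears...
	b, _ := json.Marshal(valueParams{Temperature: 0})
	fmt.Println(string(b)) // {}

	// ...while a pointer keeps "unset" (nil) and "zero" distinct.
	zero := 0.0
	b, _ = json.Marshal(ptrParams{Temperature: &zero})
	fmt.Println(string(b)) // {"temperature":0}
	b, _ = json.Marshal(ptrParams{})
	fmt.Println(string(b)) // {}
}
```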
@@ -3,6 +3,7 @@ package anthropic
 var ModelList = []string{
 	"claude-instant-1.2", "claude-2.0", "claude-2.1",
 	"claude-3-haiku-20240307",
+	"claude-3-5-haiku-20241022",
 	"claude-3-sonnet-20240229",
 	"claude-3-opus-20240229",
 	"claude-3-5-sonnet-20240620",
@@ -48,8 +48,8 @@ type Request struct {
 	MaxTokens     int      `json:"max_tokens,omitempty"`
 	StopSequences []string `json:"stop_sequences,omitempty"`
 	Stream        bool     `json:"stream,omitempty"`
-	Temperature   float64  `json:"temperature,omitempty"`
-	TopP          float64  `json:"top_p,omitempty"`
+	Temperature   *float64 `json:"temperature,omitempty"`
+	TopP          *float64 `json:"top_p,omitempty"`
 	TopK          int      `json:"top_k,omitempty"`
 	Tools         []Tool   `json:"tools,omitempty"`
 	ToolChoice    any      `json:"tool_choice,omitempty"`
@@ -11,8 +11,8 @@ type Request struct {
 	Messages      []anthropic.Message `json:"messages"`
 	System        string              `json:"system,omitempty"`
 	MaxTokens     int                 `json:"max_tokens,omitempty"`
-	Temperature   float64             `json:"temperature,omitempty"`
-	TopP          float64             `json:"top_p,omitempty"`
+	Temperature   *float64            `json:"temperature,omitempty"`
+	TopP          *float64            `json:"top_p,omitempty"`
 	TopK          int                 `json:"top_k,omitempty"`
 	StopSequences []string            `json:"stop_sequences,omitempty"`
 	Tools         []anthropic.Tool    `json:"tools,omitempty"`
@@ -4,10 +4,10 @@ package aws
 //
 // https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-meta.html
 type Request struct {
 	Prompt      string   `json:"prompt"`
 	MaxGenLen   int      `json:"max_gen_len,omitempty"`
-	Temperature float64  `json:"temperature,omitempty"`
-	TopP        float64  `json:"top_p,omitempty"`
+	Temperature *float64 `json:"temperature,omitempty"`
+	TopP        *float64 `json:"top_p,omitempty"`
 }
 
 // Response is the response from AWS Llama3
@@ -35,9 +35,9 @@ type Message struct {
 
 type ChatRequest struct {
 	Messages      []Message `json:"messages"`
-	Temperature   float64   `json:"temperature,omitempty"`
-	TopP          float64   `json:"top_p,omitempty"`
-	PenaltyScore  float64   `json:"penalty_score,omitempty"`
+	Temperature   *float64  `json:"temperature,omitempty"`
+	TopP          *float64  `json:"top_p,omitempty"`
+	PenaltyScore  *float64  `json:"penalty_score,omitempty"`
 	Stream        bool      `json:"stream,omitempty"`
 	System        string    `json:"system,omitempty"`
 	DisableSearch bool      `json:"disable_search,omitempty"`
@@ -9,5 +9,5 @@ type Request struct {
 	Prompt      string   `json:"prompt,omitempty"`
 	Raw         bool     `json:"raw,omitempty"`
 	Stream      bool     `json:"stream,omitempty"`
-	Temperature float64  `json:"temperature,omitempty"`
+	Temperature *float64 `json:"temperature,omitempty"`
 }
@@ -43,7 +43,7 @@ func ConvertRequest(textRequest model.GeneralOpenAIRequest) *Request {
 		K:                textRequest.TopK,
 		Stream:           textRequest.Stream,
 		FrequencyPenalty: textRequest.FrequencyPenalty,
-		PresencePenalty:  textRequest.FrequencyPenalty,
+		PresencePenalty:  textRequest.PresencePenalty,
 		Seed:             int(textRequest.Seed),
 	}
 	if cohereRequest.Model == "" {
@@ -10,15 +10,15 @@ type Request struct {
 	PromptTruncation string       `json:"prompt_truncation,omitempty"` // defaults to "AUTO"
 	Connectors       []Connector  `json:"connectors,omitempty"`
 	Documents        []Document   `json:"documents,omitempty"`
-	Temperature      float64      `json:"temperature,omitempty"` // defaults to 0.3
+	Temperature      *float64     `json:"temperature,omitempty"` // defaults to 0.3
 	MaxTokens        int          `json:"max_tokens,omitempty"`
 	MaxInputTokens   int          `json:"max_input_tokens,omitempty"`
 	K                int          `json:"k,omitempty"` // defaults to 0
-	P                float64      `json:"p,omitempty"` // defaults to 0.75
+	P                *float64     `json:"p,omitempty"` // defaults to 0.75
 	Seed             int          `json:"seed,omitempty"`
 	StopSequences    []string     `json:"stop_sequences,omitempty"`
-	FrequencyPenalty float64      `json:"frequency_penalty,omitempty"` // defaults to 0.0
-	PresencePenalty  float64      `json:"presence_penalty,omitempty"` // defaults to 0.0
+	FrequencyPenalty *float64     `json:"frequency_penalty,omitempty"` // defaults to 0.0
+	PresencePenalty  *float64     `json:"presence_penalty,omitempty"` // defaults to 0.0
 	Tools            []Tool       `json:"tools,omitempty"`
 	ToolResults      []ToolResult `json:"tool_results,omitempty"`
 }
@@ -67,8 +67,8 @@ type ChatTools struct {
 type ChatGenerationConfig struct {
 	ResponseMimeType string   `json:"responseMimeType,omitempty"`
 	ResponseSchema   any      `json:"responseSchema,omitempty"`
-	Temperature      float64  `json:"temperature,omitempty"`
-	TopP             float64  `json:"topP,omitempty"`
+	Temperature      *float64 `json:"temperature,omitempty"`
+	TopP             *float64 `json:"topP,omitempty"`
 	TopK             float64  `json:"topK,omitempty"`
 	MaxOutputTokens  int      `json:"maxOutputTokens,omitempty"`
 	CandidateCount   int      `json:"candidateCount,omitempty"`
@@ -1,14 +1,14 @@
 package ollama
 
 type Options struct {
 	Seed             int      `json:"seed,omitempty"`
-	Temperature      float64  `json:"temperature,omitempty"`
+	Temperature      *float64 `json:"temperature,omitempty"`
 	TopK             int      `json:"top_k,omitempty"`
-	TopP             float64  `json:"top_p,omitempty"`
-	FrequencyPenalty float64  `json:"frequency_penalty,omitempty"`
-	PresencePenalty  float64  `json:"presence_penalty,omitempty"`
+	TopP             *float64 `json:"top_p,omitempty"`
+	FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"`
+	PresencePenalty  *float64 `json:"presence_penalty,omitempty"`
 	NumPredict       int      `json:"num_predict,omitempty"`
 	NumCtx           int      `json:"num_ctx,omitempty"`
 }
 
 type Message struct {
@@ -11,9 +11,10 @@ import (
 	"github.com/songquanpeng/one-api/relay/adaptor/mistral"
 	"github.com/songquanpeng/one-api/relay/adaptor/moonshot"
 	"github.com/songquanpeng/one-api/relay/adaptor/novita"
+	"github.com/songquanpeng/one-api/relay/adaptor/siliconflow"
 	"github.com/songquanpeng/one-api/relay/adaptor/stepfun"
 	"github.com/songquanpeng/one-api/relay/adaptor/togetherai"
-	"github.com/songquanpeng/one-api/relay/adaptor/siliconflow"
+	"github.com/songquanpeng/one-api/relay/adaptor/xai"
 	"github.com/songquanpeng/one-api/relay/channeltype"
 )
@@ -32,6 +33,7 @@ var CompatibleChannels = []int{
 	channeltype.TogetherAI,
 	channeltype.Novita,
 	channeltype.SiliconFlow,
+	channeltype.XAI,
 }
 
 func GetCompatibleChannelMeta(channelType int) (string, []string) {
@@ -64,6 +66,8 @@ func GetCompatibleChannelMeta(channelType int) (string, []string) {
 		return "novita", novita.ModelList
 	case channeltype.SiliconFlow:
 		return "siliconflow", siliconflow.ModelList
+	case channeltype.XAI:
+		return "xai", xai.ModelList
 	default:
 		return "openai", ModelList
 	}
@@ -19,11 +19,11 @@ type Prompt struct {
 }
 
 type ChatRequest struct {
 	Prompt         Prompt   `json:"prompt"`
-	Temperature    float64  `json:"temperature,omitempty"`
+	Temperature    *float64 `json:"temperature,omitempty"`
 	CandidateCount int      `json:"candidateCount,omitempty"`
-	TopP           float64  `json:"topP,omitempty"`
+	TopP           *float64 `json:"topP,omitempty"`
 	TopK           int      `json:"topK,omitempty"`
 }
 
 type Error struct {
@@ -39,8 +39,8 @@ func ConvertRequest(request model.GeneralOpenAIRequest) *ChatRequest {
 		Model:       &request.Model,
 		Stream:      &request.Stream,
 		Messages:    messages,
-		TopP:        &request.TopP,
-		Temperature: &request.Temperature,
+		TopP:        request.TopP,
+		Temperature: request.Temperature,
 	}
 }
@@ -11,8 +11,8 @@ type Request struct {
 	MaxTokens     int              `json:"max_tokens,omitempty"`
 	StopSequences []string         `json:"stop_sequences,omitempty"`
 	Stream        bool             `json:"stream,omitempty"`
-	Temperature   float64          `json:"temperature,omitempty"`
-	TopP          float64          `json:"top_p,omitempty"`
+	Temperature   *float64         `json:"temperature,omitempty"`
+	TopP          *float64         `json:"top_p,omitempty"`
 	TopK          int              `json:"top_k,omitempty"`
 	Tools         []anthropic.Tool `json:"tools,omitempty"`
 	ToolChoice    any              `json:"tool_choice,omitempty"`
relay/adaptor/xai/constants.go (new file, +5)
@@ -0,0 +1,5 @@
+package xai
+
+var ModelList = []string{
+	"grok-beta",
+}
@@ -283,7 +283,7 @@ func parseAPIVersionByModelName(modelName string) string {
 func apiVersion2domain(apiVersion string) string {
 	switch apiVersion {
 	case "v1.1":
-		return "general"
+		return "lite"
 	case "v2.1":
 		return "generalv2"
 	case "v3.1":
@@ -19,11 +19,11 @@ type ChatRequest struct {
 	} `json:"header"`
 	Parameter struct {
 		Chat struct {
 			Domain      string   `json:"domain,omitempty"`
-			Temperature float64  `json:"temperature,omitempty"`
+			Temperature *float64 `json:"temperature,omitempty"`
 			TopK        int      `json:"top_k,omitempty"`
 			MaxTokens   int      `json:"max_tokens,omitempty"`
 			Auditing    bool     `json:"auditing,omitempty"`
 		} `json:"chat"`
 	} `json:"parameter"`
 	Payload struct {
@@ -4,11 +4,11 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"math"
 	"net/http"
 	"strings"
 
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common/helper"
 	"github.com/songquanpeng/one-api/relay/adaptor"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
 	"github.com/songquanpeng/one-api/relay/meta"
@@ -66,13 +66,13 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 		baiduEmbeddingRequest, err := ConvertEmbeddingRequest(*request)
 		return baiduEmbeddingRequest, err
 	default:
-		// TopP (0.0, 1.0)
-		request.TopP = math.Min(0.99, request.TopP)
-		request.TopP = math.Max(0.01, request.TopP)
+		// TopP [0.0, 1.0]
+		request.TopP = helper.Float64PtrMax(request.TopP, 1)
+		request.TopP = helper.Float64PtrMin(request.TopP, 0)
 
-		// Temperature (0.0, 1.0)
-		request.Temperature = math.Min(0.99, request.Temperature)
-		request.Temperature = math.Max(0.01, request.Temperature)
+		// Temperature [0.0, 1.0]
+		request.Temperature = helper.Float64PtrMax(request.Temperature, 1)
+		request.Temperature = helper.Float64PtrMin(request.Temperature, 0)
 		a.SetVersionByModeName(request.Model)
 		if a.APIVersion == "v4" {
 			return request, nil
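The helper names can read backwards at first glance: `Float64PtrMax(p, 1)` enforces a maximum of 1 (clamps down), and `Float64PtrMin(p, 0)` enforces a minimum of 0 (clamps up), so the pair above confines a set value to [0, 1] while leaving nil alone. A quick check (values are ours):

```go
package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/common/helper"
)

func main() {
	p := 1.3
	// Cap at 1, then floor at 0: 1.3 ends up as 1.
	q := helper.Float64PtrMin(helper.Float64PtrMax(&p, 1), 0)
	fmt.Println(*q) // 1

	r := -0.2
	// ...and -0.2 ends up as 0.
	fmt.Println(*helper.Float64PtrMin(helper.Float64PtrMax(&r, 1), 0)) // 0
}
```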
@@ -13,8 +13,8 @@ type Message struct {
 
 type Request struct {
 	Prompt      []Message `json:"prompt"`
-	Temperature float64   `json:"temperature,omitempty"`
-	TopP        float64   `json:"top_p,omitempty"`
+	Temperature *float64  `json:"temperature,omitempty"`
+	TopP        *float64  `json:"top_p,omitempty"`
 	RequestId   string    `json:"request_id,omitempty"`
 	Incremental bool      `json:"incremental,omitempty"`
 }
@@ -79,6 +79,7 @@ var ModelRatio = map[string]float64{
 	"claude-2.0":                 8.0 / 1000 * USD,
 	"claude-2.1":                 8.0 / 1000 * USD,
 	"claude-3-haiku-20240307":    0.25 / 1000 * USD,
+	"claude-3-5-haiku-20241022":  1.0 / 1000 * USD,
 	"claude-3-sonnet-20240229":   3.0 / 1000 * USD,
 	"claude-3-5-sonnet-20240620": 3.0 / 1000 * USD,
 	"claude-3-5-sonnet-20241022": 3.0 / 1000 * USD,
@@ -208,6 +209,8 @@ var ModelRatio = map[string]float64{
 	"deepl-zh": 25.0 / 1000 * USD,
 	"deepl-en": 25.0 / 1000 * USD,
 	"deepl-ja": 25.0 / 1000 * USD,
+	// https://console.x.ai/
+	"grok-beta": 5.0 / 1000 * USD,
 }
 
 var CompletionRatio = map[string]float64{
@@ -372,6 +375,8 @@ func GetCompletionRatio(name string, channelType int) float64 {
 		return 3
 	case "command-r-plus":
 		return 5
+	case "grok-beta":
+		return 3
 	}
 	return 1
 }
@@ -46,5 +46,6 @@ const (
 	VertextAI
 	Proxy
 	SiliconFlow
+	XAI
 	Dummy
 )
@@ -45,7 +45,8 @@ var ChannelBaseURLs = []string{
 	"https://api.novita.ai/v3/openai", // 41
 	"",                                // 42
 	"",                                // 43
 	"https://api.siliconflow.cn",      // 44
+	"https://api.x.ai",                // 45
 }
 
 func init() {
@@ -1,5 +1,6 @@
 package role
 
 const (
+	System    = "system"
 	Assistant = "assistant"
 )
@@ -3,8 +3,12 @@ package controller
 import (
 	"context"
 	"fmt"
-	"github.com/Laisky/errors/v2"
+	"math"
+	"net/http"
+	"strings"
 
 	"github.com/gin-gonic/gin"
+	"github.com/pkg/errors"
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/config"
 	"github.com/songquanpeng/one-api/common/logger"
|
|||||||
"github.com/songquanpeng/one-api/relay/adaptor/openai"
|
"github.com/songquanpeng/one-api/relay/adaptor/openai"
|
||||||
billingratio "github.com/songquanpeng/one-api/relay/billing/ratio"
|
billingratio "github.com/songquanpeng/one-api/relay/billing/ratio"
|
||||||
"github.com/songquanpeng/one-api/relay/channeltype"
|
"github.com/songquanpeng/one-api/relay/channeltype"
|
||||||
|
"github.com/songquanpeng/one-api/relay/constant/role"
|
||||||
"github.com/songquanpeng/one-api/relay/controller/validator"
|
"github.com/songquanpeng/one-api/relay/controller/validator"
|
||||||
"github.com/songquanpeng/one-api/relay/meta"
|
"github.com/songquanpeng/one-api/relay/meta"
|
||||||
relaymodel "github.com/songquanpeng/one-api/relay/model"
|
relaymodel "github.com/songquanpeng/one-api/relay/model"
|
||||||
"github.com/songquanpeng/one-api/relay/relaymode"
|
"github.com/songquanpeng/one-api/relay/relaymode"
|
||||||
"math"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func getAndValidateTextRequest(c *gin.Context, relayMode int) (*relaymodel.GeneralOpenAIRequest, error) {
|
func getAndValidateTextRequest(c *gin.Context, relayMode int) (*relaymodel.GeneralOpenAIRequest, error) {
|
||||||
@@ -89,7 +91,7 @@ func preConsumeQuota(ctx context.Context, textRequest *relaymodel.GeneralOpenAIR
 	return preConsumedQuota, nil
 }
 
-func postConsumeQuota(ctx context.Context, usage *relaymodel.Usage, meta *meta.Meta, textRequest *relaymodel.GeneralOpenAIRequest, ratio float64, preConsumedQuota int64, modelRatio float64, groupRatio float64) (quota int64) {
+func postConsumeQuota(ctx context.Context, usage *relaymodel.Usage, meta *meta.Meta, textRequest *relaymodel.GeneralOpenAIRequest, ratio float64, preConsumedQuota int64, modelRatio float64, groupRatio float64, systemPromptReset bool) {
 	if usage == nil {
 		logger.Error(ctx, "usage is nil, which is unexpected")
 		return
|
|||||||
completionRatio := billingratio.GetCompletionRatio(textRequest.Model, meta.ChannelType)
|
completionRatio := billingratio.GetCompletionRatio(textRequest.Model, meta.ChannelType)
|
||||||
promptTokens := usage.PromptTokens
|
promptTokens := usage.PromptTokens
|
||||||
completionTokens := usage.CompletionTokens
|
completionTokens := usage.CompletionTokens
|
||||||
quota = int64(math.Ceil((float64(promptTokens) + float64(completionTokens)*completionRatio) * ratio))
|
quota := int64(math.Ceil((float64(promptTokens) + float64(completionTokens)*completionRatio) * ratio))
|
||||||
if ratio != 0 && quota <= 0 {
|
if ratio != 0 && quota <= 0 {
|
||||||
quota = 1
|
quota = 1
|
||||||
}
|
}
|
||||||
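The billing formula above is quota = ceil((promptTokens + completionTokens * completionRatio) * ratio), floored at 1 whenever ratio is non-zero. A worked example (the numbers are ours):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// 1000 prompt tokens and 500 completion tokens with a completion
	// ratio of 3 count as 2500 weighted tokens; at an overall ratio of
	// 0.0005 that bills ceil(1.25) = 2 quota units.
	promptTokens, completionTokens := 1000.0, 500.0
	completionRatio, ratio := 3.0, 0.0005
	quota := int64(math.Ceil((promptTokens + completionTokens*completionRatio) * ratio))
	fmt.Println(quota) // 2
}
```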
@@ -117,8 +119,11 @@ func postConsumeQuota(ctx context.Context, usage *relaymodel.Usage, meta *meta.M
 	if err != nil {
 		logger.Error(ctx, "error update user quota cache: "+err.Error())
 	}
-	logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f,补全倍率 %.2f", modelRatio, groupRatio, completionRatio)
+	var extraLog string
+	if systemPromptReset {
+		extraLog = " (注意系统提示词已被重置)"
+	}
+	logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f,补全倍率 %.2f%s", modelRatio, groupRatio, completionRatio, extraLog)
 	model.RecordConsumeLog(ctx, meta.UserId, meta.ChannelId, promptTokens, completionTokens, textRequest.Model, meta.TokenName, quota, logContent)
 	model.UpdateUserUsedQuotaAndRequestCount(meta.UserId, quota)
 	model.UpdateChannelUsedQuota(meta.ChannelId, quota)
@@ -156,3 +161,23 @@ func isErrorHappened(meta *meta.Meta, resp *http.Response) bool {
 	}
 	return false
 }
+
+func setSystemPrompt(ctx context.Context, request *relaymodel.GeneralOpenAIRequest, prompt string) (reset bool) {
+	if prompt == "" {
+		return false
+	}
+	if len(request.Messages) == 0 {
+		return false
+	}
+	if request.Messages[0].Role == role.System {
+		request.Messages[0].Content = prompt
+		logger.Infof(ctx, "rewrite system prompt")
+		return true
+	}
+	request.Messages = append([]relaymodel.Message{{
+		Role:    role.System,
+		Content: prompt,
+	}}, request.Messages...)
+	logger.Infof(ctx, "add system prompt")
+	return true
+}
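In effect, a non-empty channel-level system prompt always wins: it overwrites an existing leading system message and is prepended otherwise, and the returned flag feeds the billing log above. A self-contained sketch of the same logic with stand-in types (the real code uses relaymodel.Message and role.System):

```go
package main

import "fmt"

type message struct {
	Role    string
	Content string
}

func setSystemPrompt(messages []message, prompt string) ([]message, bool) {
	if prompt == "" || len(messages) == 0 {
		return messages, false
	}
	if messages[0].Role == "system" {
		messages[0].Content = prompt // rewrite the caller's system prompt
		return messages, true
	}
	// otherwise prepend a new system message
	return append([]message{{Role: "system", Content: prompt}}, messages...), true
}

func main() {
	msgs := []message{{Role: "user", Content: "hi"}}
	msgs, reset := setSystemPrompt(msgs, "You are a helpful assistant.")
	fmt.Println(reset, msgs[0].Role) // true system
}
```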
@@ -8,9 +8,9 @@ import (
 
 	"github.com/Laisky/errors/v2"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common/config"
 	"github.com/songquanpeng/one-api/common/ctxkey"
 	"github.com/songquanpeng/one-api/common/logger"
-	"github.com/songquanpeng/one-api/model"
 	"github.com/songquanpeng/one-api/relay"
 	"github.com/songquanpeng/one-api/relay/adaptor"
 	"github.com/songquanpeng/one-api/relay/adaptor/openai"
@@ -37,6 +37,8 @@ func RelayTextHelper(c *gin.Context) *relaymodel.ErrorWithStatusCode {
 	meta.OriginModelName = textRequest.Model
 	textRequest.Model, _ = getMappedModelName(textRequest.Model, meta.ModelMapping)
 	meta.ActualModelName = textRequest.Model
+	// set system prompt if not empty
+	systemPromptReset := setSystemPrompt(ctx, textRequest, meta.SystemPrompt)
 	// get model ratio & group ratio
 	modelRatio := billingratio.GetModelRatio(textRequest.Model, meta.ChannelType)
 	// groupRatio := billingratio.GetGroupRatio(meta.Group)
@@ -88,23 +90,12 @@ func RelayTextHelper(c *gin.Context) *relaymodel.ErrorWithStatusCode {
 	}
 
 	// post-consume quota
-	go func() {
-		quota := postConsumeQuota(c, usage, meta, textRequest, ratio, preConsumedQuota, modelRatio, groupRatio)
-		docu := model.NewUserRequestCost(
-			c.GetInt(ctxkey.Id),
-			c.GetString(ctxkey.RequestId),
-			quota,
-		)
-		if err = docu.Insert(); err != nil {
-			logger.Errorf(c, "insert user request cost failed: %+v", err)
-		}
-	}()
+	go postConsumeQuota(ctx, usage, meta, textRequest, ratio, preConsumedQuota, modelRatio, groupRatio, systemPromptReset)
 
 	return nil
 }
 
 func getRequestBody(c *gin.Context, meta *meta.Meta, textRequest *relaymodel.GeneralOpenAIRequest, adaptor adaptor.Adaptor) (io.Reader, error) {
-	if meta.APIType == apitype.OpenAI && meta.OriginModelName == meta.ActualModelName && meta.ChannelType != channeltype.Baichuan {
+	if !config.EnforceIncludeUsage && meta.APIType == apitype.OpenAI && meta.OriginModelName == meta.ActualModelName && meta.ChannelType != channeltype.Baichuan {
 		// no need to convert request for openai
 		return c.Request.Body, nil
 	}
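Note on the new `!config.EnforceIncludeUsage` guard: the OpenAI passthrough fast path skips request conversion entirely, so usage enforcement could never take effect there; disabling the fast path routes the request through conversion instead. A hedged, self-contained sketch of the idea (stand-in types; the actual injection point inside the adaptors is not shown in this diff):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in types mirroring OpenAI's stream_options.include_usage flag.
type streamOptions struct {
	IncludeUsage bool `json:"include_usage"`
}

type chatRequest struct {
	Stream        bool           `json:"stream,omitempty"`
	StreamOptions *streamOptions `json:"stream_options,omitempty"`
}

func main() {
	req := chatRequest{Stream: true}
	enforce := true // stands in for config.EnforceIncludeUsage
	if enforce && req.Stream && req.StreamOptions == nil {
		// ask the upstream to append a final usage chunk to the stream
		req.StreamOptions = &streamOptions{IncludeUsage: true}
	}
	b, _ := json.Marshal(req)
	fmt.Println(string(b)) // {"stream":true,"stream_options":{"include_usage":true}}
}
```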
@@ -32,6 +32,7 @@ type Meta struct {
 	RequestURLPath string
 	PromptTokens   int // only for DoResponse
 	ChannelRatio   float64
+	SystemPrompt   string
 }
 
 func GetByContext(c *gin.Context) *Meta {
@@ -49,6 +50,7 @@ func GetByContext(c *gin.Context) *Meta {
 		APIKey:         strings.TrimPrefix(c.Request.Header.Get("Authorization"), "Bearer "),
 		RequestURLPath: c.Request.URL.String(),
 		ChannelRatio:   c.GetFloat64(ctxkey.ChannelRatio), // add by Laisky
+		SystemPrompt:   c.GetString(ctxkey.SystemPrompt),
 	}
 	cfg, ok := c.Get(ctxkey.Config)
 	if ok {
@@ -22,34 +22,49 @@ type StreamOptions struct {
 }
 
 type GeneralOpenAIRequest struct {
-	Messages         []Message       `json:"messages,omitempty"`
-	Model            string          `json:"model,omitempty"`
-	Modalities       []string        `json:"modalities,omitempty"`
-	Audio            *Audio          `json:"audio,omitempty"`
-	FrequencyPenalty float64         `json:"frequency_penalty,omitempty"`
-	MaxTokens        int             `json:"max_tokens,omitempty"`
-	N                int             `json:"n,omitempty"`
-	PresencePenalty  float64         `json:"presence_penalty,omitempty"`
-	ResponseFormat   *ResponseFormat `json:"response_format,omitempty"`
-	Seed             float64         `json:"seed,omitempty"`
-	Stop             any             `json:"stop,omitempty"`
-	Stream           bool            `json:"stream,omitempty"`
-	StreamOptions    *StreamOptions  `json:"stream_options,omitempty"`
-	Temperature      float64         `json:"temperature,omitempty"`
-	TopP             float64         `json:"top_p,omitempty"`
-	TopK             int             `json:"top_k,omitempty"`
-	Tools            []Tool          `json:"tools,omitempty"`
-	ToolChoice       any             `json:"tool_choice,omitempty"`
-	FunctionCall     any             `json:"function_call,omitempty"`
-	Functions        any             `json:"functions,omitempty"`
-	User             string          `json:"user,omitempty"`
-	Prompt           any             `json:"prompt,omitempty"`
-	Input            any             `json:"input,omitempty"`
-	EncodingFormat   string          `json:"encoding_format,omitempty"`
-	Dimensions       int             `json:"dimensions,omitempty"`
-	Instruction      string          `json:"instruction,omitempty"`
-	Size             string          `json:"size,omitempty"`
-	NumCtx           int             `json:"num_ctx,omitempty"`
+	// https://platform.openai.com/docs/api-reference/chat/create
+	Messages            []Message       `json:"messages,omitempty"`
+	Model               string          `json:"model,omitempty"`
+	Store               *bool           `json:"store,omitempty"`
+	Metadata            any             `json:"metadata,omitempty"`
+	FrequencyPenalty    *float64        `json:"frequency_penalty,omitempty"`
+	LogitBias           any             `json:"logit_bias,omitempty"`
+	Logprobs            *bool           `json:"logprobs,omitempty"`
+	TopLogprobs         *int            `json:"top_logprobs,omitempty"`
+	MaxTokens           int             `json:"max_tokens,omitempty"`
+	MaxCompletionTokens *int            `json:"max_completion_tokens,omitempty"`
+	N                   int             `json:"n,omitempty"`
+	Modalities          []string        `json:"modalities,omitempty"`
+	Prediction          any             `json:"prediction,omitempty"`
+	Audio               *Audio          `json:"audio,omitempty"`
+	PresencePenalty     *float64        `json:"presence_penalty,omitempty"`
+	ResponseFormat      *ResponseFormat `json:"response_format,omitempty"`
+	Seed                float64         `json:"seed,omitempty"`
+	ServiceTier         *string         `json:"service_tier,omitempty"`
+	Stop                any             `json:"stop,omitempty"`
+	Stream              bool            `json:"stream,omitempty"`
+	StreamOptions       *StreamOptions  `json:"stream_options,omitempty"`
+	Temperature         *float64        `json:"temperature,omitempty"`
+	TopP                *float64        `json:"top_p,omitempty"`
+	TopK                int             `json:"top_k,omitempty"`
+	Tools               []Tool          `json:"tools,omitempty"`
+	ToolChoice          any             `json:"tool_choice,omitempty"`
+	ParallelTooCalls    *bool           `json:"parallel_tool_calls,omitempty"`
+	User                string          `json:"user,omitempty"`
+	FunctionCall        any             `json:"function_call,omitempty"`
+	Functions           any             `json:"functions,omitempty"`
+	// https://platform.openai.com/docs/api-reference/embeddings/create
+	Input          any    `json:"input,omitempty"`
+	EncodingFormat string `json:"encoding_format,omitempty"`
+	Dimensions     int    `json:"dimensions,omitempty"`
+	// https://platform.openai.com/docs/api-reference/images/create
+	Prompt  any     `json:"prompt,omitempty"`
+	Quality *string `json:"quality,omitempty"`
+	Size    string  `json:"size,omitempty"`
+	Style   *string `json:"style,omitempty"`
+	// Others
+	Instruction string `json:"instruction,omitempty"`
+	NumCtx      int    `json:"num_ctx,omitempty"`
 }
 
 func (r GeneralOpenAIRequest) ParseInput() []string {
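With the sampling fields now pointers, callers take the address of a literal (or leave the field nil to omit it). A tiny construction sketch (the model name and values are ours):

```go
package main

import (
	"fmt"

	relaymodel "github.com/songquanpeng/one-api/relay/model"
)

func ptr[T any](v T) *T { return &v }

func main() {
	// Unset pointer fields stay nil and are omitted from the JSON body
	// instead of being serialized as 0.
	req := relaymodel.GeneralOpenAIRequest{
		Model:       "gpt-4o-mini",
		Temperature: ptr(0.2),
		TopP:        ptr(0.9),
	}
	fmt.Println(*req.Temperature, *req.TopP)
}
```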
@@ -30,6 +30,7 @@ export const CHANNEL_OPTIONS = [
   { key: 42, text: 'VertexAI', value: 42, color: 'blue' },
   { key: 43, text: 'Proxy', value: 43, color: 'blue' },
   { key: 44, text: 'SiliconFlow', value: 44, color: 'blue' },
+  { key: 45, text: 'xAI', value: 45, color: 'blue' },
   { key: 8, text: '自定义渠道', value: 8, color: 'pink' },
   { key: 22, text: '知识库:FastGPT', value: 22, color: 'blue' },
   { key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple' },
@@ -43,6 +43,7 @@ const EditChannel = (props) => {
     base_url: '',
     other: '',
     model_mapping: '',
+    system_prompt: '',
     models: [],
     auto_ban: 1,
     groups: ['default']
@@ -63,7 +64,7 @@ const EditChannel = (props) => {
     let localModels = [];
     switch (value) {
       case 14:
-        localModels = ["claude-instant-1.2", "claude-2", "claude-2.0", "claude-2.1", "claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307", "claude-3-5-sonnet-20240620", "claude-3-5-sonnet-20241022"];
+        localModels = ["claude-instant-1.2", "claude-2", "claude-2.0", "claude-2.1", "claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307", "claude-3-5-haiku-20241022", "claude-3-5-sonnet-20240620", "claude-3-5-sonnet-20241022"];
         break;
       case 11:
         localModels = ['PaLM-2'];
@@ -304,163 +305,163 @@ const EditChannel = (props) => {
       width={isMobile() ? '100%' : 600}
     >
       <Spin spinning={loading}>
-        <div style={{marginTop: 10}}>
+        <div style={{ marginTop: 10 }}>
           <Typography.Text strong>类型:</Typography.Text>
         </div>
         <Select
           name='type'
           required
           optionList={CHANNEL_OPTIONS}
           value={inputs.type}
           onChange={value => handleInputChange('type', value)}
-          style={{width: '50%'}}
+          style={{ width: '50%' }}
         />
         {
           inputs.type === 3 && (
             <>
-              <div style={{marginTop: 10}}>
+              <div style={{ marginTop: 10 }}>
                 <Banner type={"warning"} description={
                   <>
                     注意,<strong>模型部署名称必须和模型名称保持一致</strong>,因为 One API 会把请求体中的
                     model
                     参数替换为你的部署名称(模型名称中的点会被剔除),<a target='_blank'
                       href='https://github.com/songquanpeng/one-api/issues/133?notification_referrer_id=NT_kwDOAmJSYrM2NjIwMzI3NDgyOjM5OTk4MDUw#issuecomment-1571602271'>图片演示</a>。
                   </>
                 }>
                 </Banner>
               </div>
-              <div style={{marginTop: 10}}>
+              <div style={{ marginTop: 10 }}>
                 <Typography.Text strong>AZURE_OPENAI_ENDPOINT:</Typography.Text>
               </div>
               <Input
                 label='AZURE_OPENAI_ENDPOINT'
                 name='azure_base_url'
                 placeholder={'请输入 AZURE_OPENAI_ENDPOINT,例如:https://docs-test-001.openai.azure.com'}
                 onChange={value => {
                   handleInputChange('base_url', value)
                 }}
                 value={inputs.base_url}
                 autoComplete='new-password'
               />
-              <div style={{marginTop: 10}}>
+              <div style={{ marginTop: 10 }}>
                 <Typography.Text strong>默认 API 版本:</Typography.Text>
               </div>
               <Input
                 label='默认 API 版本'
                 name='azure_other'
                 placeholder={'请输入默认 API 版本,例如:2024-03-01-preview,该配置可以被实际的请求查询参数所覆盖'}
                 onChange={value => {
                   handleInputChange('other', value)
                 }}
                 value={inputs.other}
                 autoComplete='new-password'
               />
             </>
           )
         }
         {
           inputs.type === 8 && (
             <>
-              <div style={{marginTop: 10}}>
+              <div style={{ marginTop: 10 }}>
                 <Typography.Text strong>Base URL:</Typography.Text>
               </div>
               <Input
                 name='base_url'
                 placeholder={'请输入自定义渠道的 Base URL'}
                 onChange={value => {
                   handleInputChange('base_url', value)
                 }}
                 value={inputs.base_url}
                 autoComplete='new-password'
               />
             </>
           )
         }
-        <div style={{marginTop: 10}}>
+        <div style={{ marginTop: 10 }}>
           <Typography.Text strong>名称:</Typography.Text>
         </div>
         <Input
           required
           name='name'
           placeholder={'请为渠道命名'}
           onChange={value => {
             handleInputChange('name', value)
           }}
           value={inputs.name}
           autoComplete='new-password'
         />
-        <div style={{marginTop: 10}}>
+        <div style={{ marginTop: 10 }}>
           <Typography.Text strong>分组:</Typography.Text>
         </div>
         <Select
           placeholder={'请选择可以使用该渠道的分组'}
           name='groups'
           required
           multiple
           selection
           allowAdditions
           additionLabel={'请在系统设置页面编辑分组倍率以添加新的分组:'}
           onChange={value => {
             handleInputChange('groups', value)
           }}
           value={inputs.groups}
           autoComplete='new-password'
           optionList={groupOptions}
         />
         {
           inputs.type === 18 && (
             <>
-              <div style={{marginTop: 10}}>
+              <div style={{ marginTop: 10 }}>
                 <Typography.Text strong>模型版本:</Typography.Text>
               </div>
               <Input
                 name='other'
                 placeholder={'请输入星火大模型版本,注意是接口地址中的版本号,例如:v2.1'}
                 onChange={value => {
                   handleInputChange('other', value)
                 }}
                 value={inputs.other}
                 autoComplete='new-password'
               />
             </>
           )
         }
         {
           inputs.type === 21 && (
             <>
-              <div style={{marginTop: 10}}>
+              <div style={{ marginTop: 10 }}>
                 <Typography.Text strong>知识库 ID:</Typography.Text>
               </div>
               <Input
                 label='知识库 ID'
                 name='other'
                 placeholder={'请输入知识库 ID,例如:123456'}
                 onChange={value => {
                   handleInputChange('other', value)
                 }}
                 value={inputs.other}
                 autoComplete='new-password'
               />
             </>
           )
         }
-        <div style={{marginTop: 10}}>
+        <div style={{ marginTop: 10 }}>
           <Typography.Text strong>模型:</Typography.Text>
         </div>
         <Select
           placeholder={'请选择该渠道所支持的模型'}
           name='models'
           required
           multiple
           selection
           onChange={value => {
             handleInputChange('models', value)
           }}
           value={inputs.models}
           autoComplete='new-password'
           optionList={modelOptions}
         />
-        <div style={{lineHeight: '40px', marginBottom: '12px'}}>
+        <div style={{ lineHeight: '40px', marginBottom: '12px' }}>
          <Space>
            <Button type='primary' onClick={() => {
              handleInputChange('models', basicModels);
@@ -473,28 +474,41 @@ const EditChannel = (props) => {
           }}>清除所有模型</Button>
         </Space>
         <Input
           addonAfter={
             <Button type='primary' onClick={addCustomModel}>填入</Button>
           }
           placeholder='输入自定义模型名称'
           value={customModel}
           onChange={(value) => {
             setCustomModel(value.trim());
           }}
         />
       </div>
-      <div style={{marginTop: 10}}>
+      <div style={{ marginTop: 10 }}>
         <Typography.Text strong>模型重定向:</Typography.Text>
       </div>
       <TextArea
         placeholder={`此项可选,用于修改请求体中的模型名称,为一个 JSON 字符串,键为请求中模型名称,值为要替换的模型名称,例如:\n${JSON.stringify(MODEL_MAPPING_EXAMPLE, null, 2)}`}
         name='model_mapping'
         onChange={value => {
           handleInputChange('model_mapping', value)
         }}
         autosize
         value={inputs.model_mapping}
         autoComplete='new-password'
+      />
+      <div style={{ marginTop: 10 }}>
+        <Typography.Text strong>系统提示词:</Typography.Text>
+      </div>
+      <TextArea
+        placeholder={`此项可选,用于强制设置给定的系统提示词,请配合自定义模型 & 模型重定向使用,首先创建一个唯一的自定义模型名称并在上面填入,之后将该自定义模型重定向映射到该渠道一个原生支持的模型`}
+        name='system_prompt'
+        onChange={value => {
+          handleInputChange('system_prompt', value)
+        }}
+        autosize
+        value={inputs.system_prompt}
+        autoComplete='new-password'
       />
       <Typography.Text style={{
         color: 'rgba(var(--semi-blue-5), 1)',
@@ -507,116 +521,116 @@ const EditChannel = (props) => {
       }>
         填入模板
       </Typography.Text>
-      <div style={{marginTop: 10}}>
+      <div style={{ marginTop: 10 }}>
         <Typography.Text strong>密钥:</Typography.Text>
       </div>
       {
         batch ?
           <TextArea
             label='密钥'
             name='key'
             required
             placeholder={'请输入密钥,一行一个'}
             onChange={value => {
               handleInputChange('key', value)
             }}
             value={inputs.key}
-            style={{minHeight: 150, fontFamily: 'JetBrains Mono, Consolas'}}
+            style={{ minHeight: 150, fontFamily: 'JetBrains Mono, Consolas' }}
             autoComplete='new-password'
           />
           :
           <Input
             label='密钥'
             name='key'
             required
             placeholder={type2secretPrompt(inputs.type)}
             onChange={value => {
               handleInputChange('key', value)
             }}
             value={inputs.key}
             autoComplete='new-password'
           />
       }
-      <div style={{marginTop: 10}}>
+      <div style={{ marginTop: 10 }}>
         <Typography.Text strong>组织:</Typography.Text>
       </div>
       <Input
         label='组织,可选,不填则为默认组织'
         name='openai_organization'
         placeholder='请输入组织org-xxx'
         onChange={value => {
           handleInputChange('openai_organization', value)
         }}
         value={inputs.openai_organization}
       />
-      <div style={{marginTop: 10, display: 'flex'}}>
+      <div style={{ marginTop: 10, display: 'flex' }}>
         <Space>
           <Checkbox
             name='auto_ban'
             checked={autoBan}
             onChange={
               () => {
                 setAutoBan(!autoBan);
               }
             }
             // onChange={handleInputChange}
           />
           <Typography.Text
             strong>是否自动禁用(仅当自动禁用开启时有效),关闭后不会自动禁用该渠道:</Typography.Text>
         </Space>
       </div>
 
       {
         !isEdit && (
-          <div style={{marginTop: 10, display: 'flex'}}>
+          <div style={{ marginTop: 10, display: 'flex' }}>
             <Space>
               <Checkbox
                 checked={batch}
                 label='批量创建'
                 name='batch'
                 onChange={() => setBatch(!batch)}
               />
               <Typography.Text strong>批量创建</Typography.Text>
             </Space>
           </div>
         )
       }
       {
         inputs.type !== 3 && inputs.type !== 8 && inputs.type !== 22 && (
           <>
-            <div style={{marginTop: 10}}>
+            <div style={{ marginTop: 10 }}>
               <Typography.Text strong>代理:</Typography.Text>
             </div>
             <Input
               label='代理'
               name='base_url'
               placeholder={'此项可选,用于通过代理站来进行 API 调用'}
               onChange={value => {
                 handleInputChange('base_url', value)
               }}
               value={inputs.base_url}
               autoComplete='new-password'
             />
           </>
         )
       }
       {
         inputs.type === 22 && (
           <>
-            <div style={{marginTop: 10}}>
+            <div style={{ marginTop: 10 }}>
               <Typography.Text strong>私有部署地址:</Typography.Text>
             </div>
             <Input
               name='base_url'
               placeholder={'请输入私有部署地址,格式为:https://fastgpt.run/api/openapi'}
               onChange={value => {
                 handleInputChange('base_url', value)
               }}
               value={inputs.base_url}
               autoComplete='new-password'
             />
           </>
         )
       }
 
       </Spin>
@@ -179,6 +179,12 @@ export const CHANNEL_OPTIONS = {
     value: 44,
     color: 'primary'
   },
+  45: {
+    key: 45,
+    text: 'xAI',
+    value: 45,
+    color: 'primary'
+  },
   41: {
     key: 41,
     text: 'Novita',
@@ -595,6 +595,28 @@ const EditModal = ({ open, channelId, onCancel, onOk }) => {
             <FormHelperText id="helper-tex-channel-model_mapping-label"> {inputPrompt.model_mapping} </FormHelperText>
           )}
         </FormControl>
+        <FormControl fullWidth error={Boolean(touched.system_prompt && errors.system_prompt)} sx={{ ...theme.typography.otherInput }}>
+          {/* <InputLabel htmlFor="channel-model_mapping-label">{inputLabel.model_mapping}</InputLabel> */}
+          <TextField
+            multiline
+            id="channel-system_prompt-label"
+            label={inputLabel.system_prompt}
+            value={values.system_prompt}
+            name="system_prompt"
+            onBlur={handleBlur}
+            onChange={handleChange}
+            aria-describedby="helper-text-channel-system_prompt-label"
+            minRows={5}
+            placeholder={inputPrompt.system_prompt}
+          />
+          {touched.system_prompt && errors.system_prompt ? (
+            <FormHelperText error id="helper-tex-channel-system_prompt-label">
+              {errors.system_prompt}
+            </FormHelperText>
+          ) : (
+            <FormHelperText id="helper-tex-channel-system_prompt-label"> {inputPrompt.system_prompt} </FormHelperText>
+          )}
+        </FormControl>
         <DialogActions>
           <Button onClick={onCancel}>取消</Button>
           <Button disableElevation disabled={isSubmitting} type="submit" variant="contained" color="primary">
@@ -18,6 +18,7 @@ const defaultConfig = {
     other: '其他参数',
     models: '模型',
     model_mapping: '模型映射关系',
+    system_prompt: '系统提示词',
     groups: '用户组',
     config: null
   },
@@ -30,6 +31,7 @@ const defaultConfig = {
     models: '请选择该渠道所支持的模型',
     model_mapping:
       '请输入要修改的模型映射关系,格式为:api请求模型ID:实际转发给渠道的模型ID,使用JSON数组表示,例如:{"gpt-3.5": "gpt-35"}',
+    system_prompt: '此项可选,用于强制设置给定的系统提示词,请配合自定义模型 & 模型重定向使用,首先创建一个唯一的自定义模型名称并在上面填入,之后将该自定义模型重定向映射到该渠道一个原生支持的模型',
     groups: '请选择该渠道所支持的用户组',
     config: null
   },
@@ -223,6 +225,9 @@ const typeConfig = {
     },
     modelGroup: 'anthropic'
   },
+  45: {
+    modelGroup: 'xai'
+  },
 };
 
 export { defaultConfig, typeConfig };
@@ -30,6 +30,7 @@ export const CHANNEL_OPTIONS = [
   { key: 42, text: 'VertexAI', value: 42, color: 'blue' },
   { key: 43, text: 'Proxy', value: 43, color: 'blue' },
   { key: 44, text: 'SiliconFlow', value: 44, color: 'blue' },
+  { key: 45, text: 'xAI', value: 45, color: 'blue' },
   { key: 8, text: '自定义渠道', value: 8, color: 'pink' },
   { key: 22, text: '知识库:FastGPT', value: 22, color: 'blue' },
   { key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple' },
@@ -43,6 +43,7 @@ const EditChannel = () => {
     base_url: '',
     other: '',
     model_mapping: '',
+    system_prompt: '',
     models: [],
     groups: ['default']
   };
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
inputs.type !== 43 && (
|
inputs.type !== 43 && (<>
|
||||||
<Form.Field>
|
<Form.Field>
|
||||||
<Form.TextArea
|
<Form.TextArea
|
||||||
label='模型重定向'
|
label='模型重定向'
|
||||||
@ -437,6 +438,18 @@ const EditChannel = () => {
|
|||||||
autoComplete='new-password'
|
autoComplete='new-password'
|
||||||
/>
|
/>
|
||||||
</Form.Field>
|
</Form.Field>
|
||||||
|
<Form.Field>
|
||||||
|
<Form.TextArea
|
||||||
|
label='系统提示词'
|
||||||
|
placeholder={`此项可选,用于强制设置给定的系统提示词,请配合自定义模型 & 模型重定向使用,首先创建一个唯一的自定义模型名称并在上面填入,之后将该自定义模型重定向映射到该渠道一个原生支持的模型`}
|
||||||
|
name='system_prompt'
|
||||||
|
onChange={handleInputChange}
|
||||||
|
value={inputs.system_prompt}
|
||||||
|
style={{ minHeight: 150, fontFamily: 'JetBrains Mono, Consolas' }}
|
||||||
|
autoComplete='new-password'
|
||||||
|
/>
|
||||||
|
</Form.Field>
|
||||||
|
</>
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
|