mirror of
https://github.com/songquanpeng/one-api.git
synced 2025-09-17 09:16:36 +08:00
* fix:修复在渠道配置中设置模型重定向时,temperature为0被忽略的问题 * fix: set optional fields to pointer type --------- Co-authored-by: JustSong <songquanpeng@foxmail.com>
30 lines
1.1 KiB
Go
30 lines
1.1 KiB
Go
package aws
|
|
|
|
// Request is the request to AWS Llama3.
//
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-meta.html
type Request struct {
	// Prompt is the text sent to the model.
	Prompt string `json:"prompt"`
	// MaxGenLen caps the number of tokens to generate; with omitempty a
	// zero value is simply not sent and the service default applies.
	MaxGenLen int `json:"max_gen_len,omitempty"`
	// Temperature is a pointer so that an explicit 0 is still serialized:
	// with a plain float64, omitempty would drop the zero value.
	Temperature *float64 `json:"temperature,omitempty"`
	// TopP is a pointer for the same reason as Temperature.
	TopP *float64 `json:"top_p,omitempty"`
}
|
|
|
|
// Response is the response from AWS Llama3.
//
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-meta.html
type Response struct {
	// Generation is the generated text.
	Generation string `json:"generation"`
	// PromptTokenCount is the number of tokens in the input prompt.
	PromptTokenCount int `json:"prompt_token_count"`
	// GenerationTokenCount is the number of tokens generated.
	GenerationTokenCount int `json:"generation_token_count"`
	// StopReason reports why generation ended — NOTE(review): presumably
	// values like "stop"/"length" per the Bedrock docs; confirm against
	// the linked model-parameters page.
	StopReason string `json:"stop_reason"`
}
|
|
|
|
// StreamResponse is a single streamed chunk from AWS Llama3. Example payload:
// {'generation': 'Hi', 'prompt_token_count': 15, 'generation_token_count': 1, 'stop_reason': None}
type StreamResponse struct {
	// Generation is the text fragment carried by this chunk.
	Generation string `json:"generation"`
	// PromptTokenCount is the number of tokens in the input prompt.
	PromptTokenCount int `json:"prompt_token_count"`
	// GenerationTokenCount is the number of tokens generated so far.
	GenerationTokenCount int `json:"generation_token_count"`
	// StopReason is empty for intermediate chunks (null in the sample
	// above) and set on the final chunk.
	StopReason string `json:"stop_reason"`
}
|