feat: support gemini-pro-vision
@@ -140,10 +140,9 @@ export class ChatGPTApi implements LLMApi {
       presence_penalty: modelConfig.presence_penalty,
       frequency_penalty: modelConfig.frequency_penalty,
       top_p: modelConfig.top_p,
-      max_tokens:
-        modelConfig.model == "gpt-4-vision-preview"
-          ? modelConfig.max_tokens
-          : null,
+      max_tokens: modelConfig.model.includes("vision")
+        ? modelConfig.max_tokens
+        : null,
       // max_tokens: Math.max(modelConfig.max_tokens, 1024),
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
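For clarity, here is a minimal TypeScript sketch of what the change does: max_tokens is now forwarded for any model whose name contains "vision" (so gemini-pro-vision qualifies), instead of only the exact "gpt-4-vision-preview" match. The ModelConfig interface and buildPayload helper below are illustrative assumptions, not the project's actual request-building code.

// Illustrative sketch of the behavior this diff changes; ModelConfig and
// buildPayload are assumptions, not the project's real code.
interface ModelConfig {
  model: string;
  max_tokens: number;
  temperature: number;
}

function buildPayload(modelConfig: ModelConfig, messages: unknown[]) {
  return {
    messages,
    model: modelConfig.model,
    temperature: modelConfig.temperature,
    // Old check matched only the exact name "gpt-4-vision-preview";
    // the substring check also matches models such as "gemini-pro-vision".
    max_tokens: modelConfig.model.includes("vision")
      ? modelConfig.max_tokens
      : null,
  };
}

// "gemini-pro-vision" now receives max_tokens; "gpt-4" still gets null.
console.log(buildPayload({ model: "gemini-pro-vision", max_tokens: 4000, temperature: 0.5 }, []));
console.log(buildPayload({ model: "gpt-4", max_tokens: 4000, temperature: 0.5 }, []));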