Compare commits

...

10 Commits

Author SHA1 Message Date
H0llyW00dzZ
550a16152e
Merge 138548ad45 into 995bef73de 2025-08-19 14:32:52 -04:00
RiverRay
995bef73de
Merge pull request #6599 from DreamRivulet/add-support-GPT5
add: model gpt-5
2025-08-10 17:21:12 +08:00
Sam
38ac502d80 Add support for GPT5 2025-08-09 17:03:49 +08:00
Sam
0511808900 use max_completion_tokens 2025-08-09 17:03:49 +08:00
Sam
42eff644b4 use max_completion_tokens 2025-08-09 17:03:49 +08:00
Sam
8ae6883784 add gpt-5 2025-08-09 17:03:49 +08:00
Sam
c0f2ab6de3 add gpt-5 2025-08-09 17:03:06 +08:00
H0llyW00dzZ
138548ad45
Feat [Terminal] [Chats] enhance local storage
- [+] feat(chat.tsx): enhance local storage handling for chat input
2024-02-05 22:10:02 +07:00
H0llyW00dzZ
e05af75891
Fix [UI/UX] [Chat] [Front End] React Warning
- [+] refactor(chat.tsx): capture current input reference value for use in component unmount or dependencies change
2024-02-05 20:42:12 +07:00
H0llyW00dzZ
67ce78cac2
Improve [UI/UX] [Chat] [Front End] unfinished input
- [+] refactor(chat.tsx): improve unfinished input handling in chat component
- [+] feat(chat.tsx): add session id dependency to useEffect for better session handling
- [+] feat(chat.tsx): skip saving commands to local storage
2024-02-05 20:42:12 +07:00
3 changed files with 37 additions and 7 deletions

View File

@@ -200,6 +200,7 @@ export class ChatGPTApi implements LLMApi {
       options.config.model.startsWith("o1") ||
       options.config.model.startsWith("o3") ||
       options.config.model.startsWith("o4-mini");
+    const isGpt5 = options.config.model.startsWith("gpt-5");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -230,7 +231,7 @@ export class ChatGPTApi implements LLMApi {
         messages,
         stream: options.config.stream,
         model: modelConfig.model,
-        temperature: !isO1OrO3 ? modelConfig.temperature : 1,
+        temperature: (!isO1OrO3 && !isGpt5) ? modelConfig.temperature : 1,
         presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
         frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
         top_p: !isO1OrO3 ? modelConfig.top_p : 1,
@@ -238,7 +239,13 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };

-    if (isO1OrO3) {
+    if (isGpt5) {
+      // Remove max_tokens if present
+      delete requestPayload.max_tokens;
+      // Send max_completion_tokens instead
+      requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
+    } else if (isO1OrO3) {
       // by default the o1/o3 models will not attempt to produce output that includes markdown formatting
       // manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
       // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
@@ -251,8 +258,9 @@ export class ChatGPTApi implements LLMApi {
       requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
     }

     // add max_tokens to vision model
-    if (visionModel && !isO1OrO3) {
+    if (visionModel && !isO1OrO3 && !isGpt5) {
       requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
     }
   }
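
Taken together, the hunks above route GPT-5 down the same path as the o-series reasoning models: temperature is pinned to 1, and the token limit is sent as max_completion_tokens rather than max_tokens. Below is a condensed, standalone sketch of that branching; the trimmed-down ModelConfig and RequestPayload shapes and the buildPayload helper are illustrative, not the repo's actual types.

// Sketch only: mirrors the diff's branching, not the real client code.
interface ModelConfig {
  model: string;
  temperature: number;
  max_tokens: number;
}

interface RequestPayload {
  model: string;
  temperature: number;
  max_tokens?: number;
  max_completion_tokens?: number;
}

function buildPayload(cfg: ModelConfig): RequestPayload {
  const isO1OrO3 =
    cfg.model.startsWith("o1") ||
    cfg.model.startsWith("o3") ||
    cfg.model.startsWith("o4-mini");
  const isGpt5 = cfg.model.startsWith("gpt-5");

  const payload: RequestPayload = {
    model: cfg.model,
    // Reasoning-style models only accept the default temperature of 1.
    temperature: !isO1OrO3 && !isGpt5 ? cfg.temperature : 1,
  };

  if (isGpt5 || isO1OrO3) {
    // These models take max_completion_tokens in place of max_tokens.
    payload.max_completion_tokens = cfg.max_tokens;
  } else {
    payload.max_tokens = cfg.max_tokens;
  }
  return payload;
}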

View File

@@ -1494,20 +1494,36 @@ function _Chat() {
   // remember unfinished input
   useEffect(() => {
-    // try to load from local storage
+    // Define the key for storing unfinished input based on the session ID.
     const key = UNFINISHED_INPUT(session.id);
+    // Attempt to load unfinished input from local storage.
     const mayBeUnfinishedInput = localStorage.getItem(key);
     if (mayBeUnfinishedInput && userInput.length === 0) {
       setUserInput(mayBeUnfinishedInput);
+      // Clear the unfinished input from local storage after loading it.
       localStorage.removeItem(key);
     }

-    const dom = inputRef.current;
+    // Capture the current value of the input reference.
+    const currentInputRef = inputRef.current;

+    // This function will be called when the component unmounts or dependencies change.
     return () => {
-      localStorage.setItem(key, dom?.value ?? "");
+      // Use the captured value from the input reference.
+      const currentInputValue = currentInputRef?.value ?? "";
+      // Save the input to local storage only if it's not empty and not a command.
+      if (currentInputValue && !currentInputValue.startsWith(ChatCommandPrefix)) {
+        localStorage.setItem(key, currentInputValue);
+      } else {
+        // If there's no value, ensure we don't create an empty key in local storage.
+        localStorage.removeItem(key);
+      }
     };
+    // The effect should depend on the session ID to ensure it runs when the session changes.
     // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, []);
+  }, [session.id]);

   const handlePaste = useCallback(
     async (event: React.ClipboardEvent<HTMLTextAreaElement>) => {
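
Stripped of React specifics, the chat.tsx change above is a draft-persistence pattern: restore a per-session draft once and consume it, then write the current value back on teardown only when it is non-empty and not a chat command. (Capturing inputRef.current inside the effect also avoids the React warning about reading a mutated ref during cleanup, per commit e05af75891.) A framework-free sketch follows; the helper names and key format are illustrative, and ChatCommandPrefix is assumed to be the ":" command prefix.

// Sketch of the draft-persistence pattern; names and key format are illustrative.
const ChatCommandPrefix = ":"; // assumed value of the app's constant
const UNFINISHED_INPUT = (id: string) => "unfinished_input_" + id; // assumed key format

// Load a saved draft for this session, consuming it so it is not restored twice.
function restoreUnfinishedInput(sessionId: string): string | null {
  const key = UNFINISHED_INPUT(sessionId);
  const saved = localStorage.getItem(key);
  if (saved) {
    localStorage.removeItem(key);
  }
  return saved;
}

// Persist the draft on teardown; skip empty input and chat commands,
// and clear any stale entry instead of writing an empty value.
function persistUnfinishedInput(sessionId: string, value: string): void {
  const key = UNFINISHED_INPUT(sessionId);
  if (value && !value.startsWith(ChatCommandPrefix)) {
    localStorage.setItem(key, value);
  } else {
    localStorage.removeItem(key);
  }
}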

View File

@@ -493,6 +493,7 @@ export const VISION_MODEL_REGEXES = [
   /o3/,
   /o4-mini/,
   /grok-4/i,
+  /gpt-5/,
 ];

 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
@@ -517,6 +518,11 @@ const openaiModels = [
   "gpt-4.1-nano-2025-04-14",
   "gpt-4.5-preview",
   "gpt-4.5-preview-2025-02-27",
+  "gpt-5-chat",
+  "gpt-5-mini",
+  "gpt-5-nano",
+  "gpt-5",
+  "gpt-5-chat-2025-01-01-preview",
   "gpt-4o",
   "gpt-4o-2024-05-13",
   "gpt-4o-2024-08-06",