Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git
Merge pull request #2241 from Yidadaa/bugfix-0704
feat: add top p config
@@ -50,6 +50,7 @@ export class ChatGPTApi implements LLMApi {
       temperature: modelConfig.temperature,
       presence_penalty: modelConfig.presence_penalty,
       frequency_penalty: modelConfig.frequency_penalty,
+      top_p: modelConfig.top_p,
     };
 
     console.log("[Request] openai payload: ", requestPayload);
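For reference, top_p (nucleus sampling) now travels in the chat completion request next to temperature and the penalty settings. The sketch below only illustrates the resulting payload shape; the fields follow the hunk above plus the standard OpenAI chat parameters, and ChatRequestPayload is a hypothetical name, not a type from the codebase.

// Hypothetical shape of the fields sent to the chat completions endpoint.
interface ChatRequestPayload {
  model: string;
  messages: { role: "system" | "user" | "assistant"; content: string }[];
  stream?: boolean;
  temperature: number;       // sampling randomness
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;             // nucleus sampling cutoff, kept in [0, 1]
}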
@@ -888,7 +888,8 @@ export function Chat() {
           const showActions =
             !isUser &&
             i > 0 &&
-            !(message.preview || message.content.length === 0);
+            !(message.preview || message.content.length === 0) &&
+            i >= context.length; // do not show actions for context prompts
           const showTyping = message.preview || message.streaming;
 
           const shouldShowClearContextDivider = i === clearContextIndex - 1;
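Read as a standalone predicate, the new condition hides the per-message action buttons for the leading context (mask) prompts, which occupy indices 0 .. context.length - 1 in the rendered list. The helper below merely restates the boolean logic from the hunk, with contextLength standing in for context.length.

// Sketch: when to show per-message action buttons after this change.
function shouldShowActions(
  isUser: boolean,
  i: number,
  contextLength: number,
  message: { preview?: boolean; content: string },
): boolean {
  return (
    !isUser &&
    i > 0 &&
    !(message.preview || message.content.length === 0) &&
    i >= contextLength // do not show actions for context prompts
  );
}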
@@ -48,6 +48,25 @@ export function ModelConfigList(props: {
           }}
         ></InputRange>
       </ListItem>
+      <ListItem
+        title={Locale.Settings.TopP.Title}
+        subTitle={Locale.Settings.TopP.SubTitle}
+      >
+        <InputRange
+          value={(props.modelConfig.top_p ?? 1).toFixed(1)}
+          min="0"
+          max="1"
+          step="0.1"
+          onChange={(e) => {
+            props.updateConfig(
+              (config) =>
+                (config.top_p = ModalConfigValidator.top_p(
+                  e.currentTarget.valueAsNumber,
+                )),
+            );
+          }}
+        ></InputRange>
+      </ListItem>
       <ListItem
         title={Locale.Settings.MaxTokens.Title}
         subTitle={Locale.Settings.MaxTokens.SubTitle}
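One detail worth noting in the new slider: the displayed value uses props.modelConfig.top_p ?? 1, so configurations persisted before this field existed fall back to the default of 1 instead of rendering NaN. A minimal illustration, with modelConfig standing in for props.modelConfig:

// Sketch of the fallback for configs saved before top_p existed.
const modelConfig: { top_p?: number } = {};             // older persisted config
const displayed = (modelConfig.top_p ?? 1).toFixed(1);  // "1.0"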
@@ -214,6 +214,10 @@ const cn = {
       Title: "随机性 (temperature)",
       SubTitle: "值越大,回复越随机",
     },
+    TopP: {
+      Title: "核采样 (top_p)",
+      SubTitle: "与随机性类似,但不要和随机性一起更改",
+    },
     MaxTokens: {
       Title: "单次回复限制 (max_tokens)",
       SubTitle: "单次交互所用的最大 Token 数",
@@ -215,6 +215,10 @@ const en: LocaleType = {
       Title: "Temperature",
       SubTitle: "A larger value makes the output more random",
     },
+    TopP: {
+      Title: "Top P",
+      SubTitle: "Do not alter this value together with temperature",
+    },
     MaxTokens: {
       Title: "Max Tokens",
       SubTitle: "Maximum length of input tokens and generated tokens",
@@ -249,7 +253,7 @@ const en: LocaleType = {
   },
   Context: {
     Toast: (x: any) => `With ${x} contextual prompts`,
-    Edit: "Contextual and Memory Prompts",
+    Edit: "Current Chat Settings",
     Add: "Add a Prompt",
     Clear: "Context Cleared",
     Revert: "Revert",
@@ -33,6 +33,7 @@ export const DEFAULT_CONFIG = {
   modelConfig: {
     model: "gpt-3.5-turbo" as ModelType,
     temperature: 0.5,
+    top_p: 1,
     max_tokens: 2000,
     presence_penalty: 0,
     frequency_penalty: 0,
@@ -158,6 +159,9 @@ export const ModalConfigValidator = {
   temperature(x: number) {
     return limitNumber(x, 0, 1, 1);
   },
+  top_p(x: number) {
+    return limitNumber(x, 0, 1, 1);
+  },
 };
 
 export const useAppConfig = create<ChatConfigStore>()(
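ModalConfigValidator.top_p clamps the slider input to the valid nucleus-sampling range. limitNumber itself is not part of this diff; the version below is an assumed sketch of a clamp helper with the signature (x, min, max, defaultValue), shown only to make the validator's behaviour concrete.

// Assumed clamp helper (not shown in this diff): keeps x within [min, max]
// and falls back to defaultValue for non-numeric input.
function limitNumber(x: number, min: number, max: number, defaultValue: number): number {
  if (typeof x !== "number" || isNaN(x)) return defaultValue;
  return Math.min(max, Math.max(min, x));
}

// With such a helper, top_p(1.4) === 1, top_p(-0.2) === 0, top_p(NaN) === 1.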
@@ -177,15 +181,16 @@ export const useAppConfig = create<ChatConfigStore>()(
     }),
     {
       name: StoreKey.Config,
-      version: 3.2,
+      version: 3.3,
       migrate(persistedState, version) {
-        if (version === 3.2) return persistedState as any;
+        if (version === 3.3) return persistedState as any;
 
         const state = persistedState as ChatConfig;
         state.modelConfig.sendMemory = true;
         state.modelConfig.historyMessageCount = 4;
         state.modelConfig.compressMessageLengthThreshold = 1000;
         state.modelConfig.frequency_penalty = 0;
+        state.modelConfig.top_p = 1;
         state.modelConfig.template = DEFAULT_INPUT_TEMPLATE;
         state.dontShowMaskSplashScreen = false;
 
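Bumping the persisted-config version from 3.2 to 3.3 is what makes the new default reach existing users: zustand's persist middleware calls migrate(state, version) whenever the stored version is older than the declared one, so the early return no longer fires for 3.2 states and the backfill block runs once. A hedged sketch of the effect (migrateSketch and PersistedV32 are illustrative names only):

// Sketch: a config saved at version 3.2 gains top_p on first load after the bump.
type PersistedV32 = { modelConfig: { temperature: number; top_p?: number } };

function migrateSketch(persisted: PersistedV32, version: number): PersistedV32 {
  if (version === 3.3) return persisted;  // already migrated, nothing to do
  persisted.modelConfig.top_p ??= 1;      // backfilled alongside the other defaults
  return persisted;
}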
@@ -127,7 +127,7 @@ export const usePromptStore = create<PromptStore>()(
       search(text) {
         if (text.length === 0) {
           // return all prompts
-          return SearchService.allPrompts.concat([...get().getUserPrompts()]);
+          return get().getUserPrompts().concat(SearchService.builtinPrompts);
         }
         return SearchService.search(text) as Prompt[];
       },
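The prompt-store change only affects the empty-query case: user-defined prompts are now listed ahead of the built-in library instead of after it. A small illustration, where userPrompts and builtinPrompts stand in for get().getUserPrompts() and SearchService.builtinPrompts:

// Sketch of the new empty-query ordering: user prompts come first.
const userPrompts = [{ title: "My prompt", content: "..." }];
const builtinPrompts = [{ title: "Built-in prompt", content: "..." }];
const emptyQueryResults = userPrompts.concat(builtinPrompts);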