Mirror of https://github.com/linux-do/new-api.git (synced 2025-11-04 13:23:42 +08:00)

feat: add claude-3.7-sonnet model support
@@ -23,133 +23,136 @@ const (
 var defaultModelRatio = map[string]float64{
 	//"midjourney":                50,
 	"gpt-4-gizmo-*":                       15,
 	"g-*":                                 15,
 	"gpt-4":                               15,
 	"gpt-4-0314":                          15,
 	"gpt-4-0613":                          15,
 	"gpt-4-32k":                           30,
 	"gpt-4-32k-0314":                      30,
 	"gpt-4-32k-0613":                      30,
 	"gpt-4o-mini":                         0.075, // $0.00015 / 1K tokens
 	"gpt-4o-mini-2024-07-18":              0.075,
 	"chatgpt-4o-latest":                   2.5,  // $0.01 / 1K tokens
 	"gpt-4o":                              1.25, // $0.005 / 1K tokens
 	"gpt-4o-2024-05-13":                   2.5,  // $0.005 / 1K tokens
 	"gpt-4o-2024-08-06":                   1.25, // $0.01 / 1K tokens
 	"gpt-4o-2024-11-20":                   1.25, // $0.01 / 1K tokens
 	"o1-preview":                          7.5,
 	"o1-preview-2024-09-12":               7.5,
 	"o1-mini":                             0.55, // $0.0011 / 1K tokens
 	"o1-mini-2024-09-12":                  0.55,
 	"o3-mini":                             0.55,
 	"o3-mini-2025-01-31":                  0.55,
 	"gpt-4-turbo":                         5,    // $0.01 / 1K tokens
 	"gpt-4-turbo-2024-04-09":              5,    // $0.01 / 1K tokens
 	"gpt-4-1106-preview":                  5,    // $0.01 / 1K tokens
 	"gpt-4-0125-preview":                  5,    // $0.01 / 1K tokens
 	"gpt-4-turbo-preview":                 5,    // $0.01 / 1K tokens
 	"gpt-4-vision-preview":                5,    // $0.01 / 1K tokens
 	"gpt-4-1106-vision-preview":           5,    // $0.01 / 1K tokens
 	"gpt-3.5-turbo":                       0.25, // $0.0005 / 1K tokens
 	"gpt-3.5-turbo-0301":                  0.75,
 	"gpt-3.5-turbo-0613":                  0.75,
 	"gpt-3.5-turbo-16k":                   1.5, // $0.003 / 1K tokens
 	"gpt-3.5-turbo-16k-0613":              1.5,
 	"gpt-3.5-turbo-instruct":              0.75, // $0.0015 / 1K tokens
 	"gpt-3.5-turbo-1106":                  0.5,  // $0.001 / 1K tokens
 	"gpt-3.5-turbo-0125":                  0.25,
 	"babbage-002":                         0.2, // $0.0004 / 1K tokens
 	"davinci-002":                         1,   // $0.002 / 1K tokens
 	"text-ada-001":                        0.2,
 	"text-babbage-001":                    0.25,
 	"text-curie-001":                      1,
 	"text-davinci-002":                    10,
 	"text-davinci-003":                    10,
 	"text-davinci-edit-001":               10,
 	"code-davinci-edit-001":               10,
 	"whisper-1":                           15,  // $0.006 / minute -> $0.006 / 150 words -> $0.006 / 200 tokens -> $0.03 / 1k tokens
 	"tts-1":                               7.5, // 1k characters -> $0.015
 	"tts-1-1106":                          7.5, // 1k characters -> $0.015
 	"tts-1-hd":                            15,  // 1k characters -> $0.03
 	"tts-1-hd-1106":                       15,  // 1k characters -> $0.03
 	"davinci":                             10,
 	"curie":                               10,
 	"babbage":                             10,
 	"ada":                                 10,
 	"text-embedding-3-small":              0.01,
 	"text-embedding-3-large":              0.065,
 	"text-embedding-ada-002":              0.05,
 	"text-search-ada-doc-001":             10,
 	"text-moderation-stable":              0.1,
 	"text-moderation-latest":              0.1,
 	"claude-instant-1":                    0.4, // $0.8 / 1M tokens
 	"claude-2.0":                          4,   // $8 / 1M tokens
 	"claude-2.1":                          4,   // $8 / 1M tokens
+	"claude-3-7-sonnet-20250219":          1.5,
+	"claude-3-7-sonnet-20250219-thinking": 1.5,
+	"claude-3-5-haiku-20241022":           0.4,
 	"claude-3-haiku-20240307":             0.125, // $0.25 / 1M tokens
 	"claude-3-5-sonnet-20240620":          1.5,   // $3 / 1M tokens
 	"claude-3-5-sonnet-20241022":          1.5,   // $3 / 1M tokens
 	"claude-3-sonnet-20240229":            1.5,   // $3 / 1M tokens
 	"claude-3-opus-20240229":              7.5,   // $15 / 1M tokens
 	"ERNIE-4.0-8K":                        0.120 * RMB,
 	"ERNIE-3.5-8K":                        0.012 * RMB,
 	"ERNIE-3.5-8K-0205":                   0.024 * RMB,
 	"ERNIE-3.5-8K-1222":                   0.012 * RMB,
 	"ERNIE-Bot-8K":                        0.024 * RMB,
 	"ERNIE-3.5-4K-0205":                   0.012 * RMB,
 	"ERNIE-Speed-8K":                      0.004 * RMB,
 	"ERNIE-Speed-128K":                    0.004 * RMB,
 	"ERNIE-Lite-8K-0922":                  0.008 * RMB,
 	"ERNIE-Lite-8K-0308":                  0.003 * RMB,
 	"ERNIE-Tiny-8K":                       0.001 * RMB,
 	"BLOOMZ-7B":                           0.004 * RMB,
 	"Embedding-V1":                        0.002 * RMB,
 	"bge-large-zh":                        0.002 * RMB,
 	"bge-large-en":                        0.002 * RMB,
 	"tao-8k":                              0.002 * RMB,
 	"PaLM-2":                              1,
 	"gemini-pro":                          1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
 	"gemini-pro-vision":                   1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
 	"gemini-1.0-pro-vision-001":           1,
 	"gemini-1.0-pro-001":                  1,
 	"gemini-1.5-pro-latest":               1.75, // $3.5 / 1M tokens
 	"gemini-1.5-pro-exp-0827":             1.75, // $3.5 / 1M tokens
 	"gemini-1.5-flash-latest":             1,
 	"gemini-1.5-flash-exp-0827":           1,
 	"gemini-1.0-pro-latest":               1,
 	"gemini-1.0-pro-vision-latest":        1,
 	"gemini-ultra":                        1,
 	"chatglm_turbo":                       0.3572,     // ¥0.005 / 1k tokens
 	"chatglm_pro":                         0.7143,     // ¥0.01 / 1k tokens
 	"chatglm_std":                         0.3572,     // ¥0.005 / 1k tokens
 	"chatglm_lite":                        0.1429,     // ¥0.002 / 1k tokens
 	"glm-4":                               7.143,      // ¥0.1 / 1k tokens
 	"glm-4v":                              0.05 * RMB, // ¥0.05 / 1k tokens
 	"glm-4-alltools":                      0.1 * RMB,  // ¥0.1 / 1k tokens
 	"glm-3-turbo":                         0.3572,
 	"glm-4-plus":                          0.05 * RMB,
 	"glm-4-0520":                          0.1 * RMB,
 	"glm-4-air":                           0.001 * RMB,
 	"glm-4-airx":                          0.01 * RMB,
 	"glm-4-long":                          0.001 * RMB,
 	"glm-4-flash":                         0,
 	"glm-4v-plus":                         0.01 * RMB,
 	"qwen-turbo":                          0.8572, // ¥0.012 / 1k tokens
 	"qwen-plus":                           10,     // ¥0.14 / 1k tokens
 	"text-embedding-v1":                   0.05,   // ¥0.0007 / 1k tokens
 	"SparkDesk-v1.1":                      1.2858, // ¥0.018 / 1k tokens
 	"SparkDesk-v2.1":                      1.2858, // ¥0.018 / 1k tokens
 	"SparkDesk-v3.1":                      1.2858, // ¥0.018 / 1k tokens
 	"SparkDesk-v3.5":                      1.2858, // ¥0.018 / 1k tokens
 	"SparkDesk-v4.0":                      1.2858,
 	"360GPT_S2_V9":                        0.8572, // ¥0.012 / 1k tokens
 	"360gpt-turbo":                        0.0858, // ¥0.0012 / 1k tokens
 	"360gpt-turbo-responsibility-8k":      0.8572, // ¥0.012 / 1k tokens
 	"360gpt-pro":                          0.8572, // ¥0.012 / 1k tokens
 	"embedding-bert-512-v1":               0.0715, // ¥0.001 / 1k tokens
 	"embedding_s1_v1":                     0.0715, // ¥0.001 / 1k tokens
 	"semantic_similarity_s1_v1":           0.0715, // ¥0.001 / 1k tokens
 	"hunyuan":                             7.143,  // ¥0.1 / 1k tokens  // https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0
 	// https://platform.lingyiwanwu.com/docs#-计费单元
 	// 已经按照 7.2 来换算美元价格
 	"yi-34b-chat-0205":       0.18,

@@ -36,6 +36,13 @@ type GeneralOpenAIRequest struct {
 	Dimensions          int            `json:"dimensions,omitempty"`
 	ParallelToolCalls   bool           `json:"parallel_tool_calls,omitempty"`
 	EncodingFormat      any            `json:"encoding_format,omitempty"`
+
+	Thinking *Thinking `json:"thinking,omitempty"`
 }
+
+type Thinking struct {
+	Type         string `json:"type"`
+	BudgetTokens int    `json:"budget_tokens"`
+}
 
 type OpenAITools struct {

@@ -77,11 +84,12 @@ func (r GeneralOpenAIRequest) ParseInput() []string {
 }
 
 type Message struct {
-	Role       string          `json:"role"`
-	Content    json.RawMessage `json:"content"`
-	Name       *string         `json:"name,omitempty"`
-	ToolCalls  any             `json:"tool_calls,omitempty"`
-	ToolCallId string          `json:"tool_call_id,omitempty"`
+	Role             string          `json:"role"`
+	Content          json.RawMessage `json:"content"`
+	ReasoningContent *string         `json:"reasoning_content,omitempty"`
+	Name             *string         `json:"name,omitempty"`
+	ToolCalls        any             `json:"tool_calls,omitempty"`
+	ToolCallId       string          `json:"tool_call_id,omitempty"`
 }
 
 type MediaMessage struct {

@@ -62,9 +62,10 @@ type ChatCompletionsStreamResponseChoice struct {
 }
 
 type ChatCompletionsStreamResponseChoiceDelta struct {
-	Content   *string    `json:"content,omitempty"`
-	Role      string     `json:"role,omitempty"`
-	ToolCalls []ToolCall `json:"tool_calls,omitempty"`
+	Content          *string    `json:"content,omitempty"`
+	ReasoningContent *string    `json:"reasoning_content,omitempty"`
+	Role             string     `json:"role,omitempty"`
+	ToolCalls        []ToolCall `json:"tool_calls,omitempty"`
 }
 
 func (c *ChatCompletionsStreamResponseChoiceDelta) SetContentString(s string) {

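The new reasoning_content field on the stream delta is where OpenAI-compatible clients will see the model's thinking arrive, separately from content. A minimal consumer sketch in Go; the trimmed delta struct and the sample payloads below are illustrative, not the project's full types:

package main

import (
	"encoding/json"
	"fmt"
)

// delta mirrors only the two stream-delta fields this example reads.
type delta struct {
	Content          *string `json:"content,omitempty"`
	ReasoningContent *string `json:"reasoning_content,omitempty"`
}

func main() {
	// Hypothetical delta payloads as they might appear across an SSE stream.
	chunks := []string{
		`{"reasoning_content":"Let me think about this."}`,
		`{"reasoning_content":"\n"}`,
		`{"content":"Final answer."}`,
	}
	var reasoning, answer string
	for _, c := range chunks {
		var d delta
		if err := json.Unmarshal([]byte(c), &d); err != nil {
			continue
		}
		if d.ReasoningContent != nil {
			reasoning += *d.ReasoningContent
		}
		if d.Content != nil {
			answer += *d.Content
		}
	}
	// The reasoning text and the visible answer accumulate independently.
	fmt.Printf("thinking: %q\nanswer: %q\n", reasoning, answer)
}
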
@@ -8,8 +8,11 @@ var ModelList = []string{
 	"claude-3-sonnet-20240229",
 	"claude-3-opus-20240229",
 	"claude-3-haiku-20240307",
+	"claude-3-5-haiku-20241022",
 	"claude-3-5-sonnet-20240620",
 	"claude-3-5-sonnet-20241022",
+	"claude-3-7-sonnet-20250219",
+	"claude-3-7-sonnet-20250219-thinking",
 }
 
 var ChannelName = "claude"

@@ -1,5 +1,7 @@
 package claude
 
+import "one-api/dto"
+
 type ClaudeMetadata struct {
 	UserId string `json:"user_id"`
 }

@@ -11,6 +13,9 @@ type ClaudeMediaMessage struct {
 	Usage       *ClaudeUsage         `json:"usage,omitempty"`
 	StopReason  *string              `json:"stop_reason,omitempty"`
 	PartialJson string               `json:"partial_json,omitempty"`
+	Thinking    string               `json:"thinking,omitempty"`
+	Signature   string               `json:"signature,omitempty"`
+	Delta       string               `json:"delta,omitempty"`
 	// tool_calls
 	Id        string `json:"id,omitempty"`
 	Name      string `json:"name,omitempty"`

@@ -58,9 +63,10 @@ type ClaudeRequest struct {
 	TopP              float64         `json:"top_p,omitempty"`
 	TopK              int             `json:"top_k,omitempty"`
 	//ClaudeMetadata    `json:"metadata,omitempty"`
-	Stream     bool   `json:"stream,omitempty"`
-	Tools      []Tool `json:"tools,omitempty"`
-	ToolChoice any    `json:"tool_choice,omitempty"`
+	Stream     bool          `json:"stream,omitempty"`
+	Tools      []Tool        `json:"tools,omitempty"`
+	ToolChoice any           `json:"tool_choice,omitempty"`
+	Thinking   *dto.Thinking `json:"thinking,omitempty"`
 }
 
 type ClaudeError struct {

@@ -62,6 +62,24 @@ func RequestOpenAI2ClaudeComplete(textRequest dto.GeneralOpenAIRequest) *ClaudeR
 func RequestOpenAI2ClaudeMessage(textRequest dto.GeneralOpenAIRequest) (*ClaudeRequest, error) {
 	claudeTools := make([]Tool, 0, len(textRequest.Tools))
 
+	if strings.HasSuffix(textRequest.Model, "-thinking") {
+		textRequest.Model = strings.TrimSuffix(textRequest.Model, "-thinking")
+
+		if textRequest.MaxTokens == 0 {
+			textRequest.MaxTokens = 4096
+		} else if textRequest.MaxTokens < 1280 {
+			textRequest.MaxTokens = 1280
+		}
+
+		textRequest.TopP = 0
+		textRequest.TopK = 0
+		textRequest.Temperature = 0
+		textRequest.Thinking = &dto.Thinking{
+			Type:         "enabled",
+			BudgetTokens: int(float64(textRequest.MaxTokens) * 0.8),
+		}
+	}
+
 	for _, tool := range textRequest.Tools {
 		claudeTool := Tool{
 			Name:        tool.Function.Name,

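The suffix handling above is the heart of the pseudo-model: a request for claude-3-7-sonnet-20250219-thinking is rewritten to the real model id, max_tokens is given a workable floor, and an Anthropic-style thinking block is attached with 80% of max_tokens as the budget (the actual conversion also zeroes top_p, top_k and temperature). A standalone sketch of that mapping, with the request reduced to just the fields the example needs rather than the full dto.GeneralOpenAIRequest:

package main

import (
	"fmt"
	"strings"
)

// Thinking mirrors the dto.Thinking struct introduced by this commit.
type Thinking struct {
	Type         string `json:"type"`
	BudgetTokens int    `json:"budget_tokens"`
}

// request is a cut-down stand-in for dto.GeneralOpenAIRequest.
type request struct {
	Model     string
	MaxTokens int
	Thinking  *Thinking
}

// applyThinkingSuffix reproduces the "-thinking" mapping in isolation.
// (The real code also zeroes TopP, TopK and Temperature; omitted here.)
func applyThinkingSuffix(r *request) {
	if !strings.HasSuffix(r.Model, "-thinking") {
		return
	}
	r.Model = strings.TrimSuffix(r.Model, "-thinking")
	// Give max_tokens a usable floor before deriving the thinking budget.
	if r.MaxTokens == 0 {
		r.MaxTokens = 4096
	} else if r.MaxTokens < 1280 {
		r.MaxTokens = 1280
	}
	r.Thinking = &Thinking{Type: "enabled", BudgetTokens: int(float64(r.MaxTokens) * 0.8)}
}

func main() {
	r := &request{Model: "claude-3-7-sonnet-20250219-thinking", MaxTokens: 2000}
	applyThinkingSuffix(r)
	// Prints: claude-3-7-sonnet-20250219 max=2000 budget=1600
	fmt.Printf("%s max=%d budget=%d\n", r.Model, r.MaxTokens, r.Thinking.BudgetTokens)
}
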
@@ -113,6 +131,7 @@ func RequestOpenAI2ClaudeMessage(textRequest dto.GeneralOpenAIRequest) (*ClaudeR
 		Stream:        textRequest.Stream,
 		Tools:         claudeTools,
 		ToolChoice:    textRequest.ToolChoice,
+		Thinking:      textRequest.Thinking,
 	}
 	if claudeRequest.MaxTokens == 0 {
 		claudeRequest.MaxTokens = 4096

@@ -334,12 +353,19 @@ func StreamResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) (*
 			if claudeResponse.Delta != nil {
 				choice.Index = claudeResponse.Index
 				choice.Delta.SetContentString(claudeResponse.Delta.Text)
-				if claudeResponse.Delta.Type == "input_json_delta" {
+				switch claudeResponse.Delta.Type {
+				case "input_json_delta":
 					tools = append(tools, dto.ToolCall{
 						Function: dto.FunctionCall{
 							Arguments: claudeResponse.Delta.PartialJson,
 						},
 					})
+				case "signature_delta":
+					reasoningContent := "\n"
+					choice.Delta.ReasoningContent = &reasoningContent
+				case "thinking_delta":
+					reasoningContent := claudeResponse.Delta.Thinking
+					choice.Delta.ReasoningContent = &reasoningContent
 				}
 			}
 		} else if claudeResponse.Type == "message_delta" {

@@ -377,6 +403,8 @@ func ResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) *dto.Ope
 	if len(claudeResponse.Content) > 0 {
 		responseText = claudeResponse.Content[0].Text
 	}
+
+	reasoningContent := ""
 	tools := make([]dto.ToolCall, 0)
 	if reqMode == RequestModeCompletion {
 		content, _ := json.Marshal(strings.TrimPrefix(claudeResponse.Completion, " "))

@@ -393,7 +421,8 @@ func ResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) *dto.Ope
 	} else {
 		fullTextResponse.Id = claudeResponse.Id
 		for _, message := range claudeResponse.Content {
-			if message.Type == "tool_use" {
+			switch message.Type {
+			case "tool_use":
 				args, _ := json.Marshal(message.Input)
 				tools = append(tools, dto.ToolCall{
 					ID:   message.Id,

@@ -403,6 +432,10 @@ func ResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) *dto.Ope
 					Arguments: string(args),
 				},
 			})
+			case "thinking":
+				reasoningContent = message.Thinking
+			case "text":
+				responseText = message.Text
 			}
 		}
 	}

@@ -417,6 +450,7 @@ func ResponseClaude2OpenAI(reqMode int, claudeResponse *ClaudeResponse) *dto.Ope
 	if len(tools) > 0 {
 		choice.Message.ToolCalls = tools
 	}
+	choice.Message.ReasoningContent = &reasoningContent
 	fullTextResponse.Model = claudeResponse.Model
 	choices = append(choices, choice)
 	fullTextResponse.Choices = choices

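Taken together, these changes let a caller select the new pseudo-model through the gateway's OpenAI-compatible API and read the thinking text back from reasoning_content alongside the normal answer. A rough non-streaming client sketch; the base URL, the NEW_API_KEY variable and the /v1/chat/completions path are assumptions about a typical new-api deployment, not something this commit defines:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Build an OpenAI-style chat request that targets the thinking pseudo-model.
	body, _ := json.Marshal(map[string]any{
		"model":      "claude-3-7-sonnet-20250219-thinking",
		"max_tokens": 2048,
		"messages": []map[string]string{
			{"role": "user", "content": "Why is the sky blue?"},
		},
	})
	req, err := http.NewRequest("POST", "http://localhost:3000/v1/chat/completions", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+os.Getenv("NEW_API_KEY"))
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the fields this example cares about.
	var out struct {
		Choices []struct {
			Message struct {
				Content          string  `json:"content"`
				ReasoningContent *string `json:"reasoning_content"`
			} `json:"message"`
		} `json:"choices"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	if len(out.Choices) > 0 {
		if rc := out.Choices[0].Message.ReasoningContent; rc != nil && *rc != "" {
			fmt.Println("thinking:", *rc)
		}
		fmt.Println("answer:", out.Choices[0].Message.Content)
	}
}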