From 9bf53ad9e1ed3072794bed27ee2e11b3b9379c4b Mon Sep 17 00:00:00 2001 From: yyhhyyyyyy Date: Fri, 15 Aug 2025 18:39:53 +0800 Subject: [PATCH] fix: resolve reasoningEffort parameter transmission issues --- .../providers/ollamaProvider.ts | 23 +++++++-- .../sqlitePresenter/tables/conversations.ts | 51 ++++++++++++++++--- src/renderer/src/components/TitleView.vue | 17 +++---- src/renderer/src/stores/chat.ts | 5 +- src/shared/presenter.d.ts | 2 + 5 files changed, 73 insertions(+), 25 deletions(-) diff --git a/src/main/presenter/llmProviderPresenter/providers/ollamaProvider.ts b/src/main/presenter/llmProviderPresenter/providers/ollamaProvider.ts index 2d7cda7e8..24ba1d073 100644 --- a/src/main/presenter/llmProviderPresenter/providers/ollamaProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/ollamaProvider.ts @@ -175,9 +175,16 @@ export class OllamaProvider extends BaseLLMProvider { } } - // 处理<think>标签 + // 处理 thinking 字段 const content = response.message?.content || '' - if (content.includes('<think>')) { + const thinking = response.message?.thinking || '' + + if (thinking) { + resultResp.reasoning_content = thinking + resultResp.content = content + } + // 处理<think>标签(其他模型) + else if (content.includes('<think>')) { const thinkStart = content.indexOf('<think>') const thinkEnd = content.indexOf('</think>') @@ -194,7 +201,7 @@ export class OllamaProvider extends BaseLLMProvider { resultResp.content = content } } else { - // 没有think标签,所有内容作为普通内容 + // 没有特殊格式,所有内容作为普通内容 resultResp.content = content } @@ -492,10 +499,10 @@ export class OllamaProvider extends BaseLLMProvider { messages: processedMessages, options: { temperature: temperature || 0.7, - num_predict: maxTokens, - ...(modelConfig?.reasoningEffort && { reasoning_effort: modelConfig.reasoningEffort }) + num_predict: maxTokens }, stream: true as const, + ...(modelConfig?.reasoningEffort && { reasoning_effort: modelConfig.reasoningEffort }), ...(supportsFunctionCall && ollamaTools && ollamaTools.length > 0 ?
{ tools: ollamaTools } : {}) @@ -600,6 +607,12 @@ export class OllamaProvider extends BaseLLMProvider { continue } + // 处理 thinking 字段 + const currentThinking = chunk.message?.thinking || '' + if (currentThinking) { + yield { type: 'reasoning', reasoning_content: currentThinking } + } + // 获取当前内容 const currentContent = chunk.message?.content || '' if (!currentContent) continue diff --git a/src/main/presenter/sqlitePresenter/tables/conversations.ts b/src/main/presenter/sqlitePresenter/tables/conversations.ts index 1b58cbc53..43176937e 100644 --- a/src/main/presenter/sqlitePresenter/tables/conversations.ts +++ b/src/main/presenter/sqlitePresenter/tables/conversations.ts @@ -19,6 +19,8 @@ type ConversationRow = { is_pinned: number enabled_mcp_tools: string | null thinking_budget: number | null + reasoning_effort: string | null + verbosity: string | null } // 解析 JSON 字段 @@ -95,12 +97,21 @@ export class ConversationsTable extends BaseTable { UPDATE conversations SET enabled_mcp_tools = NULL WHERE enabled_mcp_tools = '[]'; ` } + if (version === 6) { + return ` + -- 添加 reasoning_effort 字段 + ALTER TABLE conversations ADD COLUMN reasoning_effort TEXT DEFAULT NULL; + + -- 添加 verbosity 字段 + ALTER TABLE conversations ADD COLUMN verbosity TEXT DEFAULT NULL; + ` + } return null } getLatestVersion(): number { - return 5 + return 6 } async create(title: string, settings: Partial<CONVERSATION_SETTINGS> = {}): Promise<string> { @@ -120,9 +131,11 @@ export class ConversationsTable extends BaseTable { artifacts, is_pinned, enabled_mcp_tools, - thinking_budget + thinking_budget, + reasoning_effort, + verbosity ) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) `) const conv_id = nanoid() const now = Date.now() @@ -141,7 +154,9 @@ export class ConversationsTable extends BaseTable { settings.artifacts || 0, 0, // Default is_pinned to 0 settings.enabledMcpTools ?
JSON.stringify(settings.enabledMcpTools) : 'NULL', - settings.thinkingBudget !== undefined ? settings.thinkingBudget : null + settings.thinkingBudget !== undefined ? settings.thinkingBudget : null, + settings.reasoningEffort !== undefined ? settings.reasoningEffort : null, + settings.verbosity !== undefined ? settings.verbosity : null ) return conv_id } @@ -165,7 +180,9 @@ export class ConversationsTable extends BaseTable { artifacts, is_pinned, enabled_mcp_tools, - thinking_budget + thinking_budget, + reasoning_effort, + verbosity FROM conversations WHERE conv_id = ? ` @@ -192,7 +209,11 @@ export class ConversationsTable extends BaseTable { modelId: result.modelId, artifacts: result.artifacts as 0 | 1, enabledMcpTools: getJsonField(result.enabled_mcp_tools, undefined), - thinkingBudget: result.thinking_budget !== null ? result.thinking_budget : undefined + thinkingBudget: result.thinking_budget !== null ? result.thinking_budget : undefined, + reasoningEffort: result.reasoning_effort + ? (result.reasoning_effort as 'minimal' | 'low' | 'medium' | 'high') + : undefined, + verbosity: result.verbosity ? (result.verbosity as 'low' | 'medium' | 'high') : undefined } } } @@ -253,6 +274,14 @@ export class ConversationsTable extends BaseTable { updates.push('thinking_budget = ?') params.push(data.settings.thinkingBudget) } + if (data.settings.reasoningEffort !== undefined) { + updates.push('reasoning_effort = ?') + params.push(data.settings.reasoningEffort) + } + if (data.settings.verbosity !== undefined) { + updates.push('verbosity = ?') + params.push(data.settings.verbosity) + } } if (updates.length > 0 || data.updatedAt) { updates.push('updated_at = ?') @@ -298,7 +327,9 @@ export class ConversationsTable extends BaseTable { artifacts, is_pinned, enabled_mcp_tools, - thinking_budget + thinking_budget, + reasoning_effort, + verbosity FROM conversations ORDER BY updated_at DESC LIMIT ? OFFSET ? 
@@ -324,7 +355,11 @@ export class ConversationsTable extends BaseTable { modelId: row.modelId, artifacts: row.artifacts as 0 | 1, enabledMcpTools: getJsonField(row.enabled_mcp_tools, undefined), - thinkingBudget: row.thinking_budget !== null ? row.thinking_budget : undefined + thinkingBudget: row.thinking_budget !== null ? row.thinking_budget : undefined, + reasoningEffort: row.reasoning_effort + ? (row.reasoning_effort as 'minimal' | 'low' | 'medium' | 'high') + : undefined, + verbosity: row.verbosity ? (row.verbosity as 'low' | 'medium' | 'high') : undefined } })) } diff --git a/src/renderer/src/components/TitleView.vue b/src/renderer/src/components/TitleView.vue index ca294ea6a..8c840409a 100644 --- a/src/renderer/src/components/TitleView.vue +++ b/src/renderer/src/components/TitleView.vue @@ -104,8 +104,8 @@ const maxTokens = ref(chatStore.chatConfig.maxTokens) const systemPrompt = ref(chatStore.chatConfig.systemPrompt) const artifacts = ref(chatStore.chatConfig.artifacts) const thinkingBudget = ref(chatStore.chatConfig.thinkingBudget) -const reasoningEffort = ref((chatStore.chatConfig as any).reasoningEffort) -const verbosity = ref((chatStore.chatConfig as any).verbosity) +const reasoningEffort = ref(chatStore.chatConfig.reasoningEffort) +const verbosity = ref(chatStore.chatConfig.verbosity) // 获取模型配置来初始化默认值 const loadModelConfig = async () => { @@ -258,8 +258,8 @@ watch( newSystemPrompt !== chatStore.chatConfig.systemPrompt || newArtifacts !== chatStore.chatConfig.artifacts || newThinkingBudget !== chatStore.chatConfig.thinkingBudget || - newReasoningEffort !== (chatStore.chatConfig as any).reasoningEffort || - newVerbosity !== (chatStore.chatConfig as any).verbosity + newReasoningEffort !== chatStore.chatConfig.reasoningEffort || + newVerbosity !== chatStore.chatConfig.verbosity ) { chatStore.updateChatConfig({ temperature: newTemp, @@ -285,13 +285,8 @@ watch( systemPrompt.value = newConfig.systemPrompt artifacts.value = newConfig.artifacts 
thinkingBudget.value = newConfig.thinkingBudget - - if ((newConfig as any).reasoningEffort !== undefined) { - reasoningEffort.value = (newConfig as any).reasoningEffort - } - if ((newConfig as any).verbosity !== undefined) { - verbosity.value = (newConfig as any).verbosity - } + reasoningEffort.value = newConfig.reasoningEffort + verbosity.value = newConfig.verbosity if ( oldConfig && (newConfig.modelId !== oldConfig.modelId || newConfig.providerId !== oldConfig.providerId) diff --git a/src/renderer/src/stores/chat.ts b/src/renderer/src/stores/chat.ts index 12e5e7a01..5702423a5 100644 --- a/src/renderer/src/stores/chat.ts +++ b/src/renderer/src/stores/chat.ts @@ -57,7 +57,10 @@ export const useChatStore = defineStore('chat', () => { providerId: '', modelId: '', artifacts: 0, - enabledMcpTools: [] + enabledMcpTools: [], + thinkingBudget: undefined, + reasoningEffort: undefined, + verbosity: undefined }) // Deeplink 消息缓存 diff --git a/src/shared/presenter.d.ts b/src/shared/presenter.d.ts index ee97f1661..9dc55b247 100644 --- a/src/shared/presenter.d.ts +++ b/src/shared/presenter.d.ts @@ -625,6 +625,8 @@ export type CONVERSATION_SETTINGS = { artifacts: 0 | 1 enabledMcpTools?: string[] thinkingBudget?: number + reasoningEffort?: 'minimal' | 'low' | 'medium' | 'high' + verbosity?: 'low' | 'medium' | 'high' } export type CONVERSATION = {