From 51b7f0a8ad5ed6d56cde6d994bc0b91f9a73866b Mon Sep 17 00:00:00 2001 From: zerob13 Date: Fri, 20 Mar 2026 18:41:41 +0800 Subject: [PATCH 1/3] fix(provider): forward mcp runtime injection --- .../providers/_302AIProvider.ts | 9 +- .../providers/aihubmixProvider.ts | 9 +- .../providers/cherryInProvider.ts | 9 +- .../providers/dashscopeProvider.ts | 9 +- .../providers/deepseekProvider.ts | 9 +- .../providers/doubaoProvider.ts | 9 +- .../providers/githubProvider.ts | 9 +- .../providers/grokProvider.ts | 9 +- .../providers/groqProvider.ts | 9 +- .../providers/jiekouProvider.ts | 9 +- .../providers/lmstudioProvider.ts | 9 +- .../providers/modelscopeProvider.ts | 9 +- .../providers/o3fanProvider.ts | 9 +- .../providers/openAIProvider.ts | 9 +- .../providers/openRouterProvider.ts | 9 +- .../providers/poeProvider.ts | 9 +- .../providers/ppioProvider.ts | 9 +- .../providers/siliconcloudProvider.ts | 9 +- .../providers/togetherProvider.ts | 9 +- .../providers/tokenfluxProvider.ts | 9 +- .../providers/vercelAIGatewayProvider.ts | 9 +- .../providers/zenmuxProvider.ts | 9 +- .../providers/zhipuProvider.ts | 9 +- .../openAICompatibleProvider.test.ts | 339 ++++++++++++++++++ 24 files changed, 500 insertions(+), 46 deletions(-) create mode 100644 test/main/presenter/llmProviderPresenter/openAICompatibleProvider.test.ts diff --git a/src/main/presenter/llmProviderPresenter/providers/_302AIProvider.ts b/src/main/presenter/llmProviderPresenter/providers/_302AIProvider.ts index 64381472c..ea4ea6207 100644 --- a/src/main/presenter/llmProviderPresenter/providers/_302AIProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/_302AIProvider.ts @@ -7,6 +7,7 @@ import { IConfigPresenter } from '@shared/presenter' import { OpenAICompatibleProvider } from './openAICompatibleProvider' +import type { ProviderMcpRuntimePort } from '../runtimePorts' // Define interface for 302AI API balance response interface _302AIBalanceResponse { @@ -38,8 +39,12 @@ interface _302AIModelResponse { } export class _302AIProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } async completions( diff --git a/src/main/presenter/llmProviderPresenter/providers/aihubmixProvider.ts b/src/main/presenter/llmProviderPresenter/providers/aihubmixProvider.ts index 786179ebc..107598e4a 100644 --- a/src/main/presenter/llmProviderPresenter/providers/aihubmixProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/aihubmixProvider.ts @@ -3,10 +3,15 @@ import { OpenAICompatibleProvider } from './openAICompatibleProvider' import { proxyConfig } from '@/presenter/proxyConfig' import { ProxyAgent } from 'undici' import OpenAI from 'openai' +import type { ProviderMcpRuntimePort } from '../runtimePorts' export class AihubmixProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } protected createOpenAIClient(): void { diff --git a/src/main/presenter/llmProviderPresenter/providers/cherryInProvider.ts b/src/main/presenter/llmProviderPresenter/providers/cherryInProvider.ts index c2545466f..a80a3fa36 100644 --- 
a/src/main/presenter/llmProviderPresenter/providers/cherryInProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/cherryInProvider.ts @@ -1,13 +1,18 @@ import { LLM_PROVIDER, MODEL_META, IConfigPresenter, KeyStatus } from '@shared/presenter' import { OpenAICompatibleProvider } from './openAICompatibleProvider' +import type { ProviderMcpRuntimePort } from '../runtimePorts' interface CherryInUsageResponse { total_usage: number } export class CherryInProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } private getBaseUrl(): string { diff --git a/src/main/presenter/llmProviderPresenter/providers/dashscopeProvider.ts b/src/main/presenter/llmProviderPresenter/providers/dashscopeProvider.ts index 2725e5fef..f1f7153e3 100644 --- a/src/main/presenter/llmProviderPresenter/providers/dashscopeProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/dashscopeProvider.ts @@ -11,10 +11,15 @@ import { import { DEFAULT_MODEL_CONTEXT_LENGTH, DEFAULT_MODEL_MAX_TOKENS } from '@shared/modelConfigDefaults' import { OpenAICompatibleProvider } from './openAICompatibleProvider' import { modelCapabilities } from '../../configPresenter/modelCapabilities' +import type { ProviderMcpRuntimePort } from '../runtimePorts' export class DashscopeProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } private supportsEnableThinking(modelId: string): boolean { diff --git a/src/main/presenter/llmProviderPresenter/providers/deepseekProvider.ts b/src/main/presenter/llmProviderPresenter/providers/deepseekProvider.ts index e4083a90f..d3eb5a765 100644 --- a/src/main/presenter/llmProviderPresenter/providers/deepseekProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/deepseekProvider.ts @@ -7,6 +7,7 @@ import { } from '@shared/presenter' import { OpenAICompatibleProvider } from './openAICompatibleProvider' import { SUMMARY_TITLES_PROMPT } from '../baseProvider' +import type { ProviderMcpRuntimePort } from '../runtimePorts' // Define interface for DeepSeek API key response interface DeepSeekBalanceResponse { @@ -20,8 +21,12 @@ interface DeepSeekBalanceResponse { } export class DeepseekProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } async completions( diff --git a/src/main/presenter/llmProviderPresenter/providers/doubaoProvider.ts b/src/main/presenter/llmProviderPresenter/providers/doubaoProvider.ts index 9392608b9..22dfdae67 100644 --- a/src/main/presenter/llmProviderPresenter/providers/doubaoProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/doubaoProvider.ts @@ -17,6 +17,7 @@ import { import { OpenAICompatibleProvider } from './openAICompatibleProvider' import { providerDbLoader } from '../../configPresenter/providerDbLoader' import { modelCapabilities } from 
'../../configPresenter/modelCapabilities' +import type { ProviderMcpRuntimePort } from '../runtimePorts' export class DoubaoProvider extends OpenAICompatibleProvider { // List of models that support thinking parameter @@ -30,9 +31,13 @@ export class DoubaoProvider extends OpenAICompatibleProvider { 'doubao-1-5-thinking-pro-m-250428' ] - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { // Initialize Doubao model configuration - super(provider, configPresenter) + super(provider, configPresenter, mcpRuntime) } private supportsThinking(modelId: string): boolean { diff --git a/src/main/presenter/llmProviderPresenter/providers/githubProvider.ts b/src/main/presenter/llmProviderPresenter/providers/githubProvider.ts index 108a36446..6f2bc269b 100644 --- a/src/main/presenter/llmProviderPresenter/providers/githubProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/githubProvider.ts @@ -8,10 +8,15 @@ import { import { DEFAULT_MODEL_CONTEXT_LENGTH, DEFAULT_MODEL_MAX_TOKENS } from '@shared/modelConfigDefaults' import { OpenAICompatibleProvider } from './openAICompatibleProvider' import { ModelsPage } from 'openai/resources' +import type { ProviderMcpRuntimePort } from '../runtimePorts' export class GithubProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } protected async fetchOpenAIModels(options?: { timeout: number }): Promise<MODEL_META[]> { const response = (await this.openai.models.list(options)) as ModelsPage & { diff --git a/src/main/presenter/llmProviderPresenter/providers/grokProvider.ts b/src/main/presenter/llmProviderPresenter/providers/grokProvider.ts index 21c67cf5b..77f830ceb 100644 --- a/src/main/presenter/llmProviderPresenter/providers/grokProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/grokProvider.ts @@ -1,6 +1,7 @@ import { LLM_PROVIDER, LLMResponse, ChatMessage, IConfigPresenter } from '@shared/presenter' import { OpenAICompatibleProvider } from './openAICompatibleProvider' import { ModelConfig, MCPToolDefinition, LLMCoreStreamEvent } from '@shared/presenter' +import type { ProviderMcpRuntimePort } from '../runtimePorts' export class GrokProvider extends OpenAICompatibleProvider { // Image generation model ID @@ -13,8 +14,12 @@ export class GrokProvider extends OpenAICompatibleProvider { // Models that support reasoning_effort parameter (grok-4 does not) private static readonly REASONING_EFFORT_MODELS: string[] = ['grok-3-mini', 'grok-3-mini-fast'] - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } // Check if it's an image model diff --git a/src/main/presenter/llmProviderPresenter/providers/groqProvider.ts b/src/main/presenter/llmProviderPresenter/providers/groqProvider.ts index 7453ca41b..02bec185e 100644 --- a/src/main/presenter/llmProviderPresenter/providers/groqProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/groqProvider.ts @@ -6,6 +6,7 @@ import { IConfigPresenter } from '@shared/presenter' import {
OpenAICompatibleProvider } from './openAICompatibleProvider' +import type { ProviderMcpRuntimePort } from '../runtimePorts' // Define interface for Groq model response (following PPIO naming convention) interface GroqModelResponse { @@ -28,8 +29,12 @@ interface GroqModelResponse { } export class GroqProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } async completions( diff --git a/src/main/presenter/llmProviderPresenter/providers/jiekouProvider.ts b/src/main/presenter/llmProviderPresenter/providers/jiekouProvider.ts index 29e00d0bb..29000d59b 100644 --- a/src/main/presenter/llmProviderPresenter/providers/jiekouProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/jiekouProvider.ts @@ -1,9 +1,14 @@ import { LLM_PROVIDER, MODEL_META, IConfigPresenter } from '@shared/presenter' import { OpenAICompatibleProvider } from './openAICompatibleProvider' +import type { ProviderMcpRuntimePort } from '../runtimePorts' export class JiekouProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } protected async fetchOpenAIModels(options?: { timeout: number }): Promise<MODEL_META[]> { diff --git a/src/main/presenter/llmProviderPresenter/providers/lmstudioProvider.ts b/src/main/presenter/llmProviderPresenter/providers/lmstudioProvider.ts index eb078b133..ccc7c13a5 100644 --- a/src/main/presenter/llmProviderPresenter/providers/lmstudioProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/lmstudioProvider.ts @@ -1,7 +1,12 @@ import { IConfigPresenter, LLM_PROVIDER } from '@shared/presenter' import { OpenAICompatibleProvider } from './openAICompatibleProvider' +import type { ProviderMcpRuntimePort } from '../runtimePorts' export class LMStudioProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } } diff --git a/src/main/presenter/llmProviderPresenter/providers/modelscopeProvider.ts b/src/main/presenter/llmProviderPresenter/providers/modelscopeProvider.ts index 160af5825..2f9960693 100644 --- a/src/main/presenter/llmProviderPresenter/providers/modelscopeProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/modelscopeProvider.ts @@ -8,6 +8,7 @@ import { ModelScopeMcpSyncOptions } from '@shared/presenter' import { OpenAICompatibleProvider } from './openAICompatibleProvider' +import type { ProviderMcpRuntimePort } from '../runtimePorts' // Define interface for ModelScope MCP API response export interface ModelScopeMcpServerResponse { @@ -46,8 +47,12 @@ export interface ModelScopeMcpServer { } export class ModelscopeProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { +
super(provider, configPresenter, mcpRuntime) } async completions( diff --git a/src/main/presenter/llmProviderPresenter/providers/o3fanProvider.ts b/src/main/presenter/llmProviderPresenter/providers/o3fanProvider.ts index d0f630881..fc92712c4 100644 --- a/src/main/presenter/llmProviderPresenter/providers/o3fanProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/o3fanProvider.ts @@ -14,10 +14,15 @@ import { import { OpenAICompatibleProvider } from './openAICompatibleProvider' import { providerDbLoader } from '../../configPresenter/providerDbLoader' import { modelCapabilities } from '../../configPresenter/modelCapabilities' +import type { ProviderMcpRuntimePort } from '../runtimePorts' export class O3fanProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } protected async fetchOpenAIModels(): Promise<MODEL_META[]> { diff --git a/src/main/presenter/llmProviderPresenter/providers/openAIProvider.ts b/src/main/presenter/llmProviderPresenter/providers/openAIProvider.ts index 28b9eed45..9b97fe60c 100644 --- a/src/main/presenter/llmProviderPresenter/providers/openAIProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/openAIProvider.ts @@ -1,8 +1,13 @@ import { IConfigPresenter, LLM_PROVIDER, LLMResponse } from '@shared/presenter' import { OpenAICompatibleProvider } from './openAICompatibleProvider' +import type { ProviderMcpRuntimePort } from '../runtimePorts' export class OpenAIProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } async completions( diff --git a/src/main/presenter/llmProviderPresenter/providers/openRouterProvider.ts b/src/main/presenter/llmProviderPresenter/providers/openRouterProvider.ts index 9acac7ca7..04b274ef5 100644 --- a/src/main/presenter/llmProviderPresenter/providers/openRouterProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/openRouterProvider.ts @@ -7,6 +7,7 @@ import { IConfigPresenter } from '@shared/presenter' import { OpenAICompatibleProvider } from './openAICompatibleProvider' +import type { ProviderMcpRuntimePort } from '../runtimePorts' // Define interface for OpenRouter API key response interface OpenRouterKeyResponse { @@ -57,8 +58,12 @@ interface OpenRouterModelResponse { } export class OpenRouterProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } async completions( diff --git a/src/main/presenter/llmProviderPresenter/providers/poeProvider.ts b/src/main/presenter/llmProviderPresenter/providers/poeProvider.ts index 9eca00ef5..a9f73b8c2 100644 --- a/src/main/presenter/llmProviderPresenter/providers/poeProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/poeProvider.ts @@ -1,5 +1,6 @@ import { LLM_PROVIDER, MODEL_META, IConfigPresenter } from '@shared/presenter' import { OpenAICompatibleProvider } from
'./openAICompatibleProvider' +import type { ProviderMcpRuntimePort } from '../runtimePorts' /** * PoeProvider integrates Poe's OpenAI-compatible API surface with the shared @@ -11,8 +12,12 @@ import { OpenAICompatibleProvider } from './openAICompatibleProvider' * tweak metadata so the renderer can present a clearer group name. */ export class PoeProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } protected async fetchOpenAIModels(options?: { timeout: number }): Promise<MODEL_META[]> { diff --git a/src/main/presenter/llmProviderPresenter/providers/ppioProvider.ts b/src/main/presenter/llmProviderPresenter/providers/ppioProvider.ts index a34f6e246..eb9476466 100644 --- a/src/main/presenter/llmProviderPresenter/providers/ppioProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/ppioProvider.ts @@ -7,6 +7,7 @@ import { IConfigPresenter } from '@shared/presenter' import { OpenAICompatibleProvider } from './openAICompatibleProvider' +import type { ProviderMcpRuntimePort } from '../runtimePorts' // Define interface for PPIO API key response interface PPIOKeyResponse { @@ -29,8 +30,12 @@ interface PPIOModelResponse { } export class PPIOProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } async completions( diff --git a/src/main/presenter/llmProviderPresenter/providers/siliconcloudProvider.ts b/src/main/presenter/llmProviderPresenter/providers/siliconcloudProvider.ts index d253df0a7..d94e6c148 100644 --- a/src/main/presenter/llmProviderPresenter/providers/siliconcloudProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/siliconcloudProvider.ts @@ -11,6 +11,7 @@ import { } from '@shared/presenter' import { DEFAULT_MODEL_CONTEXT_LENGTH, DEFAULT_MODEL_MAX_TOKENS } from '@shared/modelConfigDefaults' import { OpenAICompatibleProvider } from './openAICompatibleProvider' +import type { ProviderMcpRuntimePort } from '../runtimePorts' // Define interface for SiliconCloud API key response interface SiliconCloudKeyResponse { @@ -46,8 +47,12 @@ export class SiliconcloudProvider extends OpenAICompatibleProvider { 'pro/deepseek-ai/deepseek-v3.1' ] - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } /** diff --git a/src/main/presenter/llmProviderPresenter/providers/togetherProvider.ts b/src/main/presenter/llmProviderPresenter/providers/togetherProvider.ts index 3413a7ce2..4ce142258 100644 --- a/src/main/presenter/llmProviderPresenter/providers/togetherProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/togetherProvider.ts @@ -2,9 +2,14 @@ import { IConfigPresenter, LLM_PROVIDER, LLMResponse, MODEL_META } from '@shared import { DEFAULT_MODEL_CONTEXT_LENGTH, DEFAULT_MODEL_MAX_TOKENS } from '@shared/modelConfigDefaults' import { OpenAICompatibleProvider } from './openAICompatibleProvider' import Together from 'together-ai'
+import type { ProviderMcpRuntimePort } from '../runtimePorts' export class TogetherProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } async completions( diff --git a/src/main/presenter/llmProviderPresenter/providers/tokenfluxProvider.ts b/src/main/presenter/llmProviderPresenter/providers/tokenfluxProvider.ts index c8a512697..3947acfa3 100644 --- a/src/main/presenter/llmProviderPresenter/providers/tokenfluxProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/tokenfluxProvider.ts @@ -7,6 +7,7 @@ import { IConfigPresenter } from '@shared/presenter' import { OpenAICompatibleProvider } from './openAICompatibleProvider' +import type { ProviderMcpRuntimePort } from '../runtimePorts' // Define interface for TokenFlux API model response interface TokenFluxModelResponse { @@ -30,8 +31,12 @@ interface TokenFluxModelsResponse { } export class TokenFluxProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } async completions( diff --git a/src/main/presenter/llmProviderPresenter/providers/vercelAIGatewayProvider.ts b/src/main/presenter/llmProviderPresenter/providers/vercelAIGatewayProvider.ts index 890a67822..d7a42b8de 100644 --- a/src/main/presenter/llmProviderPresenter/providers/vercelAIGatewayProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/vercelAIGatewayProvider.ts @@ -1,9 +1,14 @@ import { LLM_PROVIDER, LLMResponse, ChatMessage, IConfigPresenter } from '@shared/presenter' import { OpenAICompatibleProvider } from './openAICompatibleProvider' +import type { ProviderMcpRuntimePort } from '../runtimePorts' export class VercelAIGatewayProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } async completions( diff --git a/src/main/presenter/llmProviderPresenter/providers/zenmuxProvider.ts b/src/main/presenter/llmProviderPresenter/providers/zenmuxProvider.ts index 9f6b3c7c8..be1bf5c42 100644 --- a/src/main/presenter/llmProviderPresenter/providers/zenmuxProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/zenmuxProvider.ts @@ -1,9 +1,14 @@ import { IConfigPresenter, LLM_PROVIDER, MODEL_META } from '@shared/presenter' import { OpenAICompatibleProvider } from './openAICompatibleProvider' +import type { ProviderMcpRuntimePort } from '../runtimePorts' export class ZenmuxProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { - super(provider, configPresenter) + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { + super(provider, configPresenter, mcpRuntime) } protected async fetchOpenAIModels(options?: { timeout: number }): Promise<MODEL_META[]> { diff --git a/src/main/presenter/llmProviderPresenter/providers/zhipuProvider.ts
b/src/main/presenter/llmProviderPresenter/providers/zhipuProvider.ts index a419a839d..a460184fd 100644 --- a/src/main/presenter/llmProviderPresenter/providers/zhipuProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/zhipuProvider.ts @@ -14,11 +14,16 @@ import { import { OpenAICompatibleProvider } from './openAICompatibleProvider' import { providerDbLoader } from '../../configPresenter/providerDbLoader' import { modelCapabilities } from '../../configPresenter/modelCapabilities' +import type { ProviderMcpRuntimePort } from '../runtimePorts' export class ZhipuProvider extends OpenAICompatibleProvider { - constructor(provider: LLM_PROVIDER, configPresenter: IConfigPresenter) { + constructor( + provider: LLM_PROVIDER, + configPresenter: IConfigPresenter, + mcpRuntime?: ProviderMcpRuntimePort + ) { // Initialize Zhipu AI model configuration - super(provider, configPresenter) + super(provider, configPresenter, mcpRuntime) } protected async fetchOpenAIModels(): Promise<MODEL_META[]> { diff --git a/test/main/presenter/llmProviderPresenter/openAICompatibleProvider.test.ts b/test/main/presenter/llmProviderPresenter/openAICompatibleProvider.test.ts new file mode 100644 index 000000000..4851c9ec9 --- /dev/null +++ b/test/main/presenter/llmProviderPresenter/openAICompatibleProvider.test.ts @@ -0,0 +1,339 @@ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import type { + ChatMessage, + IConfigPresenter, + ISQLitePresenter, + LLM_PROVIDER, + MCPToolDefinition, + ModelConfig +} from '../../../../src/shared/presenter' +import { OpenAICompatibleProvider } from '../../../../src/main/presenter/llmProviderPresenter/providers/openAICompatibleProvider' +import { OpenRouterProvider } from '../../../../src/main/presenter/llmProviderPresenter/providers/openRouterProvider' +import { LLMProviderPresenter } from '../../../../src/main/presenter/llmProviderPresenter' + +const { + mockChatCompletionsCreate, + mockModelsList, + mockMcpToolsToOpenAITools, + mockGetProxyUrl, + mockCacheImage +} = vi.hoisted(() => ({ + mockChatCompletionsCreate: vi.fn(), + mockModelsList: vi.fn().mockResolvedValue({ data: [] }), + mockMcpToolsToOpenAITools: vi.fn().mockResolvedValue([]), + mockGetProxyUrl: vi.fn().mockReturnValue(null), + mockCacheImage: vi.fn() +})) + +vi.mock('electron', () => ({ + app: { + getName: vi.fn(() => 'DeepChat'), + getVersion: vi.fn(() => '0.0.0-test'), + getPath: vi.fn(() => '/mock/path'), + isReady: vi.fn(() => true), + on: vi.fn() + }, + session: {}, + ipcMain: { + on: vi.fn(), + handle: vi.fn(), + removeHandler: vi.fn() + }, + BrowserWindow: vi.fn(() => ({ + loadURL: vi.fn(), + loadFile: vi.fn(), + on: vi.fn(), + webContents: { send: vi.fn(), on: vi.fn(), isDestroyed: vi.fn(() => false) }, + isDestroyed: vi.fn(() => false), + close: vi.fn(), + show: vi.fn(), + hide: vi.fn() + })), + dialog: { + showOpenDialog: vi.fn() + }, + shell: { + openExternal: vi.fn() + } +})) + +vi.mock('openai', () => { + class MockOpenAI { + chat = { + completions: { + create: mockChatCompletionsCreate + } + } + models = { + list: mockModelsList + } + } + + return { + default: MockOpenAI, + AzureOpenAI: MockOpenAI + } +}) + +vi.mock('@/presenter', () => ({ + presenter: { + devicePresenter: { + cacheImage: mockCacheImage + } + } +})) + +vi.mock('@/eventbus', () => ({ + eventBus: { + on: vi.fn(), + sendToRenderer: vi.fn(), + sendToMain: vi.fn(), + emit: vi.fn(), + send: vi.fn() + }, + SendTarget: { + ALL_WINDOWS: 'ALL_WINDOWS' + } +})) + +vi.mock('@/events', () => ({ + CONFIG_EVENTS: { + PROXY_RESOLVED:
'PROXY_RESOLVED', + PROVIDER_ATOMIC_UPDATE: 'PROVIDER_ATOMIC_UPDATE', + PROVIDER_BATCH_UPDATE: 'PROVIDER_BATCH_UPDATE', + MODEL_LIST_CHANGED: 'MODEL_LIST_CHANGED' + }, + NOTIFICATION_EVENTS: { + SHOW_ERROR: 'SHOW_ERROR' + } +})) + +vi.mock('../../../../src/main/presenter/proxyConfig', () => ({ + proxyConfig: { + getProxyUrl: mockGetProxyUrl + } +})) + +vi.mock('../../../../src/main/presenter/configPresenter/modelCapabilities', () => ({ + modelCapabilities: { + supportsReasoningEffort: vi.fn().mockReturnValue(false), + supportsVerbosity: vi.fn().mockReturnValue(false), + supportsReasoning: vi.fn().mockReturnValue(false), + resolveProviderId: vi.fn((providerId: string) => providerId) + } +})) + +const createAsyncStream = (chunks: Array<Record<string, unknown>>) => ({ + async *[Symbol.asyncIterator]() { + for (const chunk of chunks) { + yield chunk + } + } +}) + +const collectEvents = async ( + provider: OpenAICompatibleProvider, + providerModel: string, + modelConfig: ModelConfig, + messages: ChatMessage[], + tools: MCPToolDefinition[] +) => { + const events = [] + for await (const event of provider.coreStream( + messages, + providerModel, + modelConfig, + 0.7, + 512, + tools + )) { + events.push(event) + } + return events +} + +const createConfigPresenter = (providers: LLM_PROVIDER[]) => + ({ + getProviders: vi.fn().mockReturnValue(providers), + getProviderModels: vi.fn().mockReturnValue([]), + getCustomModels: vi.fn().mockReturnValue([]), + getModelConfig: vi.fn().mockReturnValue(undefined), + getSetting: vi.fn().mockReturnValue(undefined), + setProviderModels: vi.fn(), + getModelStatus: vi.fn().mockReturnValue(true) + }) as unknown as IConfigPresenter + +const mockSqlitePresenter = { + getAcpSession: vi.fn().mockResolvedValue(null), + upsertAcpSession: vi.fn().mockResolvedValue(undefined), + updateAcpSessionId: vi.fn().mockResolvedValue(undefined), + updateAcpWorkdir: vi.fn().mockResolvedValue(undefined), + updateAcpSessionStatus: vi.fn().mockResolvedValue(undefined), + deleteAcpSession: vi.fn().mockResolvedValue(undefined), + deleteAcpSessions: vi.fn().mockResolvedValue(undefined) +} as unknown as ISQLitePresenter + +describe('OpenAICompatibleProvider MCP runtime injection', () => { + const convertedTools = [ + { + type: 'function', + function: { + name: 'get_weather', + description: 'Get current weather', + parameters: { + type: 'object', + properties: { + city: { + type: 'string' + } + } + } + } + } + ] + + const modelConfig: ModelConfig = { + maxTokens: 1024, + contextLength: 8192, + vision: false, + functionCall: true, + reasoning: false, + type: 'chat' + } + + const messages: ChatMessage[] = [{ role: 'user', content: 'What is the weather today?'
}] + + const mcpTools: MCPToolDefinition[] = [ + { + type: 'function', + function: { + name: 'get_weather', + description: 'Get current weather', + parameters: { + type: 'object', + properties: { + city: { + type: 'string' + } + }, + required: ['city'] + } + }, + server: { + name: 'weather-server', + icons: '', + description: 'Weather tools' + } + } + ] + + const mcpRuntime = { + mcpToolsToOpenAITools: mockMcpToolsToOpenAITools + } + + const createProvider = (overrides?: Partial<LLM_PROVIDER>): LLM_PROVIDER => ({ + id: 'mock-openai-compatible', + name: 'Mock OpenAI Compatible', + apiType: 'openai-compatible', + apiKey: 'test-key', + baseUrl: 'https://mock.example.com/v1', + enable: false, + ...overrides + }) + + beforeEach(() => { + vi.clearAllMocks() + mockModelsList.mockResolvedValue({ data: [] }) + mockGetProxyUrl.mockReturnValue(null) + mockMcpToolsToOpenAITools.mockResolvedValue(convertedTools) + mockChatCompletionsCreate.mockResolvedValue( + createAsyncStream([ + { + choices: [ + { + delta: { + content: 'ok' + }, + finish_reason: 'stop' + } + ], + usage: { + prompt_tokens: 12, + completion_tokens: 4, + total_tokens: 16 + } + } + ]) + ) + }) + + it('injects converted tools for direct OpenAICompatibleProvider instances', async () => { + const provider = new OpenAICompatibleProvider( + createProvider(), + createConfigPresenter([]), + mcpRuntime as any + ) + ;(provider as any).isInitialized = true + + const events = await collectEvents(provider, 'gpt-4o', modelConfig, messages, mcpTools) + const requestParams = mockChatCompletionsCreate.mock.calls[0]?.[0] + + expect(events.some((event) => event.type === 'text')).toBe(true) + expect(events.some((event) => event.type === 'stop')).toBe(true) + expect(mockMcpToolsToOpenAITools).toHaveBeenCalledWith(mcpTools, 'mock-openai-compatible') + expect(requestParams.tools).toEqual(convertedTools) + }) + + it('does not inject tools when mcpRuntime is missing', async () => { + const provider = new OpenAICompatibleProvider(createProvider(), createConfigPresenter([])) + ;(provider as any).isInitialized = true + + await collectEvents(provider, 'gpt-4o', modelConfig, messages, mcpTools) + const requestParams = mockChatCompletionsCreate.mock.calls[0]?.[0] + + expect(mockMcpToolsToOpenAITools).not.toHaveBeenCalled() + expect(requestParams.tools).toBeUndefined() + }) + + it('forwards mcpRuntime through OpenAICompatibleProvider subclasses', async () => { + const provider = new OpenRouterProvider( + createProvider({ + id: 'openrouter', + name: 'OpenRouter' + }), + createConfigPresenter([]), + mcpRuntime as any + ) + ;(provider as any).isInitialized = true + + await collectEvents(provider, 'gpt-4o', modelConfig, messages, mcpTools) + const requestParams = mockChatCompletionsCreate.mock.calls[0]?.[0] + + expect(mockMcpToolsToOpenAITools).toHaveBeenCalledWith(mcpTools, 'openrouter') + expect(requestParams.tools).toEqual(convertedTools) + }) + + it('preserves mcpRuntime on the LLMProviderPresenter instantiation path', async () => { + const providerConfig = createProvider({ + id: 'openrouter', + name: 'OpenRouter' + }) + const llmProviderPresenter = new LLMProviderPresenter( + createConfigPresenter([providerConfig]), + mockSqlitePresenter, + undefined, + undefined, + mcpRuntime as any + ) + + const provider = llmProviderPresenter.getProviderInstance('openrouter') as OpenRouterProvider + ;(provider as any).isInitialized = true + + await collectEvents(provider, 'gpt-4o', modelConfig, messages, mcpTools) + const requestParams = mockChatCompletionsCreate.mock.calls[0]?.[0] + +
expect(provider).toBeInstanceOf(OpenRouterProvider) + expect(mockMcpToolsToOpenAITools).toHaveBeenCalledWith(mcpTools, 'openrouter') + expect(requestParams.tools).toEqual(convertedTools) + }) +}) From 6a8f8a8a756467d907049f818790a071dd3375b3 Mon Sep 17 00:00:00 2001 From: zerob13 Date: Fri, 20 Mar 2026 18:54:50 +0800 Subject: [PATCH 2/3] fix(mcp): drop legacy conversation fallback for ACP context resolution --- src/main/presenter/index.ts | 5 -- .../presenter/mcpPresenter/toolManager.ts | 48 ++--------------- .../mcpPresenter/toolManager.test.ts | 51 +++++++------------ 3 files changed, 21 insertions(+), 83 deletions(-) diff --git a/src/main/presenter/index.ts b/src/main/presenter/index.ts index 5c71d8a6d..aec002da1 100644 --- a/src/main/presenter/index.ts +++ b/src/main/presenter/index.ts @@ -4,7 +4,6 @@ import { BrowserWindow, ipcMain, IpcMainInvokeEvent, app } from 'electron' import { WindowPresenter } from './windowPresenter' import { ShortcutPresenter } from './shortcutPresenter' import { - CONVERSATION, CONVERSATION_SETTINGS, IConfigPresenter, IDeeplinkPresenter, @@ -361,10 +360,6 @@ export class Presenter implements IPresenter { return this.legacySessionManager?.getSessionSync(conversationId) ?? null } - async getLegacyConversation(conversationId: string): Promise<CONVERSATION | null> { - return await this.getLegacySessionPresenter().getConversation(conversationId) - } - async updateLegacyConversationSettings( conversationId: string, settings: Partial<CONVERSATION_SETTINGS> diff --git a/src/main/presenter/mcpPresenter/toolManager.ts b/src/main/presenter/mcpPresenter/toolManager.ts index cbc88ae3a..386ad126d 100644 --- a/src/main/presenter/mcpPresenter/toolManager.ts +++ b/src/main/presenter/mcpPresenter/toolManager.ts @@ -7,8 +7,7 @@ import { MCPContentItem, MCPTextContent, IConfigPresenter, - Resource, - CONVERSATION + Resource } from '@shared/presenter' import { ServerManager } from './serverManager' import { McpClient } from './mcpClient' @@ -292,53 +291,12 @@ export class ToolManager { projectDir: session.projectDir?.trim() || null } } - } catch (error) { - console.warn('[ToolManager] Failed to resolve new session MCP context:', error) - } - - try { - const conversation = await presenter.getLegacyConversation(sessionId) - return this.mapLegacyConversationToAcpContext(conversation) - } catch (error) { - console.warn('[ToolManager] Failed to resolve legacy session MCP context:', error) - return null - } - } - private mapLegacyConversationToAcpContext(conversation: CONVERSATION | null | undefined): { - agentId: string - providerId: string - projectDir: string | null - } | null { - const settings = conversation?.settings - if (!settings) { return null - } - - const providerId = typeof settings.providerId === 'string' ? settings.providerId.trim() : '' - const chatMode = settings.chatMode - const isAcpConversation = providerId === 'acp' || chatMode === 'acp agent' - if (!isAcpConversation) { - return null - } - - const agentId = typeof settings.modelId === 'string' ? settings.modelId.trim() : '' - if (!agentId) { + } catch (error) { + console.warn('[ToolManager] Failed to resolve new session MCP context:', error) return null } - - const directProjectDir = - typeof settings.agentWorkspacePath === 'string' ? settings.agentWorkspacePath.trim() : '' - const mappedProjectDir = - typeof settings.acpWorkdirMap?.[agentId] === 'string' ?
settings.acpWorkdirMap[agentId]?.trim() - : '' - - return { - agentId, - providerId: providerId || 'acp', - projectDir: directProjectDir || mappedProjectDir || null - } } // Check tool call permissions diff --git a/test/main/presenter/mcpPresenter/toolManager.test.ts b/test/main/presenter/mcpPresenter/toolManager.test.ts index e4e9f73ab..6db638b0d 100644 --- a/test/main/presenter/mcpPresenter/toolManager.test.ts +++ b/test/main/presenter/mcpPresenter/toolManager.test.ts @@ -1,4 +1,4 @@ -import { beforeEach, describe, expect, it, vi } from 'vitest' +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' const eventBusMocks = vi.hoisted(() => ({ on: vi.fn(), @@ -10,13 +10,7 @@ const eventBusMocks = vi.hoisted(() => ({ const presenterMocks = vi.hoisted(() => ({ newAgentPresenter: { getSession: vi.fn() - }, - sessionPresenter: { - getConversation: vi.fn() - }, - getLegacyConversation: vi.fn((conversationId: string) => - presenterMocks.sessionPresenter.getConversation(conversationId) - ) + } })) vi.mock('@/eventbus', () => ({ @@ -44,8 +38,15 @@ vi.mock('@/presenter', () => ({ import { ToolManager } from '../../../../src/main/presenter/mcpPresenter/toolManager' describe('ToolManager ACP MCP access control', () => { + let warnSpy: ReturnType<typeof vi.spyOn> + beforeEach(() => { vi.clearAllMocks() + warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {}) + }) + + afterEach(() => { + warnSpy.mockRestore() }) function createClient(serverName: string) { @@ -132,30 +133,10 @@ describe('ToolManager ACP MCP access control', () => { expect(configPresenter.getAgentMcpSelections).toHaveBeenCalledWith('agent-1') }) - it('falls back to legacy conversation ACP context when new session is missing', async () => { - const client = createClient('legacy-server') - const configPresenter = createConfigPresenter('legacy-server') - configPresenter.getAcpAgents.mockResolvedValue([{ id: 'agent-legacy', name: 'Legacy Agent' }]) - configPresenter.getAgentMcpSelections.mockResolvedValue(['legacy-server']) - + it('treats missing new session context as non-ACP and still executes the tool', async () => { + const client = createClient('open-server') + const configPresenter = createConfigPresenter('open-server') presenterMocks.newAgentPresenter.getSession.mockResolvedValue(null) - presenterMocks.sessionPresenter.getConversation.mockResolvedValue({ - id: 'conv-1', - title: 'Legacy', - createdAt: Date.now(), - updatedAt: Date.now(), - settings: { - providerId: 'acp', - modelId: 'agent-legacy', - chatMode: 'acp agent', - agentWorkspacePath: '/workspace/legacy', - systemPrompt: '', - temperature: 0.7, - contextLength: 32000, - maxTokens: 8000, - artifacts: 0 - } - }) const manager = new ToolManager( configPresenter as never, @@ -177,8 +158,12 @@ describe('ToolManager ACP MCP access control', () => { expect(result.isError).toBe(false) expect(result.content).toBe('ok') expect(client.callTool).toHaveBeenCalledWith('echo', {}) - expect(presenterMocks.sessionPresenter.getConversation).toHaveBeenCalledWith('conv-1') - expect(configPresenter.getAgentMcpSelections).toHaveBeenCalledWith('agent-legacy') + expect(configPresenter.getAgentMcpSelections).not.toHaveBeenCalled() + expect( + warnSpy.mock.calls.some((call) => + String(call[0]).includes('Failed to resolve legacy session MCP context') + ) + ).toBe(false) }) it('skips ACP selection gating for non-ACP sessions', async () => { From c60ff54b768ab96220ead4a596be162e9b8bc6ac Mon Sep 17 00:00:00 2001 From: zerob13 Date: Fri, 20 Mar 2026 19:02:21 +0800 Subject: [PATCH 3/3] fix(mcp): skip deepchat ACP
session lookup --- .../deepchatAgentPresenter/dispatch.ts | 6 ++- .../presenter/deepchatAgentPresenter/index.ts | 4 +- .../deepchatAgentPresenter/process.ts | 3 +- .../presenter/mcpPresenter/toolManager.ts | 5 ++- src/shared/types/core/mcp.ts | 1 + .../types/presenters/legacy.presenters.d.ts | 4 ++ .../deepchatAgentPresenter.test.ts | 33 ++++++++++++++++ .../deepchatAgentPresenter/dispatch.test.ts | 9 +++-- .../deepchatAgentPresenter/process.test.ts | 2 +- .../mcpPresenter/toolManager.test.ts | 38 +++++++++++++++++-- 10 files changed, 93 insertions(+), 12 deletions(-) diff --git a/src/main/presenter/deepchatAgentPresenter/dispatch.ts b/src/main/presenter/deepchatAgentPresenter/dispatch.ts index 709f71917..047129900 100644 --- a/src/main/presenter/deepchatAgentPresenter/dispatch.ts +++ b/src/main/presenter/deepchatAgentPresenter/dispatch.ts @@ -396,7 +396,8 @@ export async function executeTools( toolOutputGuard: ToolOutputGuard, contextLength: number, maxTokens: number, - hooks?: ProcessHooks + hooks?: ProcessHooks, + providerId?: string ): Promise<{ executed: number pendingInteractions: PendingToolInteraction[] @@ -445,7 +446,8 @@ export async function executeTools( type: 'function', function: { name: tc.name, arguments: tc.arguments }, server: toolDef?.server, - conversationId: io.sessionId + conversationId: io.sessionId, + providerId: providerId?.trim() || undefined } const toolContext = { diff --git a/src/main/presenter/deepchatAgentPresenter/index.ts b/src/main/presenter/deepchatAgentPresenter/index.ts index ccc822003..903adc205 100644 --- a/src/main/presenter/deepchatAgentPresenter/index.ts +++ b/src/main/presenter/deepchatAgentPresenter/index.ts @@ -2351,6 +2351,7 @@ } const projectDir = this.resolveProjectDir(sessionId) + const sessionState = await this.getSessionState(sessionId) const toolDefinitions = await this.loadToolDefinitionsForSession(sessionId, projectDir) const toolDefinition = toolDefinitions.find((definition) => { @@ -2386,7 +2387,8 @@ arguments: toolCall.params || '{}' }, server: toolDefinition?.server, - conversationId: sessionId + conversationId: sessionId, + providerId: sessionState?.providerId?.trim() || undefined } try { diff --git a/src/main/presenter/deepchatAgentPresenter/process.ts b/src/main/presenter/deepchatAgentPresenter/process.ts index 36b963c4a..540afe442 100644 --- a/src/main/presenter/deepchatAgentPresenter/process.ts +++ b/src/main/presenter/deepchatAgentPresenter/process.ts @@ -151,7 +151,8 @@ export async function processStream(params: ProcessParams): Promise< modelConfig.contextLength > 0 ? modelConfig.contextLength : UNKNOWN_CONTEXT_LIMIT, maxTokens, - hooks + hooks, + providerId ) toolCallCount += executed.executed echo.flush() diff --git a/src/main/presenter/mcpPresenter/toolManager.ts b/src/main/presenter/mcpPresenter/toolManager.ts index 386ad126d..282c04be9 100644 --- a/src/main/presenter/mcpPresenter/toolManager.ts +++ b/src/main/presenter/mcpPresenter/toolManager.ts @@ -447,9 +447,12 @@ export class ToolManager { const { client: targetClient, originalName } = targetInfo const toolServerName = targetClient.serverName + const hintedProviderId = toolCall.providerId?.trim() + const shouldResolveAcpContext = + Boolean(toolCall.conversationId) && (!hintedProviderId || hintedProviderId === 'acp') // ACP agent-level MCP access control resolves from session context, not global chat mode.
- if (toolCall.conversationId) { + if (shouldResolveAcpContext && toolCall.conversationId) { try { const acpContext = await this.resolveAcpSessionContext(toolCall.conversationId) if (acpContext?.providerId === 'acp' && acpContext.agentId) { diff --git a/src/shared/types/core/mcp.ts b/src/shared/types/core/mcp.ts index 7ee797fb3..2b586c093 100644 --- a/src/shared/types/core/mcp.ts +++ b/src/shared/types/core/mcp.ts @@ -32,6 +32,7 @@ export interface MCPToolCall { description: string } conversationId?: string + providerId?: string } export type MCPContentItem = MCPTextContent | MCPImageContent | MCPResourceContent diff --git a/src/shared/types/presenters/legacy.presenters.d.ts b/src/shared/types/presenters/legacy.presenters.d.ts index fa044e476..9ac632cd3 100644 --- a/src/shared/types/presenters/legacy.presenters.d.ts +++ b/src/shared/types/presenters/legacy.presenters.d.ts @@ -1634,6 +1634,10 @@ export interface MCPToolCall { * Optional conversation context (used for ACP agent MCP access control). */ conversationId?: string + /** + * Optional provider hint to skip ACP session resolution for non-ACP sessions. + */ + providerId?: string } export interface MCPToolResponse { diff --git a/test/main/presenter/deepchatAgentPresenter/deepchatAgentPresenter.test.ts b/test/main/presenter/deepchatAgentPresenter/deepchatAgentPresenter.test.ts index 77cc22829..ba26d41bd 100644 --- a/test/main/presenter/deepchatAgentPresenter/deepchatAgentPresenter.test.ts +++ b/test/main/presenter/deepchatAgentPresenter/deepchatAgentPresenter.test.ts @@ -2335,5 +2335,38 @@ describe('DeepChatAgentPresenter', () => { }) ) }) + + it('passes providerId when executing a deferred MCP tool call', async () => { + toolPresenter.getAllToolDefinitions.mockResolvedValueOnce([ + { + type: 'function', + function: { + name: 'echo', + description: 'Echo tool', + parameters: { type: 'object', properties: {} } + }, + server: { name: 'test-server', icons: '', description: '' } + } + ]) + toolPresenter.callTool.mockResolvedValueOnce({ + content: 'tool result', + rawData: { toolCallId: 'tc1', content: 'tool result', isError: false } + }) + + await agent.initSession('s1', { providerId: 'openai', modelId: 'gpt-4' }) + + await (agent as any).executeDeferredToolCall('s1', { + id: 'tc1', + name: 'echo', + params: '{}' + }) + + expect(toolPresenter.callTool).toHaveBeenCalledWith( + expect.objectContaining({ + conversationId: 's1', + providerId: 'openai' + }) + ) + }) }) }) diff --git a/test/main/presenter/deepchatAgentPresenter/dispatch.test.ts b/test/main/presenter/deepchatAgentPresenter/dispatch.test.ts index f97f91353..114ab35fd 100644 --- a/test/main/presenter/deepchatAgentPresenter/dispatch.test.ts +++ b/test/main/presenter/deepchatAgentPresenter/dispatch.test.ts @@ -139,7 +139,9 @@ describe('dispatch', () => { 'full_access', new ToolOutputGuard(), 32000, - 1024 + 1024, + undefined, + 'openai' ) expect(executed.executed).toBe(1) @@ -148,7 +150,8 @@ describe('dispatch', () => { id: 'tc1', function: { name: 'get_weather', arguments: '{}' }, server: tools[0].server, - conversationId: 's1' + conversationId: 's1', + providerId: 'openai' }) ) @@ -584,7 +587,7 @@ describe('dispatch', () => { expect(executed.terminalError).toBeUndefined() const toolMessage = conversation.find((message: any) => message.role === 'tool') expect(toolMessage.content).toContain('[Tool output offloaded]') - expect(toolMessage.content).toContain('tool_function.cdp_send_11.offload') + expect(toolMessage.content).toMatch(/tool_function\.cdp_send_11(?:_[a-f0-9]+)?\.offload/) 
expect(toolMessage.content).not.toContain(':11.offload') expect(toolMessage.content).not.toContain(tempHome!) expect(state.blocks[0].tool_call?.response).toContain('[Tool output offloaded]') diff --git a/test/main/presenter/deepchatAgentPresenter/process.test.ts b/test/main/presenter/deepchatAgentPresenter/process.test.ts index ed4753515..31c32b04f 100644 --- a/test/main/presenter/deepchatAgentPresenter/process.test.ts +++ b/test/main/presenter/deepchatAgentPresenter/process.test.ts @@ -248,7 +248,7 @@ describe('processStream', () => { const secondCallMessages = (coreStream as ReturnType<typeof vi.fn>).mock.calls[1][0] const toolResultMsg = secondCallMessages.find((m: any) => m.role === 'tool') expect(toolResultMsg.content).toContain('[Tool output offloaded]') - expect(toolResultMsg.content).toContain('tool_function.cdp_send_11.offload') + expect(toolResultMsg.content).toMatch(/tool_function\.cdp_send_11(?:_[a-f0-9]+)?\.offload/) expect(toolResultMsg.content).not.toContain(':11.offload') expect(toolResultMsg.content).not.toContain(tempHome!) }) diff --git a/test/main/presenter/mcpPresenter/toolManager.test.ts b/test/main/presenter/mcpPresenter/toolManager.test.ts index 6db638b0d..79e7e0e83 100644 --- a/test/main/presenter/mcpPresenter/toolManager.test.ts +++ b/test/main/presenter/mcpPresenter/toolManager.test.ts @@ -123,7 +123,8 @@ describe('ToolManager ACP MCP access control', () => { name: 'echo', arguments: '{}' }, - conversationId: 'session-1' + conversationId: 'session-1', + providerId: 'acp' }) expect(result.isError).toBe(true) @@ -133,7 +134,7 @@ describe('ToolManager ACP MCP access control', () => { expect(configPresenter.getAgentMcpSelections).toHaveBeenCalledWith('agent-1') }) - it('treats missing new session context as non-ACP and still executes the tool', async () => { + it('skips ACP session resolution when provider hint is non-ACP', async () => { const client = createClient('open-server') const configPresenter = createConfigPresenter('open-server') presenterMocks.newAgentPresenter.getSession.mockResolvedValue(null) @@ -152,12 +153,14 @@ describe('ToolManager ACP MCP access control', () => { name: 'echo', arguments: '{}' }, - conversationId: 'conv-1' + conversationId: 'conv-1', + providerId: 'openai' }) expect(result.isError).toBe(false) expect(result.content).toBe('ok') expect(client.callTool).toHaveBeenCalledWith('echo', {}) + expect(presenterMocks.newAgentPresenter.getSession).not.toHaveBeenCalled() expect(configPresenter.getAgentMcpSelections).not.toHaveBeenCalled() expect( warnSpy.mock.calls.some((call) => String(call[0]).includes('Failed to resolve legacy session MCP context') ) ).toBe(false) }) @@ -206,4 +209,33 @@ describe('ToolManager ACP MCP access control', () => { expect(client.callTool).toHaveBeenCalledWith('echo', {}) expect(configPresenter.getAgentMcpSelections).not.toHaveBeenCalled() }) + + it('treats missing provider hint as a fallback to new session resolution', async () => { + const client = createClient('open-server') + const configPresenter = createConfigPresenter('open-server') + presenterMocks.newAgentPresenter.getSession.mockResolvedValue(null) + + const manager = new ToolManager( + configPresenter as never, + { + getRunningClients: vi.fn().mockResolvedValue([client]) + } as never + ) + + const result = await manager.callTool({ + id: 'tool-4', + type: 'function', + function: { + name: 'echo', + arguments: '{}' + }, + conversationId: 'conv-fallback' + }) + + expect(result.isError).toBe(false) + expect(result.content).toBe('ok') + expect(client.callTool).toHaveBeenCalledWith('echo', {}) +
expect(presenterMocks.newAgentPresenter.getSession).toHaveBeenCalledWith('conv-fallback') + expect(configPresenter.getAgentMcpSelections).not.toHaveBeenCalled() + }) })
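For illustration, a minimal sketch of how a non-ACP caller would use the providerId hint introduced in PATCH 3/3. The manager instance, the echo tool, and the '@/presenter/mcpPresenter/toolManager' import path are assumptions that mirror the test fixtures above rather than code from the patches:

import type { MCPToolCall } from '@shared/presenter'
import type { ToolManager } from '@/presenter/mcpPresenter/toolManager'

// Assumes a constructed ToolManager wired to a running MCP client that
// exposes an `echo` tool, as in toolManager.test.ts above.
export async function callEchoFromNonAcpSession(
  manager: ToolManager,
  conversationId: string
) {
  const call: MCPToolCall = {
    id: 'tool-1',
    type: 'function',
    function: { name: 'echo', arguments: '{}' },
    conversationId,
    // Non-ACP hint: callTool skips ACP session resolution entirely, so
    // newAgentPresenter.getSession is never queried and no agent-level
    // MCP selection gating is applied.
    providerId: 'openai'
  }
  // Omitting providerId (or passing 'acp') instead falls back to resolving
  // the session and, for ACP sessions, gating the call on the agent's
  // configured MCP selections.
  return manager.callTool(call)
}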