Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
357 changes: 357 additions & 0 deletions docs/trace-request-params-feature.md

Large diffs are not rendered by default.

1,017 changes: 822 additions & 195 deletions resources/model-db/providers.json

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions src/main/events.ts
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ export const CONFIG_EVENTS = {
CONTENT_PROTECTION_CHANGED: 'config:content-protection-changed',
SOUND_ENABLED_CHANGED: 'config:sound-enabled-changed', // 新增:声音开关变更事件
COPY_WITH_COT_CHANGED: 'config:copy-with-cot-enabled-changed',
TRACE_DEBUG_CHANGED: 'config:trace-debug-changed', // Trace 调试功能开关变更事件
PROXY_RESOLVED: 'config:proxy-resolved',
LANGUAGE_CHANGED: 'config:language-changed', // 新增:语言变更事件
// 模型配置相关事件
Expand Down
138 changes: 138 additions & 0 deletions src/main/lib/redact.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
/**
 * Redaction utilities for sensitive information in request preview.
 *
 * Request previews carry credentials (API keys, bearer tokens, passwords)
 * in both headers and body. Before a preview is displayed or logged, these
 * helpers replace such values with a fixed placeholder.
 */

/** Placeholder written in place of every redacted value. */
const REDACTED = '***REDACTED***'

/** Placeholder written when a circular reference is encountered in a body. */
const CIRCULAR = '***CIRCULAR***'

/**
 * Substrings that mark a header name as sensitive (case-insensitive).
 * Matching is by inclusion, so e.g. 'x-goog-api-key' is caught by 'api-key'.
 */
const SENSITIVE_HEADER_KEYS = [
  'authorization',
  'api-key',
  'x-api-key',
  'apikey',
  'bearer',
  'token',
  'secret',
  'password',
  'credential',
  'auth'
]

/**
 * Sensitive body keys that should be redacted.
 * Note: matching is exact or suffix-based (see shouldRedactBodyKey) to avoid
 * filtering legitimate keys like 'max_tokens'.
 */
const SENSITIVE_BODY_KEYS = ['api_key', 'apiKey', 'apikey', 'secret', 'password', 'token']

/**
 * Body keys that should never be redacted, even though they contain a
 * sensitive keyword (e.g. 'max_tokens' contains 'token').
 */
const ALLOWED_BODY_KEYS = [
  'max_tokens',
  'max_completion_tokens',
  'max_output_tokens',
  'temperature',
  'stream',
  'model',
  'messages',
  'tools'
]

/**
 * Redact sensitive values in headers.
 * A header is redacted when its lower-cased name contains any entry of
 * SENSITIVE_HEADER_KEYS.
 * @param headers Original headers
 * @returns A new object with sensitive values replaced; others copied as-is
 */
export function redactHeaders(headers: Record<string, string>): Record<string, string> {
  const redacted: Record<string, string> = {}

  for (const [key, value] of Object.entries(headers)) {
    const keyLower = key.toLowerCase()
    const sensitive = SENSITIVE_HEADER_KEYS.some((sensitiveKey) => keyLower.includes(sensitiveKey))
    redacted[key] = sensitive ? REDACTED : value
  }

  return redacted
}

/**
 * Decide whether a body key should be redacted.
 * A key is redacted when it exactly equals a sensitive key, or ends with one
 * (e.g. 'api_token', 'accessToken') — unless it contains an allowed key such
 * as 'max_tokens', which overrides the suffix match.
 */
function shouldRedactBodyKey(key: string): boolean {
  const keyLower = key.toLowerCase()
  return SENSITIVE_BODY_KEYS.some((sensitiveKey) => {
    const sensitiveKeyLower = sensitiveKey.toLowerCase()
    // Exact match is always sensitive
    if (keyLower === sensitiveKeyLower) {
      return true
    }
    // Suffix match (e.g. 'api_token', 'access_token'), with an escape hatch
    // for allowed patterns (e.g. 'max_tokens')
    if (keyLower.endsWith(`_${sensitiveKeyLower}`) || keyLower.endsWith(sensitiveKeyLower)) {
      return !ALLOWED_BODY_KEYS.some((allowed) => keyLower.includes(allowed.toLowerCase()))
    }
    return false
  })
}

/**
 * Redact sensitive values in a request body.
 * Recurses through plain objects and arrays; primitives pass through.
 * Circular references are replaced with a marker instead of overflowing
 * the stack.
 * @param body Original body (any JSON-like structure)
 * @returns A new structure with sensitive values replaced
 */
export function redactBody(body: unknown): unknown {
  return redactBodyInner(body, new WeakSet())
}

/**
 * Recursive worker for redactBody.
 * @param body Current value being processed
 * @param seen Objects on the current traversal path, used for cycle detection
 */
function redactBodyInner(body: unknown, seen: WeakSet<object>): unknown {
  if (body === null || body === undefined) {
    return body
  }

  if (Array.isArray(body)) {
    if (seen.has(body)) {
      return CIRCULAR
    }
    seen.add(body)
    const result = body.map((item) => redactBodyInner(item, seen))
    // Remove from the path set so shared (non-cyclic) references are still
    // fully redacted on later encounters
    seen.delete(body)
    return result
  }

  if (typeof body === 'object') {
    if (seen.has(body)) {
      return CIRCULAR
    }
    seen.add(body)
    const redacted: Record<string, unknown> = {}

    for (const [key, value] of Object.entries(body)) {
      // Allowed keys (like max_tokens, messages) keep their value; still
      // recurse so nested objects get their own sensitive keys scrubbed
      if (ALLOWED_BODY_KEYS.includes(key)) {
        redacted[key] =
          typeof value === 'object' && value !== null ? redactBodyInner(value, seen) : value
        continue
      }

      if (shouldRedactBodyKey(key)) {
        redacted[key] = REDACTED
      } else if (typeof value === 'object' && value !== null) {
        redacted[key] = redactBodyInner(value, seen)
      } else {
        redacted[key] = value
      }
    }

    seen.delete(body)
    return redacted
  }

  return body
}

/**
 * Redact sensitive information in a full request preview.
 * @param preview Request preview data (headers + body)
 * @returns Redacted preview with the same shape
 */
export function redactRequestPreview(preview: { headers: Record<string, string>; body: unknown }): {
  headers: Record<string, string>
  body: unknown
} {
  return {
    headers: redactHeaders(preview.headers),
    body: redactBody(preview.body)
  }
}
5 changes: 5 additions & 0 deletions src/main/presenter/configPresenter/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1061,6 +1061,11 @@ export class ConfigPresenter implements IConfigPresenter {
eventBus.sendToRenderer(CONFIG_EVENTS.COPY_WITH_COT_CHANGED, SendTarget.ALL_WINDOWS, enabled)
}

/**
 * Enable or disable the Trace debug feature.
 * Persists the flag under the 'traceDebugEnabled' setting, then broadcasts
 * CONFIG_EVENTS.TRACE_DEBUG_CHANGED to all renderer windows so open UIs can
 * react immediately.
 * @param enabled New on/off state of the trace debug switch
 */
setTraceDebugEnabled(enabled: boolean): void {
this.setSetting('traceDebugEnabled', enabled)
eventBus.sendToRenderer(CONFIG_EVENTS.TRACE_DEBUG_CHANGED, SendTarget.ALL_WINDOWS, enabled)
}

// Get floating button switch status
getFloatingButtonEnabled(): boolean {
const value = this.getSetting<boolean>('floatingButtonEnabled') ?? false
Expand Down
33 changes: 33 additions & 0 deletions src/main/presenter/llmProviderPresenter/baseProvider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -649,6 +649,39 @@ ${this.convertToolsToXml(tools)}
return null // 默认实现返回 null,表示不支持此功能
}

/**
 * Get request preview for debugging (DEV mode only).
 * Builds the actual request parameters that would be sent to the provider
 * API without performing any network call.
 *
 * Base-class stub: providers that support previews must override this.
 * NOTE(review): subclass implementations visible in this change return the
 * real API key in headers — presumably the caller applies
 * redactRequestPreview before display; confirm at the call site.
 * @param _messages Conversation messages
 * @param _modelId Model ID
 * @param _modelConfig Model configuration
 * @param _temperature Temperature parameter
 * @param _maxTokens Max tokens parameter
 * @param _mcpTools MCP tools definitions
 * @returns Preview data including endpoint, headers, and body
 * @throws Error always — the base class does not implement previews;
 *         callers should catch this for providers without preview support
 */
public async getRequestPreview(
// eslint-disable-next-line @typescript-eslint/no-unused-vars
_messages: ChatMessage[],
// eslint-disable-next-line @typescript-eslint/no-unused-vars
_modelId: string,
// eslint-disable-next-line @typescript-eslint/no-unused-vars
_modelConfig: ModelConfig,
// eslint-disable-next-line @typescript-eslint/no-unused-vars
_temperature: number,
// eslint-disable-next-line @typescript-eslint/no-unused-vars
_maxTokens: number,
// eslint-disable-next-line @typescript-eslint/no-unused-vars
_mcpTools: MCPToolDefinition[]
): Promise<{
endpoint: string
headers: Record<string, string>
body: unknown
}> {
// The base class cannot build a provider-specific request; throwing (not
// returning a marker) signals "unsupported" to the caller
throw new Error('Provider has not implemented getRequestPreview')
}

/**
* 将 MCPToolDefinition 转换为 XML 格式
* @param tools MCPToolDefinition 数组
Expand Down
2 changes: 1 addition & 1 deletion src/main/presenter/llmProviderPresenter/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -556,7 +556,7 @@ export class LLMProviderPresenter implements ILlmProviderPresenter {
}
}

private getProviderInstance(providerId: string): BaseLLMProvider {
public getProviderInstance(providerId: string): BaseLLMProvider {
let instance = this.providerInstances.get(providerId)
if (!instance) {
const provider = this.getProviderById(providerId)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1624,4 +1624,99 @@ export class OpenAICompatibleProvider extends BaseLLMProvider {
}
}
}

/**
 * Get request preview for debugging (DEV mode only).
 * Builds the actual request parameters without sending the request.
 * Mirrors the parameter construction in handleChatCompletion — keep the two
 * in sync when provider-specific quirks change.
 * @param messages Conversation messages to format for the API
 * @param modelId Target model ID
 * @param modelConfig Model configuration (function-call support, reasoning effort, verbosity)
 * @param temperature Sampling temperature
 * @param maxTokens Completion token budget
 * @param mcpTools MCP tool definitions to expose to the model
 * @returns Endpoint URL, request headers, and the request body object.
 *          NOTE(review): headers carry the real API key — presumably redacted
 *          by the caller (redactRequestPreview) before display; confirm.
 */
public async getRequestPreview(
messages: ChatMessage[],
modelId: string,
modelConfig: ModelConfig,
temperature: number,
maxTokens: number,
mcpTools: MCPToolDefinition[]
): Promise<{
endpoint: string
headers: Record<string, string>
body: unknown
}> {
const tools = mcpTools || []
const supportsFunctionCall = modelConfig?.functionCall || false
let processedMessages = [
...this.formatMessages(messages, supportsFunctionCall)
] as ChatCompletionMessageParam[]

// Models without native function calling get the tool definitions injected
// into the prompt instead of the tools field
if (tools.length > 0 && !supportsFunctionCall) {
processedMessages = this.prepareFunctionCallPrompt(processedMessages, tools)
}

// Convert tools to OpenAI format only when the model supports native calls
const apiTools =
tools.length > 0 && supportsFunctionCall
? await presenter.mcpPresenter.mcpToolsToOpenAITools(tools, this.provider.id)
: undefined

// Build request params (same logic as handleChatCompletion).
// Reasoning-style models (o1/o3/o4, gpt-5) take max_completion_tokens
// instead of the legacy max_tokens field
const requestParams: OpenAI.Chat.ChatCompletionCreateParams = {
messages: processedMessages,
model: modelId,
stream: true,
temperature,
...(modelId.startsWith('o1') ||
modelId.startsWith('o3') ||
modelId.startsWith('o4') ||
modelId.includes('gpt-5')
? { max_completion_tokens: maxTokens }
: { max_tokens: maxTokens })
}

// Ask for usage stats in the final stream chunk
requestParams.stream_options = { include_usage: true }

// DashScope quirk: force plain-text response format
if (this.provider.id.toLowerCase().includes('dashscope')) {
requestParams.response_format = { type: 'text' }
}

// OpenRouter quirk: pin this free DeepSeek model to the 'chutes' backend
if (
this.provider.id.toLowerCase().includes('openrouter') &&
modelId.startsWith('deepseek/deepseek-chat-v3-0324:free')
) {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
;(requestParams as any).provider = {
only: ['chutes']
}
}

// Optional reasoning/verbosity knobs, gated on per-model support
if (modelConfig.reasoningEffort && this.supportsEffortParameter(modelId)) {
;(requestParams as any).reasoning_effort = modelConfig.reasoningEffort
}

if (modelConfig.verbosity && this.supportsVerbosityParameter(modelId)) {
;(requestParams as any).verbosity = modelConfig.verbosity
}

// Reasoning models reject the temperature parameter — strip it
OPENAI_REASONING_MODELS.forEach((noTempId) => {
if (modelId.startsWith(noTempId)) delete requestParams.temperature
})

if (apiTools && apiTools.length > 0 && supportsFunctionCall) requestParams.tools = apiTools

// Build headers (Authorization holds the raw key — see return note above)
const headers: Record<string, string> = {
'Content-Type': 'application/json',
Authorization: `Bearer ${this.provider.apiKey || 'MISSING_API_KEY'}`,
...this.defaultHeaders
}

// Determine endpoint.
// NOTE(review): a baseUrl with a trailing slash yields '//chat/completions';
// presumably baseUrl is normalized upstream — confirm
const baseUrl = this.provider.baseUrl || 'https://api.openai.com/v1'
const endpoint = `${baseUrl}/chat/completions`

return {
endpoint,
headers,
body: requestParams
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -1324,4 +1324,72 @@ export class OpenAIResponsesProvider extends BaseLLMProvider {
return []
}
}

/**
 * Get request preview for debugging (DEV mode only).
 * Builds the Responses-API request parameters without sending the request.
 * Should mirror the params built by the real completion path — keep in sync.
 * @param messages Conversation messages to format for the API
 * @param modelId Target model ID
 * @param modelConfig Model configuration (function-call support, reasoning effort, verbosity)
 * @param temperature Sampling temperature
 * @param maxTokens Output token budget (Responses API: max_output_tokens)
 * @param mcpTools MCP tool definitions to expose to the model
 * @returns Endpoint URL, request headers, and the request body object.
 *          NOTE(review): headers carry the real API key — presumably redacted
 *          by the caller (redactRequestPreview) before display; confirm.
 */
public async getRequestPreview(
messages: ChatMessage[],
modelId: string,
modelConfig: ModelConfig,
temperature: number,
maxTokens: number,
mcpTools: MCPToolDefinition[]
): Promise<{
endpoint: string
headers: Record<string, string>
body: unknown
}> {
const tools = mcpTools || []
const supportsFunctionCall = modelConfig?.functionCall || false
let processedMessages = this.formatMessages(messages)

// Models without native function calling get the tool definitions injected
// into the prompt instead of the tools field
if (tools.length > 0 && !supportsFunctionCall) {
processedMessages = this.prepareFunctionCallPrompt(processedMessages, tools)
}

// Convert tools to Responses-API format only for native function calling
const apiTools =
tools.length > 0 && supportsFunctionCall
? await presenter.mcpPresenter.mcpToolsToOpenAIResponsesTools(tools, this.provider.id)
: undefined

// Responses API uses 'input' (not 'messages') and 'max_output_tokens'
const requestParams: OpenAI.Responses.ResponseCreateParams = {
model: modelId,
input: processedMessages,
temperature,
max_output_tokens: maxTokens,
stream: true
}

if (tools.length > 0 && supportsFunctionCall && apiTools) {
requestParams.tools = apiTools
}

// Optional reasoning/verbosity knobs, gated on per-model support.
// Responses API nests these: reasoning.effort and text.verbosity
if (modelConfig.reasoningEffort && this.supportsEffortParameter(modelId)) {
;(requestParams as any).reasoning = {
effort: modelConfig.reasoningEffort
}
}

if (modelConfig.verbosity && this.supportsVerbosityParameter(modelId)) {
;(requestParams as any).text = {
verbosity: modelConfig.verbosity
}
}

// Build headers (Authorization holds the raw key — see return note above)
const headers: Record<string, string> = {
'Content-Type': 'application/json',
Authorization: `Bearer ${this.provider.apiKey || 'MISSING_API_KEY'}`,
...this.defaultHeaders
}

// NOTE(review): a baseUrl with a trailing slash yields '//responses';
// presumably baseUrl is normalized upstream — confirm
const baseUrl = this.provider.baseUrl || 'https://api.openai.com/v1'
const endpoint = `${baseUrl}/responses`

return {
endpoint,
headers,
body: requestParams
}
}
}
Loading