From 5dbdba513d0a7cb72f844d62cca5bddb045e448c Mon Sep 17 00:00:00 2001 From: Thomas Kosiewski Date: Wed, 17 Dec 2025 17:10:24 +0100 Subject: [PATCH] =?UTF-8?q?=F0=9F=A4=96=20feat:=20persist=20per-workspace?= =?UTF-8?q?=20model=20+=20thinking?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change-Id: I20f621f85e3475ca30f07a64d0e50822cbc59641 Signed-off-by: Thomas Kosiewski --- src/browser/App.tsx | 47 ++++- src/browser/components/ChatInput/index.tsx | 27 ++- .../ChatInput/useCreationWorkspace.test.tsx | 38 +++- .../ChatInput/useCreationWorkspace.ts | 26 ++- src/browser/components/ThinkingSlider.tsx | 2 +- src/browser/contexts/ThinkingContext.test.tsx | 29 +-- src/browser/contexts/ThinkingContext.tsx | 109 ++++++++---- .../contexts/WorkspaceContext.test.tsx | 36 +++- src/browser/contexts/WorkspaceContext.tsx | 53 +++++- src/browser/utils/messages/sendOptions.ts | 31 +++- src/common/constants/storage.ts | 15 +- src/common/orpc/schemas.ts | 1 + src/common/orpc/schemas/api.ts | 8 + src/common/orpc/schemas/project.ts | 4 + src/common/orpc/schemas/workspace.ts | 4 + .../orpc/schemas/workspaceAiSettings.ts | 15 ++ src/node/config.ts | 6 + src/node/orpc/router.ts | 6 + src/node/services/workspaceService.ts | 165 +++++++++++++++++- tests/ipc/workspaceAISettings.test.ts | 51 ++++++ 20 files changed, 594 insertions(+), 79 deletions(-) create mode 100644 src/common/orpc/schemas/workspaceAiSettings.ts create mode 100644 tests/ipc/workspaceAISettings.test.ts diff --git a/src/browser/App.tsx b/src/browser/App.tsx index f2d8dad95e..d308d1e8f7 100644 --- a/src/browser/App.tsx +++ b/src/browser/App.tsx @@ -34,8 +34,13 @@ import { buildCoreSources, type BuildSourcesParams } from "./utils/commands/sour import type { ThinkingLevel } from "@/common/types/thinking"; import { CUSTOM_EVENTS } from "@/common/constants/events"; import { isWorkspaceForkSwitchEvent } from "./utils/workspaceEvents"; -import { getThinkingLevelByModelKey, getModelKey } 
from "@/common/constants/storage"; +import { + getThinkingLevelByModelKey, + getThinkingLevelKey, + getModelKey, +} from "@/common/constants/storage"; import { migrateGatewayModel } from "@/browser/hooks/useGatewayModels"; +import { enforceThinkingPolicy } from "@/browser/utils/thinking/policy"; import { getDefaultModel } from "@/browser/hooks/useModelsFromSettings"; import type { BranchListResult } from "@/common/orpc/types"; import { useTelemetry } from "./hooks/useTelemetry"; @@ -52,7 +57,7 @@ import { TooltipProvider } from "./components/ui/tooltip"; import { ExperimentsProvider } from "./contexts/ExperimentsContext"; import { getWorkspaceSidebarKey } from "./utils/workspace"; -const THINKING_LEVELS: ThinkingLevel[] = ["off", "low", "medium", "high"]; +const THINKING_LEVELS: ThinkingLevel[] = ["off", "low", "medium", "high", "xhigh"]; function isStorybookIframe(): boolean { return typeof window !== "undefined" && window.location.pathname.endsWith("iframe.html"); @@ -293,9 +298,25 @@ function AppInner() { if (!workspaceId) { return "off"; } + + const scopedKey = getThinkingLevelKey(workspaceId); + const scoped = readPersistedState(scopedKey, undefined); + if (scoped !== undefined) { + return THINKING_LEVELS.includes(scoped) ? scoped : "off"; + } + + // Migration: fall back to legacy per-model thinking and seed the workspace-scoped key. const model = getModelForWorkspace(workspaceId); - const level = readPersistedState(getThinkingLevelByModelKey(model), "off"); - return THINKING_LEVELS.includes(level) ? level : "off"; + const legacy = readPersistedState( + getThinkingLevelByModelKey(model), + undefined + ); + if (legacy !== undefined && THINKING_LEVELS.includes(legacy)) { + updatePersistedState(scopedKey, legacy); + return legacy; + } + + return "off"; }, [getModelForWorkspace] ); @@ -308,22 +329,32 @@ function AppInner() { const normalized = THINKING_LEVELS.includes(level) ? 
level : "off"; const model = getModelForWorkspace(workspaceId); - const key = getThinkingLevelByModelKey(model); + const effective = enforceThinkingPolicy(model, normalized); + const key = getThinkingLevelKey(workspaceId); // Use the utility function which handles localStorage and event dispatch // ThinkingProvider will pick this up via its listener - updatePersistedState(key, normalized); + updatePersistedState(key, effective); + + // Persist to backend so the palette change follows the workspace across devices. + if (api) { + api.workspace + .updateAISettings({ workspaceId, aiSettings: { model, thinkingLevel: effective } }) + .catch(() => { + // Best-effort only. + }); + } // Dispatch toast notification event for UI feedback if (typeof window !== "undefined") { window.dispatchEvent( new CustomEvent(CUSTOM_EVENTS.THINKING_LEVEL_TOAST, { - detail: { workspaceId, level: normalized }, + detail: { workspaceId, level: effective }, }) ); } }, - [getModelForWorkspace] + [api, getModelForWorkspace] ); const registerParamsRef = useRef(null); diff --git a/src/browser/components/ChatInput/index.tsx b/src/browser/components/ChatInput/index.tsx index 7eef227cac..58e244cc4c 100644 --- a/src/browser/components/ChatInput/index.tsx +++ b/src/browser/components/ChatInput/index.tsx @@ -20,6 +20,9 @@ import { useMode } from "@/browser/contexts/ModeContext"; import { ThinkingSliderComponent } from "../ThinkingSlider"; import { ModelSettings } from "../ModelSettings"; import { useAPI } from "@/browser/contexts/API"; +import { useThinkingLevel } from "@/browser/hooks/useThinkingLevel"; +import { migrateGatewayModel } from "@/browser/hooks/useGatewayModels"; +import { enforceThinkingPolicy } from "@/browser/utils/thinking/policy"; import { useSendMessageOptions } from "@/browser/hooks/useSendMessageOptions"; import { getModelKey, @@ -133,6 +136,8 @@ export type { ChatInputProps, ChatInputAPI }; const ChatInputInner: React.FC = (props) => { const { api } = useAPI(); const { variant } = 
props; + const [thinkingLevel] = useThinkingLevel(); + const workspaceId = variant === "workspace" ? props.workspaceId : null; // Extract workspace-specific props with defaults const disabled = props.disabled ?? false; @@ -333,10 +338,26 @@ const ChatInputInner: React.FC = (props) => { const setPreferredModel = useCallback( (model: string) => { - ensureModelInSettings(model); // Ensure model exists in Settings - updatePersistedState(storageKeys.modelKey, model); // Update workspace or project-specific + const canonicalModel = migrateGatewayModel(model); + ensureModelInSettings(canonicalModel); // Ensure model exists in Settings + updatePersistedState(storageKeys.modelKey, canonicalModel); // Update workspace or project-specific + + // Workspace variant: persist to backend for cross-device consistency. + if (!api || variant !== "workspace" || !workspaceId) { + return; + } + + const effectiveThinkingLevel = enforceThinkingPolicy(canonicalModel, thinkingLevel); + api.workspace + .updateAISettings({ + workspaceId, + aiSettings: { model: canonicalModel, thinkingLevel: effectiveThinkingLevel }, + }) + .catch(() => { + // Best-effort only. If offline or backend is old, sendMessage will persist. 
+ }); }, - [storageKeys.modelKey, ensureModelInSettings] + [api, storageKeys.modelKey, ensureModelInSettings, thinkingLevel, variant, workspaceId] ); const deferredModel = useDeferredValue(preferredModel); const deferredInput = useDeferredValue(input); diff --git a/src/browser/components/ChatInput/useCreationWorkspace.test.tsx b/src/browser/components/ChatInput/useCreationWorkspace.test.tsx index e853953348..9534665824 100644 --- a/src/browser/components/ChatInput/useCreationWorkspace.test.tsx +++ b/src/browser/components/ChatInput/useCreationWorkspace.test.tsx @@ -7,6 +7,7 @@ import { getModeKey, getPendingScopeId, getProjectScopeId, + getThinkingLevelKey, } from "@/common/constants/storage"; import type { SendMessageError as _SendMessageError } from "@/common/types/errors"; import type { WorkspaceChatMessage } from "@/common/orpc/types"; @@ -83,11 +84,18 @@ type ListBranchesArgs = Parameters[0]; type WorkspaceSendMessageArgs = Parameters[0]; type WorkspaceSendMessageResult = Awaited>; type WorkspaceCreateArgs = Parameters[0]; +type WorkspaceUpdateAISettingsArgs = Parameters[0]; +type WorkspaceUpdateAISettingsResult = Awaited< + ReturnType +>; type WorkspaceCreateResult = Awaited>; type NameGenerationArgs = Parameters[0]; type NameGenerationResult = Awaited>; type MockOrpcProjectsClient = Pick; -type MockOrpcWorkspaceClient = Pick; +type MockOrpcWorkspaceClient = Pick< + APIClient["workspace"], + "sendMessage" | "create" | "updateAISettings" +>; type MockOrpcNameGenerationClient = Pick; type WindowWithApi = Window & typeof globalThis; type WindowApi = WindowWithApi["api"]; @@ -114,6 +122,9 @@ interface SetupWindowOptions { sendMessage?: ReturnType< typeof mock<(args: WorkspaceSendMessageArgs) => Promise> >; + updateAISettings?: ReturnType< + typeof mock<(args: WorkspaceUpdateAISettingsArgs) => Promise> + >; create?: ReturnType Promise>>; nameGeneration?: ReturnType< typeof mock<(args: NameGenerationArgs) => Promise> @@ -124,6 +135,7 @@ const setupWindow = ({ 
listBranches, sendMessage, create, + updateAISettings, nameGeneration, }: SetupWindowOptions = {}) => { const listBranchesMock = @@ -157,6 +169,15 @@ const setupWindow = ({ } as WorkspaceCreateResult); }); + const updateAISettingsMock = + updateAISettings ?? + mock<(args: WorkspaceUpdateAISettingsArgs) => Promise>(() => { + return Promise.resolve({ + success: true, + data: undefined, + } as WorkspaceUpdateAISettingsResult); + }); + const nameGenerationMock = nameGeneration ?? mock<(args: NameGenerationArgs) => Promise>(() => { @@ -176,6 +197,7 @@ const setupWindow = ({ workspace: { sendMessage: (input: WorkspaceSendMessageArgs) => sendMessageMock(input), create: (input: WorkspaceCreateArgs) => createMock(input), + updateAISettings: (input: WorkspaceUpdateAISettingsArgs) => updateAISettingsMock(input), }, nameGeneration: { generate: (input: NameGenerationArgs) => nameGenerationMock(input), @@ -213,6 +235,7 @@ const setupWindow = ({ workspace: { list: rejectNotImplemented("workspace.list"), create: (args: WorkspaceCreateArgs) => createMock(args), + updateAISettings: (args: WorkspaceUpdateAISettingsArgs) => updateAISettingsMock(args), remove: rejectNotImplemented("workspace.remove"), rename: rejectNotImplemented("workspace.rename"), fork: rejectNotImplemented("workspace.fork"), @@ -278,7 +301,11 @@ const setupWindow = ({ return { projectsApi: { listBranches: listBranchesMock }, - workspaceApi: { sendMessage: sendMessageMock, create: createMock }, + workspaceApi: { + sendMessage: sendMessageMock, + create: createMock, + updateAISettings: updateAISettingsMock, + }, nameGenerationApi: { generate: nameGenerationMock }, }; }; @@ -466,7 +493,7 @@ describe("useCreationWorkspace", () => { const pendingInputKey = getInputKey(pendingScopeId); const pendingImagesKey = getInputImagesKey(pendingScopeId); expect(updatePersistedStateCalls).toContainEqual([modeKey, "plan"]); - // Note: thinking level is no longer synced per-workspace, it's stored per-model globally + // Thinking is 
workspace-scoped, but this test doesn't set a project-scoped thinking preference. expect(updatePersistedStateCalls).toContainEqual([pendingInputKey, ""]); expect(updatePersistedStateCalls).toContainEqual([pendingImagesKey, undefined]); }); @@ -510,7 +537,10 @@ describe("useCreationWorkspace", () => { expect(onWorkspaceCreated.mock.calls.length).toBe(0); await waitFor(() => expect(getHook().toast?.message).toBe("backend exploded")); await waitFor(() => expect(getHook().isSending).toBe(false)); - expect(updatePersistedStateCalls).toEqual([]); + + // Side effect: send-options reader may migrate thinking level into the project scope. + const thinkingKey = getThinkingLevelKey(getProjectScopeId(TEST_PROJECT_PATH)); + expect(updatePersistedStateCalls).toEqual([[thinkingKey, "off"]]); }); }); diff --git a/src/browser/components/ChatInput/useCreationWorkspace.ts b/src/browser/components/ChatInput/useCreationWorkspace.ts index 171fa5adc1..0d86fa700d 100644 --- a/src/browser/components/ChatInput/useCreationWorkspace.ts +++ b/src/browser/components/ChatInput/useCreationWorkspace.ts @@ -1,6 +1,7 @@ import { useState, useEffect, useCallback } from "react"; import type { FrontendWorkspaceMetadata } from "@/common/types/workspace"; import type { RuntimeConfig, RuntimeMode } from "@/common/types/runtime"; +import type { ThinkingLevel } from "@/common/types/thinking"; import type { UIMode } from "@/common/types/mode"; import { parseRuntimeString } from "@/browser/utils/chatCommands"; import { useDraftWorkspaceSettings } from "@/browser/hooks/useDraftWorkspaceSettings"; @@ -11,6 +12,7 @@ import { getInputImagesKey, getModelKey, getModeKey, + getThinkingLevelKey, getPendingScopeId, getProjectScopeId, } from "@/common/constants/storage"; @@ -45,8 +47,13 @@ function syncCreationPreferences(projectPath: string, workspaceId: string): void updatePersistedState(getModeKey(workspaceId), projectMode); } - // Note: thinking level is stored per-model globally, not per-workspace, - // so no sync 
is needed here + const projectThinkingLevel = readPersistedState( + getThinkingLevelKey(projectScopeId), + null + ); + if (projectThinkingLevel !== null) { + updatePersistedState(getThinkingLevelKey(workspaceId), projectThinkingLevel); + } } interface UseCreationWorkspaceReturn { @@ -196,6 +203,19 @@ export function useCreationWorkspace({ const { metadata } = createResult; + // Best-effort: persist the initial AI settings to the backend immediately so this workspace + // is portable across devices even before the first stream starts. + api.workspace + .updateAISettings({ + workspaceId: metadata.id, + aiSettings: { + model: settings.model, + thinkingLevel: settings.thinkingLevel, + }, + }) + .catch(() => { + // Ignore (offline / older backend). sendMessage will persist as a fallback. + }); // Sync preferences immediately (before switching) syncCreationPreferences(projectPath, metadata.id); if (projectPath) { @@ -239,6 +259,8 @@ export function useCreationWorkspace({ projectScopeId, onWorkspaceCreated, getRuntimeString, + settings.model, + settings.thinkingLevel, settings.trunkBranch, waitForGeneration, ] diff --git a/src/browser/components/ThinkingSlider.tsx b/src/browser/components/ThinkingSlider.tsx index 047f8ac180..bae8c41b97 100644 --- a/src/browser/components/ThinkingSlider.tsx +++ b/src/browser/components/ThinkingSlider.tsx @@ -199,7 +199,7 @@ export const ThinkingSliderComponent: React.FC = ({ modelS - Thinking: {formatKeybind(KEYBINDS.TOGGLE_THINKING)} to cycle. Saved per model. + Thinking: {formatKeybind(KEYBINDS.TOGGLE_THINKING)} to cycle. Saved per workspace. 
); diff --git a/src/browser/contexts/ThinkingContext.test.tsx b/src/browser/contexts/ThinkingContext.test.tsx index 927974e499..3978c02154 100644 --- a/src/browser/contexts/ThinkingContext.test.tsx +++ b/src/browser/contexts/ThinkingContext.test.tsx @@ -4,7 +4,11 @@ import { act, cleanup, render, waitFor } from "@testing-library/react"; import React from "react"; import { ThinkingProvider } from "./ThinkingContext"; import { useThinkingLevel } from "@/browser/hooks/useThinkingLevel"; -import { getModelKey, getThinkingLevelByModelKey } from "@/common/constants/storage"; +import { + getModelKey, + getThinkingLevelByModelKey, + getThinkingLevelKey, +} from "@/common/constants/storage"; import { updatePersistedState } from "@/browser/hooks/usePersistedState"; // Setup basic DOM environment for testing-library @@ -49,8 +53,7 @@ describe("ThinkingContext", () => { const workspaceId = "ws-1"; updatePersistedState(getModelKey(workspaceId), "openai:gpt-5.2"); - updatePersistedState(getThinkingLevelByModelKey("openai:gpt-5.2"), "high"); - updatePersistedState(getThinkingLevelByModelKey("anthropic:claude-3.5"), "low"); + updatePersistedState(getThinkingLevelKey(workspaceId), "high"); let unmounts = 0; @@ -79,21 +82,18 @@ describe("ThinkingContext", () => { updatePersistedState(getModelKey(workspaceId), "anthropic:claude-3.5"); }); + // Thinking is workspace-scoped (not per-model), so switching models should not change it. 
await waitFor(() => { - expect(view.getByTestId("child").textContent).toBe("low"); + expect(view.getByTestId("child").textContent).toBe("high"); }); expect(unmounts).toBe(0); }); - test("switching models restores the per-model thinking level", async () => { + test("migrates legacy per-model thinking to the workspace-scoped key", async () => { const workspaceId = "ws-1"; - // Model A updatePersistedState(getModelKey(workspaceId), "openai:gpt-5.2"); - updatePersistedState(getThinkingLevelByModelKey("openai:gpt-5.2"), "high"); - - // Model B - updatePersistedState(getThinkingLevelByModelKey("anthropic:claude-3.5"), "low"); + updatePersistedState(getThinkingLevelByModelKey("openai:gpt-5.2"), "low"); const view = render( @@ -102,10 +102,15 @@ describe("ThinkingContext", () => { ); await waitFor(() => { - expect(view.getByTestId("thinking").textContent).toBe("high:ws-1"); + expect(view.getByTestId("thinking").textContent).toBe("low:ws-1"); }); - // Change model -> should restore that model's stored thinking level + // Migration should have populated the new workspace-scoped key. + const persisted = window.localStorage.getItem(getThinkingLevelKey(workspaceId)); + expect(persisted).toBeTruthy(); + expect(JSON.parse(persisted!)).toBe("low"); + + // Switching models should not change the workspace-scoped value. 
act(() => { updatePersistedState(getModelKey(workspaceId), "anthropic:claude-3.5"); }); diff --git a/src/browser/contexts/ThinkingContext.tsx b/src/browser/contexts/ThinkingContext.tsx index a587432545..1e7847b1b5 100644 --- a/src/browser/contexts/ThinkingContext.tsx +++ b/src/browser/contexts/ThinkingContext.tsx @@ -1,10 +1,22 @@ import type { ReactNode } from "react"; -import React, { createContext, useContext, useMemo } from "react"; +import React, { createContext, useContext, useEffect, useMemo, useCallback } from "react"; import type { ThinkingLevel } from "@/common/types/thinking"; -import { usePersistedState } from "@/browser/hooks/usePersistedState"; -import { getThinkingLevelByModelKey, getModelKey } from "@/common/constants/storage"; +import { + readPersistedState, + updatePersistedState, + usePersistedState, +} from "@/browser/hooks/usePersistedState"; +import { + getModelKey, + getProjectScopeId, + getThinkingLevelByModelKey, + getThinkingLevelKey, + GLOBAL_SCOPE_ID, +} from "@/common/constants/storage"; import { getDefaultModel } from "@/browser/hooks/useModelsFromSettings"; import { migrateGatewayModel } from "@/browser/hooks/useGatewayModels"; +import { enforceThinkingPolicy } from "@/browser/utils/thinking/policy"; +import { useAPI } from "@/browser/contexts/API"; interface ThinkingContextType { thinkingLevel: ThinkingLevel; @@ -14,45 +26,74 @@ interface ThinkingContextType { const ThinkingContext = createContext(undefined); interface ThinkingProviderProps { - workspaceId?: string; // For existing workspaces - projectPath?: string; // For workspace creation (uses project-scoped model key) + workspaceId?: string; // Workspace-scoped storage (highest priority) + projectPath?: string; // Project-scoped storage (fallback if no workspaceId) children: ReactNode; } -/** - * Hook to get the model key for the current scope. - */ -function useModelKey(workspaceId?: string, projectPath?: string): string | null { - return workspaceId - ? 
getModelKey(workspaceId) - : projectPath - ? getModelKey(`__project__/${projectPath}`) - : null; +function getScopeId(workspaceId: string | undefined, projectPath: string | undefined): string { + return workspaceId ?? (projectPath ? getProjectScopeId(projectPath) : GLOBAL_SCOPE_ID); } -export const ThinkingProvider: React.FC = ({ - workspaceId, - projectPath, - children, -}) => { +function getCanonicalModelForScope(scopeId: string, fallbackModel: string): string { + const rawModel = readPersistedState(getModelKey(scopeId), fallbackModel); + return migrateGatewayModel(rawModel || fallbackModel); +} + +export const ThinkingProvider: React.FC = (props) => { + const { api } = useAPI(); const defaultModel = getDefaultModel(); - const modelKey = useModelKey(workspaceId, projectPath); + const scopeId = getScopeId(props.workspaceId, props.projectPath); + const thinkingKey = getThinkingLevelKey(scopeId); + + // Workspace-scoped thinking. (No longer per-model.) + const [thinkingLevel, setThinkingLevelInternal] = usePersistedState( + thinkingKey, + "off", + { listener: true } + ); + + // One-time migration: if the new workspace-scoped key is missing, seed from the legacy per-model key. + useEffect(() => { + const existing = readPersistedState(thinkingKey, undefined); + if (existing !== undefined) { + return; + } - // Subscribe to model changes so we update thinking level when model changes. - // This uses a fallback key to satisfy hooks rules; it should be unused in practice - // because ThinkingProvider is expected to have either workspaceId or projectPath. - const [rawModel] = usePersistedState(modelKey ?? 
"model:__unused__", defaultModel, { - listener: true, - }); + const model = getCanonicalModelForScope(scopeId, defaultModel); + const legacyKey = getThinkingLevelByModelKey(model); + const legacy = readPersistedState(legacyKey, undefined); + if (legacy === undefined) { + return; + } - const thinkingKey = useMemo(() => { - const model = migrateGatewayModel(rawModel || defaultModel); - return getThinkingLevelByModelKey(model); - }, [rawModel, defaultModel]); + const effective = enforceThinkingPolicy(model, legacy); + updatePersistedState(thinkingKey, effective); + }, [defaultModel, scopeId, thinkingKey]); - const [thinkingLevel, setThinkingLevel] = usePersistedState(thinkingKey, "off", { - listener: true, - }); + const setThinkingLevel = useCallback( + (level: ThinkingLevel) => { + const model = getCanonicalModelForScope(scopeId, defaultModel); + const effective = enforceThinkingPolicy(model, level); + + setThinkingLevelInternal(effective); + + // Workspace variant: persist to backend so settings follow the workspace across devices. + if (!props.workspaceId || !api) { + return; + } + + api.workspace + .updateAISettings({ + workspaceId: props.workspaceId, + aiSettings: { model, thinkingLevel: effective }, + }) + .catch(() => { + // Best-effort only. If offline or backend is old, the next sendMessage will persist. + }); + }, + [api, defaultModel, props.workspaceId, scopeId, setThinkingLevelInternal] + ); // Memoize context value to prevent unnecessary re-renders of consumers. 
const contextValue = useMemo( @@ -60,7 +101,7 @@ export const ThinkingProvider: React.FC = ({ [thinkingLevel, setThinkingLevel] ); - return {children}; + return {props.children}; }; export const useThinking = () => { diff --git a/src/browser/contexts/WorkspaceContext.test.tsx b/src/browser/contexts/WorkspaceContext.test.tsx index b4c95a7024..cdc07b2ae1 100644 --- a/src/browser/contexts/WorkspaceContext.test.tsx +++ b/src/browser/contexts/WorkspaceContext.test.tsx @@ -6,7 +6,11 @@ import type { WorkspaceContext } from "./WorkspaceContext"; import { WorkspaceProvider, useWorkspaceContext } from "./WorkspaceContext"; import { ProjectProvider } from "@/browser/contexts/ProjectContext"; import { useWorkspaceStoreRaw as getWorkspaceStoreRaw } from "@/browser/stores/WorkspaceStore"; -import { SELECTED_WORKSPACE_KEY } from "@/common/constants/storage"; +import { + SELECTED_WORKSPACE_KEY, + getModelKey, + getThinkingLevelKey, +} from "@/common/constants/storage"; import type { RecursivePartial } from "@/browser/testUtils"; import type { APIClient } from "@/browser/contexts/API"; @@ -92,6 +96,36 @@ describe("WorkspaceContext", () => { expect(workspaceApi.onMetadata).toHaveBeenCalled(); }); + test("seeds model + thinking localStorage from backend metadata", async () => { + const initialWorkspaces: FrontendWorkspaceMetadata[] = [ + createWorkspaceMetadata({ + id: "ws-ai", + aiSettings: { model: "openai:gpt-5.2", thinkingLevel: "xhigh" }, + }), + ]; + + createMockAPI({ + workspace: { + list: () => Promise.resolve(initialWorkspaces), + }, + localStorage: { + // Seed with different values; backend should win. 
+ [getModelKey("ws-ai")]: JSON.stringify("anthropic:claude-3.5"), + [getThinkingLevelKey("ws-ai")]: JSON.stringify("low"), + }, + }); + + const ctx = await setup(); + + await waitFor(() => expect(ctx().workspaceMetadata.size).toBe(1)); + + expect(JSON.parse(globalThis.localStorage.getItem(getModelKey("ws-ai"))!)).toBe( + "openai:gpt-5.2" + ); + expect(JSON.parse(globalThis.localStorage.getItem(getThinkingLevelKey("ws-ai"))!)).toBe( + "xhigh" + ); + }); test("loads workspace metadata on mount", async () => { const initialWorkspaces: FrontendWorkspaceMetadata[] = [ createWorkspaceMetadata({ diff --git a/src/browser/contexts/WorkspaceContext.tsx b/src/browser/contexts/WorkspaceContext.tsx index 66b288d11f..5665b1cd61 100644 --- a/src/browser/contexts/WorkspaceContext.tsx +++ b/src/browser/contexts/WorkspaceContext.tsx @@ -9,16 +9,56 @@ import { type SetStateAction, } from "react"; import type { FrontendWorkspaceMetadata } from "@/common/types/workspace"; +import type { ThinkingLevel } from "@/common/types/thinking"; import type { WorkspaceSelection } from "@/browser/components/ProjectSidebar"; import type { RuntimeConfig } from "@/common/types/runtime"; -import { deleteWorkspaceStorage, SELECTED_WORKSPACE_KEY } from "@/common/constants/storage"; +import { + deleteWorkspaceStorage, + getModelKey, + getThinkingLevelKey, + SELECTED_WORKSPACE_KEY, +} from "@/common/constants/storage"; import { useAPI } from "@/browser/contexts/API"; -import { usePersistedState } from "@/browser/hooks/usePersistedState"; +import { + readPersistedState, + updatePersistedState, + usePersistedState, +} from "@/browser/hooks/usePersistedState"; import { useProjectContext } from "@/browser/contexts/ProjectContext"; import { useWorkspaceStoreRaw } from "@/browser/stores/WorkspaceStore"; import { isExperimentEnabled } from "@/browser/hooks/useExperiments"; import { EXPERIMENT_IDS } from "@/common/constants/experiments"; +/** + * Seed per-workspace localStorage from backend workspace metadata. 
+ * + * This keeps a workspace's model/thinking consistent across devices/browsers. + */ +function seedWorkspaceLocalStorageFromBackend(metadata: FrontendWorkspaceMetadata): void { + const ai = metadata.aiSettings; + if (!ai) { + return; + } + + // Seed model selection. + if (typeof ai.model === "string" && ai.model.length > 0) { + const modelKey = getModelKey(metadata.id); + const existingModel = readPersistedState(modelKey, undefined); + if (existingModel !== ai.model) { + updatePersistedState(modelKey, ai.model); + } + } + + // Seed thinking level. + if (ai.thinkingLevel) { + const thinkingKey = getThinkingLevelKey(metadata.id); + const existingThinking = readPersistedState(thinkingKey, undefined); + if (existingThinking !== ai.thinkingLevel) { + updatePersistedState(thinkingKey, ai.thinkingLevel); + } + } +} + /** * Ensure workspace metadata has createdAt timestamp. * DEFENSIVE: Backend guarantees createdAt, but default to 2025-01-01 if missing. @@ -128,6 +168,7 @@ export function WorkspaceProvider(props: WorkspaceProviderProps) { for (const metadata of metadataList) { ensureCreatedAt(metadata); // Use stable workspace ID as key (not path, which can change) + seedWorkspaceLocalStorageFromBackend(metadata); metadataMap.set(metadata.id, metadata); } setWorkspaceMetadata(metadataMap); @@ -255,6 +296,11 @@ export function WorkspaceProvider(props: WorkspaceProviderProps) { for await (const event of iterator) { if (signal.aborted) break; + if (event.metadata !== null) { + ensureCreatedAt(event.metadata); + seedWorkspaceLocalStorageFromBackend(event.metadata); + } + setWorkspaceMetadata((prev) => { const updated = new Map(prev); const isNewWorkspace = !prev.has(event.workspaceId) && event.metadata !== null; @@ -267,7 +313,6 @@ export function WorkspaceProvider(props: WorkspaceProviderProps) { // Workspace deleted - remove from map updated.delete(event.workspaceId); } else { - ensureCreatedAt(event.metadata); updated.set(event.workspaceId, event.metadata); } @@ -317,6 
+362,7 @@ export function WorkspaceProvider(props: WorkspaceProviderProps) { // Update metadata immediately to avoid race condition with validation effect ensureCreatedAt(result.metadata); + seedWorkspaceLocalStorageFromBackend(result.metadata); setWorkspaceMetadata((prev) => { const updated = new Map(prev); updated.set(result.metadata.id, result.metadata); @@ -418,6 +464,7 @@ export function WorkspaceProvider(props: WorkspaceProviderProps) { const metadata = await api.workspace.getInfo({ workspaceId }); if (metadata) { ensureCreatedAt(metadata); + seedWorkspaceLocalStorageFromBackend(metadata); } return metadata; }, diff --git a/src/browser/utils/messages/sendOptions.ts b/src/browser/utils/messages/sendOptions.ts index e26114558d..30e6c1b0b6 100644 --- a/src/browser/utils/messages/sendOptions.ts +++ b/src/browser/utils/messages/sendOptions.ts @@ -1,6 +1,11 @@ -import { getModelKey, getThinkingLevelByModelKey, getModeKey } from "@/common/constants/storage"; +import { + getModelKey, + getThinkingLevelByModelKey, + getThinkingLevelKey, + getModeKey, +} from "@/common/constants/storage"; import { modeToToolPolicy } from "@/common/utils/ui/modeUtils"; -import { readPersistedState } from "@/browser/hooks/usePersistedState"; +import { readPersistedState, updatePersistedState } from "@/browser/hooks/usePersistedState"; import { getDefaultModel } from "@/browser/hooks/useModelsFromSettings"; import { toGatewayModel, migrateGatewayModel } from "@/browser/hooks/useGatewayModels"; import type { SendMessageOptions } from "@/common/orpc/types"; @@ -47,11 +52,21 @@ export function getSendOptionsFromStorage(workspaceId: string): SendMessageOptio // Transform to gateway format if gateway is enabled for this model const model = toGatewayModel(baseModel); - // Read thinking level (per-model global storage) - const thinkingLevel = readPersistedState( - getThinkingLevelByModelKey(baseModel), - WORKSPACE_DEFAULTS.thinkingLevel - ); + // Read thinking level (workspace-scoped). 
+ // Migration: if the workspace-scoped value is missing, fall back to legacy per-model storage + // once, then persist into the workspace-scoped key. + const scopedKey = getThinkingLevelKey(workspaceId); + const existingScoped = readPersistedState(scopedKey, undefined); + const thinkingLevel = + existingScoped ?? + readPersistedState( + getThinkingLevelByModelKey(baseModel), + WORKSPACE_DEFAULTS.thinkingLevel + ); + if (existingScoped === undefined) { + // Best-effort: avoid losing a user's existing per-model preference. + updatePersistedState(scopedKey, thinkingLevel); + } // Read mode (workspace-specific) const mode = readPersistedState(getModeKey(workspaceId), WORKSPACE_DEFAULTS.mode); @@ -62,7 +77,7 @@ export function getSendOptionsFromStorage(workspaceId: string): SendMessageOptio // Plan mode instructions are now handled by the backend (has access to plan file path) // Enforce thinking policy (gpt-5-pro → high only) - const effectiveThinkingLevel = enforceThinkingPolicy(model, thinkingLevel); + const effectiveThinkingLevel = enforceThinkingPolicy(baseModel, thinkingLevel); return { model, diff --git a/src/common/constants/storage.ts b/src/common/constants/storage.ts index 96b6fd5703..c5103b1f42 100644 --- a/src/common/constants/storage.ts +++ b/src/common/constants/storage.ts @@ -58,8 +58,18 @@ export function getMCPTestResultsKey(projectPath: string): string { } /** - * Get the localStorage key for thinking level preference per model (global) + * Get the localStorage key for thinking level preference per scope (workspace/project). + * Format: "thinkingLevel:{scopeId}" + */ +export function getThinkingLevelKey(scopeId: string): string { + return `thinkingLevel:${scopeId}`; +} + +/** + * LEGACY: Get the localStorage key for thinking level preference per model (global). * Format: "thinkingLevel:model:{modelName}" + * + * Kept for one-time migration to per-workspace thinking. 
*/ export function getThinkingLevelByModelKey(modelName: string): string { return `thinkingLevel:model:${modelName}`; @@ -306,6 +316,7 @@ const PERSISTENT_WORKSPACE_KEY_FUNCTIONS: Array<(workspaceId: string) => string> getInputKey, getInputImagesKey, getModeKey, + getThinkingLevelKey, getAutoRetryKey, getRetryStateKey, getReviewStateKey, @@ -315,7 +326,7 @@ const PERSISTENT_WORKSPACE_KEY_FUNCTIONS: Array<(workspaceId: string) => string> getReviewsKey, getAutoCompactionEnabledKey, getStatusStateKey, - // Note: thinking level and auto-compaction threshold are per-model, not per-workspace + // Note: auto-compaction threshold is per-model, not per-workspace ]; /** diff --git a/src/common/orpc/schemas.ts b/src/common/orpc/schemas.ts index efff170964..ff02984998 100644 --- a/src/common/orpc/schemas.ts +++ b/src/common/orpc/schemas.ts @@ -11,6 +11,7 @@ export { RuntimeConfigSchema, RuntimeModeSchema } from "./schemas/runtime"; export { ProjectConfigSchema, WorkspaceConfigSchema } from "./schemas/project"; // Workspace schemas +export { WorkspaceAISettingsSchema } from "./schemas/workspaceAiSettings"; export { FrontendWorkspaceMetadataSchema, GitStatusSchema, diff --git a/src/common/orpc/schemas/api.ts b/src/common/orpc/schemas/api.ts index 3447c348e8..db282c99bc 100644 --- a/src/common/orpc/schemas/api.ts +++ b/src/common/orpc/schemas/api.ts @@ -15,6 +15,7 @@ import { } from "./terminal"; import { BashToolResultSchema, FileTreeNodeSchema } from "./tools"; import { FrontendWorkspaceMetadataSchema, WorkspaceActivitySnapshotSchema } from "./workspace"; +import { WorkspaceAISettingsSchema } from "./workspaceAiSettings"; import { MCPAddParamsSchema, MCPRemoveParamsSchema, @@ -251,6 +252,13 @@ export const workspace = { input: z.object({ workspaceId: z.string(), title: z.string() }), output: ResultSchema(z.void(), z.string()), }, + updateAISettings: { + input: z.object({ + workspaceId: z.string(), + aiSettings: WorkspaceAISettingsSchema, + }), + output: ResultSchema(z.void(), 
z.string()), + }, fork: { input: z.object({ sourceWorkspaceId: z.string(), newName: z.string() }), output: z.discriminatedUnion("success", [ diff --git a/src/common/orpc/schemas/project.ts b/src/common/orpc/schemas/project.ts index 52297002f8..98019c44a9 100644 --- a/src/common/orpc/schemas/project.ts +++ b/src/common/orpc/schemas/project.ts @@ -1,6 +1,7 @@ import { z } from "zod"; import { RuntimeConfigSchema } from "./runtime"; import { WorkspaceMCPOverridesSchema } from "./mcp"; +import { WorkspaceAISettingsSchema } from "./workspaceAiSettings"; export const WorkspaceConfigSchema = z.object({ path: z.string().meta({ @@ -23,6 +24,9 @@ export const WorkspaceConfigSchema = z.object({ runtimeConfig: RuntimeConfigSchema.optional().meta({ description: "Runtime configuration (local vs SSH) - optional, defaults to local", }), + aiSettings: WorkspaceAISettingsSchema.optional().meta({ + description: "Workspace-scoped AI settings (model + thinking level)", + }), mcp: WorkspaceMCPOverridesSchema.optional().meta({ description: "Per-workspace MCP overrides (disabled servers, tool allowlists)", }), diff --git a/src/common/orpc/schemas/workspace.ts b/src/common/orpc/schemas/workspace.ts index 87a0b4ab5e..459570677e 100644 --- a/src/common/orpc/schemas/workspace.ts +++ b/src/common/orpc/schemas/workspace.ts @@ -1,5 +1,6 @@ import { z } from "zod"; import { RuntimeConfigSchema } from "./runtime"; +import { WorkspaceAISettingsSchema } from "./workspaceAiSettings"; export const WorkspaceMetadataSchema = z.object({ id: z.string().meta({ @@ -26,6 +27,9 @@ export const WorkspaceMetadataSchema = z.object({ runtimeConfig: RuntimeConfigSchema.meta({ description: "Runtime configuration for this workspace (always set, defaults to local on load)", }), + aiSettings: WorkspaceAISettingsSchema.optional().meta({ + description: "Workspace-scoped AI settings (model + thinking level) persisted in config", + }), status: z.enum(["creating"]).optional().meta({ description: "Workspace creation status. 
'creating' = pending setup (ephemeral, not persisted). Absent = ready.", diff --git a/src/common/orpc/schemas/workspaceAiSettings.ts b/src/common/orpc/schemas/workspaceAiSettings.ts new file mode 100644 index 0000000000..275c7734ea --- /dev/null +++ b/src/common/orpc/schemas/workspaceAiSettings.ts @@ -0,0 +1,15 @@ +import { z } from "zod"; + +/** + * Workspace-scoped AI settings that should persist across devices. + * + * Notes: + * - `model` must be canonical "provider:model" (NOT mux-gateway:provider/model). + * - `thinkingLevel` is workspace-scoped (saved per workspace, not per-model). + */ +export const WorkspaceAISettingsSchema = z.object({ + model: z.string().meta({ description: 'Canonical model id in the form "provider:model"' }), + thinkingLevel: z.enum(["off", "low", "medium", "high", "xhigh"]).meta({ + description: "Thinking/reasoning effort level", + }), +}); diff --git a/src/node/config.ts b/src/node/config.ts index 3cec6d9403..5185dd1105 100644 --- a/src/node/config.ts +++ b/src/node/config.ts @@ -301,6 +301,7 @@ export class Config { createdAt: workspace.createdAt ?? new Date().toISOString(), // GUARANTEE: All workspaces must have runtimeConfig (apply default if missing) runtimeConfig: workspace.runtimeConfig ?? 
DEFAULT_RUNTIME_CONFIG, + aiSettings: workspace.aiSettings, }; // Migrate missing createdAt to config for next load @@ -340,6 +341,9 @@ export class Config { // GUARANTEE: All workspaces must have runtimeConfig metadata.runtimeConfig ??= DEFAULT_RUNTIME_CONFIG; + // Preserve any config-only fields that may not exist in legacy metadata.json + metadata.aiSettings ??= workspace.aiSettings; + // Migrate to config for next load workspace.id = metadata.id; workspace.name = metadata.name; @@ -363,6 +367,7 @@ export class Config { createdAt: new Date().toISOString(), // GUARANTEE: All workspaces must have runtimeConfig runtimeConfig: DEFAULT_RUNTIME_CONFIG, + aiSettings: workspace.aiSettings, }; // Save to config for next load @@ -387,6 +392,7 @@ export class Config { createdAt: new Date().toISOString(), // GUARANTEE: All workspaces must have runtimeConfig (even in error cases) runtimeConfig: DEFAULT_RUNTIME_CONFIG, + aiSettings: workspace.aiSettings, }; workspaceMetadata.push(this.addPathsToMetadata(metadata, workspace.path, projectPath)); } diff --git a/src/node/orpc/router.ts b/src/node/orpc/router.ts index 60082c3673..f913b20e7c 100644 --- a/src/node/orpc/router.ts +++ b/src/node/orpc/router.ts @@ -365,6 +365,12 @@ export const router = (authToken?: string) => { .handler(async ({ context, input }) => { return context.workspaceService.updateTitle(input.workspaceId, input.title); }), + updateAISettings: t + .input(schemas.workspace.updateAISettings.input) + .output(schemas.workspace.updateAISettings.output) + .handler(async ({ context, input }) => { + return context.workspaceService.updateAISettings(input.workspaceId, input.aiSettings); + }), fork: t .input(schemas.workspace.fork.input) .output(schemas.workspace.fork.output) diff --git a/src/node/services/workspaceService.ts b/src/node/services/workspaceService.ts index e64f7aa8a4..2df85890d5 100644 --- a/src/node/services/workspaceService.ts +++ b/src/node/services/workspaceService.ts @@ -47,9 +47,11 @@ import { import 
type { MuxMessage } from "@/common/types/message"; import type { RuntimeConfig } from "@/common/types/runtime"; import { hasSrcBaseDir, getSrcBaseDir, isSSHRuntime } from "@/common/types/runtime"; -import { defaultModel } from "@/common/utils/ai/models"; +import { defaultModel, isValidModelFormat, normalizeGatewayModel } from "@/common/utils/ai/models"; import type { StreamEndEvent, StreamAbortEvent } from "@/common/types/stream"; import type { TerminalService } from "@/node/services/terminalService"; +import type { WorkspaceAISettingsSchema } from "@/common/orpc/schemas"; +import { enforceThinkingPolicy } from "@/browser/utils/thinking/policy"; import type { BackgroundProcessManager } from "@/node/services/backgroundProcessManager"; import { DisposableTempDir } from "@/node/services/tempDir"; @@ -63,6 +65,9 @@ import { movePlanFile, copyPlanFile } from "@/node/utils/runtime/helpers"; const MAX_WORKSPACE_NAME_COLLISION_RETRIES = 3; // Keep short to feel instant, but debounce bursts of file_edit_* tool calls. 
+
+// Shared type for workspace-scoped AI settings (model + thinking)
+type WorkspaceAISettings = z.infer<typeof WorkspaceAISettingsSchema>;
 const POST_COMPACTION_METADATA_REFRESH_DEBOUNCE_MS = 100;
 
 /**
@@ -876,6 +881,125 @@ export class WorkspaceService extends EventEmitter {
     }
   }
 
+  private normalizeWorkspaceAISettings(
+    aiSettings: WorkspaceAISettings
+  ): Result<WorkspaceAISettings, string> {
+    const rawModel = aiSettings.model;
+    const model = normalizeGatewayModel(rawModel).trim();
+    if (!model) {
+      return Err("Model is required");
+    }
+    if (!isValidModelFormat(model)) {
+      return Err(`Invalid model format: ${rawModel}`);
+    }
+
+    const effectiveThinkingLevel = enforceThinkingPolicy(model, aiSettings.thinkingLevel);
+
+    return Ok({
+      model,
+      thinkingLevel: effectiveThinkingLevel,
+    });
+  }
+
+  private extractWorkspaceAISettingsFromSendOptions(
+    options: SendMessageOptions | undefined
+  ): WorkspaceAISettings | null {
+    const rawModel = options?.model;
+    if (typeof rawModel !== "string" || rawModel.trim().length === 0) {
+      return null;
+    }
+
+    const model = normalizeGatewayModel(rawModel).trim();
+    if (!isValidModelFormat(model)) {
+      return null;
+    }
+
+    const requestedThinking = options?.thinkingLevel;
+    // Be defensive: if a (very) old client doesn't send thinkingLevel, don't overwrite
+    // any existing workspace-scoped value.
+    if (requestedThinking === undefined) {
+      return null;
+    }
+
+    const thinkingLevel = enforceThinkingPolicy(model, requestedThinking);
+
+    return { model, thinkingLevel };
+  }
+
+  private async persistWorkspaceAISettings(
+    workspaceId: string,
+    aiSettings: WorkspaceAISettings,
+    options?: { emitMetadata?: boolean }
+  ): Promise<Result<boolean, string>> {
+    const found = this.config.findWorkspace(workspaceId);
+    if (!found) {
+      return Err("Workspace not found");
+    }
+
+    const { projectPath, workspacePath } = found;
+
+    const config = this.config.loadConfigOrDefault();
+    const projectConfig = config.projects.get(projectPath);
+    if (!projectConfig) {
+      return Err(`Project not found: ${projectPath}`);
+    }
+
+    const workspaceEntry = projectConfig.workspaces.find(
+      (w) => w.id === workspaceId || w.path === workspacePath
+    );
+    if (!workspaceEntry) {
+      return Err("Workspace not found");
+    }
+
+    const prev = workspaceEntry.aiSettings;
+    const changed =
+      prev?.model !== aiSettings.model || prev?.thinkingLevel !== aiSettings.thinkingLevel;
+    if (!changed) {
+      return Ok(false);
+    }
+
+    workspaceEntry.aiSettings = aiSettings;
+    await this.config.saveConfig(config);
+
+    if (options?.emitMetadata !== false) {
+      const allMetadata = await this.config.getAllWorkspaceMetadata();
+      const updatedMetadata = allMetadata.find((m) => m.id === workspaceId) ?? null;
+
+      const session = this.sessions.get(workspaceId);
+      if (session) {
+        session.emitMetadata(updatedMetadata);
+      } else {
+        this.emit("metadata", { workspaceId, metadata: updatedMetadata });
+      }
+    }
+
+    return Ok(true);
+  }
+
+  async updateAISettings(
+    workspaceId: string,
+    aiSettings: WorkspaceAISettings
+  ): Promise<Result<void, string>> {
+    try {
+      const normalized = this.normalizeWorkspaceAISettings(aiSettings);
+      if (!normalized.success) {
+        return Err(normalized.error);
+      }
+
+      const persistResult = await this.persistWorkspaceAISettings(workspaceId, normalized.data, {
+        emitMetadata: true,
+      });
+      if (!persistResult.success) {
+        return Err(persistResult.error);
+      }
+
+      return Ok(undefined);
+    } catch (error) {
+      const message = error instanceof Error ? error.message : String(error);
+      return Err(`Failed to update workspace AI settings: ${message}`);
+    }
+  }
+
   async fork(
     sourceWorkspaceId: string,
     newName: string
@@ -1064,6 +1188,25 @@ export class WorkspaceService extends EventEmitter {
       },
     };
 
+    // Persist last-used model + thinking level for cross-device consistency.
+    // Best-effort: failures should not block sending.
+    const extractedSettings = this.extractWorkspaceAISettingsFromSendOptions(resolvedOptions);
+    if (extractedSettings) {
+      const persistResult = await this.persistWorkspaceAISettings(
+        workspaceId,
+        extractedSettings,
+        {
+          emitMetadata: false,
+        }
+      );
+      if (!persistResult.success) {
+        log.debug("Failed to persist workspace AI settings from send options", {
+          workspaceId,
+          error: persistResult.error,
+        });
+      }
+    }
+
     if (this.aiService.isStreaming(workspaceId) && !resolvedOptions?.editMessageId) {
       const pendingAskUserQuestion = askUserQuestionManager.getLatestPending(workspaceId);
       if (pendingAskUserQuestion) {
@@ -1130,6 +1273,26 @@ export class WorkspaceService extends EventEmitter {
     }
 
     const session = this.getOrCreateSession(workspaceId);
+
+    // Persist last-used model + thinking level for cross-device consistency.
+ // Best-effort: failures should not block resuming. + const extractedSettings = this.extractWorkspaceAISettingsFromSendOptions(options); + if (extractedSettings) { + const persistResult = await this.persistWorkspaceAISettings( + workspaceId, + extractedSettings, + { + emitMetadata: false, + } + ); + if (!persistResult.success) { + log.debug("Failed to persist workspace AI settings from resume options", { + workspaceId, + error: persistResult.error, + }); + } + } + const result = await session.resumeStream(options); if (!result.success) { log.error("resumeStream handler: session returned error", { diff --git a/tests/ipc/workspaceAISettings.test.ts b/tests/ipc/workspaceAISettings.test.ts new file mode 100644 index 0000000000..00d71b6f32 --- /dev/null +++ b/tests/ipc/workspaceAISettings.test.ts @@ -0,0 +1,51 @@ +/** + * IPC tests for workspace-scoped AI settings persistence. + * + * Verifies that model + thinking level can be persisted per workspace and + * are returned via metadata APIs (list/getInfo). 
+ */ + +import { createTestEnvironment, cleanupTestEnvironment } from "./setup"; +import type { TestEnvironment } from "./setup"; +import { + createTempGitRepo, + cleanupTempGitRepo, + generateBranchName, + createWorkspace, +} from "./helpers"; +import { resolveOrpcClient } from "./helpers"; + +describe("workspace.updateAISettings", () => { + test("persists aiSettings and returns them via workspace.getInfo and workspace.list", async () => { + const env: TestEnvironment = await createTestEnvironment(); + const tempGitRepo = await createTempGitRepo(); + + try { + const branchName = generateBranchName("ai-settings"); + const createResult = await createWorkspace(env, tempGitRepo, branchName); + if (!createResult.success) { + throw new Error(`Workspace creation failed: ${createResult.error}`); + } + + const workspaceId = createResult.metadata.id; + expect(workspaceId).toBeTruthy(); + + const client = resolveOrpcClient(env); + const updateResult = await client.workspace.updateAISettings({ + workspaceId: workspaceId!, + aiSettings: { model: "openai:gpt-5.2", thinkingLevel: "xhigh" }, + }); + expect(updateResult.success).toBe(true); + + const info = await client.workspace.getInfo({ workspaceId: workspaceId! }); + expect(info?.aiSettings).toEqual({ model: "openai:gpt-5.2", thinkingLevel: "xhigh" }); + + const list = await client.workspace.list({ includePostCompaction: false }); + const fromList = list.find((m) => m.id === workspaceId); + expect(fromList?.aiSettings).toEqual({ model: "openai:gpt-5.2", thinkingLevel: "xhigh" }); + } finally { + await cleanupTestEnvironment(env); + await cleanupTempGitRepo(tempGitRepo); + } + }, 60000); +});