diff --git a/crates/codegraph-core/src/native_db.rs b/crates/codegraph-core/src/native_db.rs index fb7ae57f..1f31b6c8 100644 --- a/crates/codegraph-core/src/native_db.rs +++ b/crates/codegraph-core/src/native_db.rs @@ -349,6 +349,60 @@ pub struct DataflowEdge { pub confidence: f64, } +// ── Build-glue return types ──────────────────────────────────────────── + +/// A single row from file_hashes. +#[napi(object)] +#[derive(Debug, Clone)] +pub struct FileHashRow { + pub file: String, + pub hash: String, + pub mtime: i64, + pub size: i64, +} + +/// Batched result of file_hashes table read. +#[napi(object)] +#[derive(Debug, Clone)] +pub struct FileHashData { + pub exists: bool, + pub rows: Vec<FileHashRow>, + pub max_mtime: i64, +} + +/// Counts for pending analysis tables. +#[napi(object)] +#[derive(Debug, Clone)] +pub struct PendingAnalysisCounts { + pub cfg_count: i64, + pub dataflow_count: i64, +} + +/// Batched node/edge counts for finalize. +#[napi(object)] +#[derive(Debug, Clone)] +pub struct FinalizeCounts { + pub node_count: i64, + pub edge_count: i64, +} + +/// Batched advisory check results. +#[napi(object)] +#[derive(Debug, Clone)] +pub struct AdvisoryCheckResult { + pub orphaned_embeddings: i64, + pub embed_built_at: Option<String>, + pub unused_exports: i64, +} + +/// Batched collect-files data. +#[napi(object)] +#[derive(Debug, Clone)] +pub struct CollectFilesData { + pub count: i64, + pub files: Vec<String>, +} + // ── NativeDatabase class ──────────────────────────────────────────────── /// Persistent rusqlite Connection wrapper exposed to JS via napi-rs. @@ -974,6 +1028,237 @@ impl NativeDatabase { Ok(roles_db::do_classify_incremental(conn, &changed_files).ok()) } + // ── Phase 6.18: Batched build-glue queries ────────────────────────── + + /// Batched read of file_hashes table for detect-changes stage. + /// Returns table existence, all rows, and max mtime in a single napi call. 
+ #[napi] + pub fn get_file_hash_data(&self) -> napi::Result<FileHashData> { + let conn = self.conn()?; + if !has_table(conn, "file_hashes") { + return Ok(FileHashData { + exists: false, + rows: vec![], + max_mtime: 0, + }); + } + let mut stmt = conn + .prepare_cached("SELECT file, hash, mtime, size FROM file_hashes") + .map_err(|e| napi::Error::from_reason(format!("getFileHashData prepare failed: {e}")))?; + let mut rows = Vec::new(); + let mut max_mtime: i64 = 0; + let mapped = stmt + .query_map([], |row| { + Ok(( + row.get::<_, String>(0)?, + row.get::<_, String>(1)?, + row.get::<_, i64>(2)?, + row.get::<_, i64>(3)?, + )) + }) + .map_err(|e| napi::Error::from_reason(format!("getFileHashData query failed: {e}")))?; + for r in mapped { + let (file, hash, mtime, size) = + r.map_err(|e| napi::Error::from_reason(format!("getFileHashData row: {e}")))?; + if mtime > max_mtime { + max_mtime = mtime; + } + rows.push(FileHashRow { + file, + hash, + mtime, + size, + }); + } + Ok(FileHashData { + exists: true, + rows, + max_mtime, + }) + } + + /// Check pending analysis tables: returns counts for cfg_blocks and dataflow. + /// Tables that don't exist return -1 (distinguishes "missing" from "empty"). + #[napi] + pub fn check_pending_analysis(&self) -> napi::Result<PendingAnalysisCounts> { + let conn = self.conn()?; + let cfg_count = if has_table(conn, "cfg_blocks") { + conn.query_row("SELECT COUNT(*) FROM cfg_blocks", [], |r| r.get::<_, i64>(0)) + .unwrap_or(-1) + } else { + -1 + }; + let dataflow_count = if has_table(conn, "dataflow") { + conn.query_row("SELECT COUNT(*) FROM dataflow", [], |r| r.get::<_, i64>(0)) + .unwrap_or(-1) + } else { + -1 + }; + Ok(PendingAnalysisCounts { + cfg_count, + dataflow_count, + }) + } + + /// Batch upsert file_hashes for metadata healing (mtime/size only updates). 
+ #[napi] + pub fn heal_file_metadata(&self, entries: Vec<FileHashRow>) -> napi::Result<u32> { + if entries.is_empty() { + return Ok(0); + } + let conn = self.conn()?; + let tx = conn + .unchecked_transaction() + .map_err(|e| napi::Error::from_reason(format!("heal tx failed: {e}")))?; + let mut count = 0u32; + { + let mut stmt = tx + .prepare_cached( + "INSERT OR REPLACE INTO file_hashes (file, hash, mtime, size) VALUES (?1, ?2, ?3, ?4)", + ) + .map_err(|e| napi::Error::from_reason(format!("heal prepare failed: {e}")))?; + for entry in &entries { + stmt.execute(params![entry.file, entry.hash, entry.mtime, entry.size]) + .map_err(|e| napi::Error::from_reason(format!("heal row failed: {e}")))?; + count += 1; + } + } + tx.commit() + .map_err(|e| napi::Error::from_reason(format!("heal commit failed: {e}")))?; + Ok(count) + } + + /// Find files that have edges pointing to any of the changed files. + /// Returns deduplicated list of reverse-dependency file paths. + #[napi] + pub fn find_reverse_dependencies(&self, changed_files: Vec<String>) -> napi::Result<Vec<String>> { + if changed_files.is_empty() { + return Ok(vec![]); + } + let conn = self.conn()?; + let changed_set: std::collections::HashSet<&str> = + changed_files.iter().map(|s| s.as_str()).collect(); + let mut result_set: std::collections::HashSet<String> = std::collections::HashSet::new(); + + let mut stmt = conn + .prepare_cached( + "SELECT DISTINCT n_src.file FROM edges e \ + JOIN nodes n_src ON e.source_id = n_src.id \ + JOIN nodes n_tgt ON e.target_id = n_tgt.id \ + WHERE n_tgt.file = ?1 AND n_src.file != n_tgt.file AND n_src.kind != 'directory'", + ) + .map_err(|e| napi::Error::from_reason(format!("reverseDeps prepare failed: {e}")))?; + + for file in &changed_files { + let rows = stmt + .query_map(params![file], |row| row.get::<_, String>(0)) + .map_err(|e| { + napi::Error::from_reason(format!("reverseDeps query failed: {e}")) + })?; + for row in rows { + if let Ok(dep_file) = row { + if !changed_set.contains(dep_file.as_str()) { + 
result_set.insert(dep_file); + } + } + } + } + let mut result_vec: Vec<String> = result_set.into_iter().collect(); + result_vec.sort_unstable(); + Ok(result_vec) + } + + /// Get node and edge counts in a single napi call. + #[napi] + pub fn get_finalize_counts(&self) -> napi::Result<FinalizeCounts> { + let conn = self.conn()?; + let node_count = conn + .query_row("SELECT COUNT(*) FROM nodes", [], |r| r.get::<_, i64>(0)) + .unwrap_or(0); + let edge_count = conn + .query_row("SELECT COUNT(*) FROM edges", [], |r| r.get::<_, i64>(0)) + .unwrap_or(0); + Ok(FinalizeCounts { + node_count, + edge_count, + }) + } + + /// Run all advisory checks in a single napi call (orphaned embeddings, + /// stale embeddings, unused exports). Only called on full builds. + #[napi] + pub fn run_advisory_checks(&self, has_embeddings: bool) -> napi::Result<AdvisoryCheckResult> { + let conn = self.conn()?; + let mut result = AdvisoryCheckResult { + orphaned_embeddings: 0, + embed_built_at: None, + unused_exports: 0, + }; + + if has_embeddings { + // Orphaned embeddings + result.orphaned_embeddings = conn + .query_row( + "SELECT COUNT(*) FROM embeddings WHERE node_id NOT IN (SELECT id FROM nodes)", + [], + |r| r.get::<_, i64>(0), + ) + .unwrap_or(0); + + // Stale embeddings + result.embed_built_at = conn + .query_row( + "SELECT value FROM embedding_meta WHERE key = 'built_at'", + [], + |r| r.get::<_, String>(0), + ) + .ok(); + } + + // Unused exports + result.unused_exports = conn + .query_row( + "SELECT COUNT(*) FROM nodes \ + WHERE exported = 1 AND kind != 'file' \ + AND id NOT IN ( \ + SELECT DISTINCT e.target_id FROM edges e \ + JOIN nodes caller ON e.source_id = caller.id \ + JOIN nodes target ON e.target_id = target.id \ + WHERE e.kind = 'calls' AND caller.file != target.file \ + )", + [], + |r| r.get::<_, i64>(0), + ) + .unwrap_or(0); + + Ok(result) + } + + /// Get file_hashes count and all file paths in a single napi call. + /// Used by the fast-collect path in collect-files stage. 
+ #[napi] + pub fn get_collect_files_data(&self) -> napi::Result<CollectFilesData> { + let conn = self.conn()?; + if !has_table(conn, "file_hashes") { + return Ok(CollectFilesData { + count: 0, + files: vec![], + }); + } + let mut stmt = conn + .prepare_cached("SELECT file FROM file_hashes") + .map_err(|e| napi::Error::from_reason(format!("collectFiles prepare failed: {e}")))?; + let rows = stmt + .query_map([], |row| row.get::<_, String>(0)) + .map_err(|e| napi::Error::from_reason(format!("collectFiles query failed: {e}")))?; + let files: Vec<String> = rows.filter_map(|r| r.ok()).collect(); + let count = files.len() as i64; + Ok(CollectFilesData { + count, + files, + }) + } + /// Cascade-delete all graph data for the specified files across all tables. /// Order: dependent tables first (embeddings, cfg, dataflow, complexity, /// metrics, ast_nodes), then edges, then nodes, then optionally file_hashes. diff --git a/src/domain/graph/builder/stages/collect-files.ts b/src/domain/graph/builder/stages/collect-files.ts index aaa658b5..73c19441 100644 --- a/src/domain/graph/builder/stages/collect-files.ts +++ b/src/domain/graph/builder/stages/collect-files.ts @@ -21,13 +21,22 @@ function tryFastCollect( ctx: PipelineContext, ): { files: string[]; directories: Set<string> } | null { const { db, rootDir } = ctx; + const useNative = ctx.engineName === 'native' && !!ctx.nativeDb?.getCollectFilesData; // 1. 
Check that file_hashes table exists and has entries let dbFileCount: number; - try { - dbFileCount = (db.prepare('SELECT COUNT(*) as c FROM file_hashes').get() as { c: number }).c; - } catch { - return null; + let dbFiles: string[]; + if (useNative) { + const data = ctx.nativeDb!.getCollectFilesData!(); + dbFileCount = data.count; + dbFiles = data.files; + } else { + try { + dbFileCount = (db.prepare('SELECT COUNT(*) as c FROM file_hashes').get() as { c: number }).c; + } catch { + return null; + } + dbFiles = []; // deferred — loaded below only if needed } if (dbFileCount === 0) return null; @@ -42,9 +51,11 @@ function tryFastCollect( if (!hasEntries) return null; // 3. Load existing file list from file_hashes (relative paths) - const dbFiles = (db.prepare('SELECT file FROM file_hashes').all() as Array<{ file: string }>).map( - (r) => r.file, - ); + if (!useNative) { + dbFiles = (db.prepare('SELECT file FROM file_hashes').all() as Array<{ file: string }>).map( + (r) => r.file, + ); + } // 4. 
Apply journal deltas: remove deleted files, add new/changed files const fileSet = new Set(dbFiles); diff --git a/src/domain/graph/builder/stages/detect-changes.ts b/src/domain/graph/builder/stages/detect-changes.ts index cf1d419f..e6524cb2 100644 --- a/src/domain/graph/builder/stages/detect-changes.ts +++ b/src/domain/graph/builder/stages/detect-changes.ts @@ -60,14 +60,27 @@ function getChangedFiles( rootDir: string, nativeDb?: NativeDatabase, ): ChangeResult { + // Batched native path: single napi call for table check + all rows + max mtime + if (nativeDb?.getFileHashData) { + const data = nativeDb.getFileHashData(); + if (!data.exists) { + return { + changed: allFiles.map((f) => ({ file: f })), + removed: [], + isFullBuild: true, + }; + } + const existing = new Map<string, FileHashRow>(data.rows.map((r) => [r.file, r])); + const removed = detectRemovedFiles(existing, allFiles, rootDir); + const journalResult = tryJournalTier(db, existing, rootDir, removed, data.maxMtime); + if (journalResult) return journalResult; + return mtimeAndHashTiers(existing, allFiles, rootDir, removed); + } + + // WASM / fallback path let hasTable = false; try { - if (nativeDb) { - nativeDb.queryGet('SELECT 1 FROM file_hashes LIMIT 1', []); - } else { - db.prepare('SELECT 1 FROM file_hashes LIMIT 1').get(); - } - // Query succeeded → table exists (result may be undefined if table is empty) + db.prepare('SELECT 1 FROM file_hashes LIMIT 1').get(); hasTable = true; } catch { /* table doesn't exist */ @@ -81,10 +94,7 @@ }; } - const sql = 'SELECT file, hash, mtime, size FROM file_hashes'; - const rows = nativeDb - ? 
(nativeDb.queryAll(sql, []) as unknown as FileHashRow[]) - : (db.prepare(sql).all() as FileHashRow[]); + const rows = db.prepare('SELECT file, hash, mtime, size FROM file_hashes').all() as FileHashRow[]; const existing = new Map<string, FileHashRow>(rows.map((r) => [r.file, r])); const removed = detectRemovedFiles(existing, allFiles, rootDir); @@ -116,14 +126,19 @@ function tryJournalTier( db: BetterSqlite3Database, existing: Map<string, FileHashRow>, rootDir: string, removed: string[], + precomputedMaxMtime?: number, ): ChangeResult | null { const journal = readJournal(rootDir); if (!journal.valid) return null; - const dbMtimes = db.prepare('SELECT MAX(mtime) as latest FROM file_hashes').get() as - | { latest: number | null } - | undefined; - const latestDbMtime = dbMtimes?.latest || 0; + const latestDbMtime = + precomputedMaxMtime ?? + (( + db.prepare('SELECT MAX(mtime) as latest FROM file_hashes').get() as + | { latest: number | null } + | undefined + )?.latest || + 0); const hasJournalEntries = journal.changed!.length > 0 || journal.removed!.length > 0; if (!hasJournalEntries || journal.timestamp! 
< latestDbMtime) { @@ -231,30 +246,42 @@ function mtimeAndHashTiers( async function runPendingAnalysis(ctx: PipelineContext): Promise<boolean> { const { db, opts, engineOpts, allFiles, rootDir } = ctx; - const needsCfg = - (opts as Record<string, unknown>).cfg !== false && - (() => { - try { - return ( - (db.prepare('SELECT COUNT(*) as c FROM cfg_blocks').get() as { c: number } | undefined) - ?.c === 0 - ); - } catch { - return true; - } - })(); - const needsDataflow = - (opts as Record<string, unknown>).dataflow !== false && - (() => { - try { - return ( - (db.prepare('SELECT COUNT(*) as c FROM dataflow').get() as { c: number } | undefined) - ?.c === 0 - ); - } catch { - return true; - } - })(); + const useNative = ctx.engineName === 'native' && !!ctx.nativeDb?.checkPendingAnalysis; + + let needsCfg: boolean; + let needsDataflow: boolean; + + if (useNative) { + const counts = ctx.nativeDb!.checkPendingAnalysis!(); + needsCfg = (opts as Record<string, unknown>).cfg !== false && counts.cfgCount <= 0; + needsDataflow = + (opts as Record<string, unknown>).dataflow !== false && counts.dataflowCount <= 0; + } else { + needsCfg = + (opts as Record<string, unknown>).cfg !== false && + (() => { + try { + return ( + (db.prepare('SELECT COUNT(*) as c FROM cfg_blocks').get() as { c: number } | undefined) + ?.c === 0 + ); + } catch { + return true; + } + })(); + needsDataflow = + (opts as Record<string, unknown>).dataflow !== false && + (() => { + try { + return ( + (db.prepare('SELECT COUNT(*) as c FROM dataflow').get() as { c: number } | undefined) + ?.c === 0 + ); + } catch { + return true; + } + })(); + } if (!needsCfg && !needsDataflow) return false; info('No file changes. 
Running pending analysis pass...'); @@ -282,17 +309,27 @@ function healMetadata(ctx: PipelineContext): void { const { db, metadataUpdates } = ctx; if (!metadataUpdates || metadataUpdates.length === 0) return; try { - const healHash = db.prepare( - 'INSERT OR REPLACE INTO file_hashes (file, hash, mtime, size) VALUES (?, ?, ?, ?)', - ); - const healTx = db.transaction(() => { - for (const item of metadataUpdates) { - const mtime = item.stat ? Math.floor(item.stat.mtime) : 0; - const size = item.stat ? item.stat.size : 0; - healHash.run(item.relPath, item.hash, mtime, size); - } - }); - healTx(); + if (ctx.engineName === 'native' && ctx.nativeDb?.healFileMetadata) { + const entries = metadataUpdates.map((item) => ({ + file: item.relPath, + hash: item.hash, + mtime: item.stat ? Math.floor(item.stat.mtime) : 0, + size: item.stat ? item.stat.size : 0, + })); + ctx.nativeDb.healFileMetadata(entries); + } else { + const healHash = db.prepare( + 'INSERT OR REPLACE INTO file_hashes (file, hash, mtime, size) VALUES (?, ?, ?, ?)', + ); + const healTx = db.transaction(() => { + for (const item of metadataUpdates) { + const mtime = item.stat ? Math.floor(item.stat.mtime) : 0; + const size = item.stat ? 
item.stat.size : 0; + healHash.run(item.relPath, item.hash, mtime, size); + } + }); + healTx(); + } debug(`Self-healed mtime/size for ${metadataUpdates.length} files`); } catch { /* ignore heal errors */ @@ -303,9 +340,23 @@ function findReverseDependencies( db: BetterSqlite3Database, changedRelPaths: Set<string>, rootDir: string, + nativeDb?: NativeDatabase, ): Set<string> { const reverseDeps = new Set<string>(); if (changedRelPaths.size === 0) return reverseDeps; + + if (nativeDb?.findReverseDependencies) { + const changedArray = [...changedRelPaths]; + const nativeResults = nativeDb.findReverseDependencies(changedArray); + for (const dep of nativeResults) { + const absPath = path.join(rootDir, dep); + if (fs.existsSync(absPath)) { + reverseDeps.add(dep); + } + } + return reverseDeps; + } + const findReverseDepsStmt = db.prepare(` SELECT DISTINCT n_src.file FROM edges e JOIN nodes n_src ON e.source_id = n_src.id @@ -360,7 +411,10 @@ function purgeAndAddReverseDeps( } } -function detectHasEmbeddings(db: BetterSqlite3Database): boolean { +function detectHasEmbeddings(db: BetterSqlite3Database, nativeDb?: NativeDatabase): boolean { + if (nativeDb?.hasEmbeddings) { + return nativeDb.hasEmbeddings(); + } try { db.prepare('SELECT 1 FROM embeddings LIMIT 1').get(); return true; @@ -371,14 +425,14 @@ function detectHasEmbeddings(db: BetterSqlite3Database): boolean { function handleScopedBuild(ctx: PipelineContext): void { const { db, rootDir, opts } = ctx; - ctx.hasEmbeddings = detectHasEmbeddings(db); + ctx.hasEmbeddings = detectHasEmbeddings(db, ctx.nativeDb); const changePaths = ctx.parseChanges.map( (item) => item.relPath || normalizePath(path.relative(rootDir, item.file)), ); let reverseDeps = new Set<string>(); if (!(opts as Record<string, unknown>).noReverseDeps) { const changedRelPaths = new Set([...changePaths, ...ctx.removed]); - reverseDeps = findReverseDependencies(db, changedRelPaths, rootDir); + reverseDeps = findReverseDependencies(db, changedRelPaths, rootDir, ctx.nativeDb); } 
purgeAndAddReverseDeps(ctx, changePaths, reverseDeps); info( @@ -388,7 +442,7 @@ function handleFullBuild(ctx: PipelineContext): void { const { db } = ctx; - const hasEmbeddings = detectHasEmbeddings(db); + const hasEmbeddings = detectHasEmbeddings(db, ctx.nativeDb); ctx.hasEmbeddings = hasEmbeddings; const deletions = 'PRAGMA foreign_keys = OFF; DELETE FROM cfg_edges; DELETE FROM cfg_blocks; DELETE FROM node_metrics; DELETE FROM edges; DELETE FROM function_complexity; DELETE FROM dataflow; DELETE FROM ast_nodes; DELETE FROM nodes; PRAGMA foreign_keys = ON;'; @@ -401,7 +455,7 @@ function handleIncrementalBuild(ctx: PipelineContext): void { const { db, rootDir, opts } = ctx; - ctx.hasEmbeddings = detectHasEmbeddings(db); + ctx.hasEmbeddings = detectHasEmbeddings(db, ctx.nativeDb); let reverseDeps = new Set<string>(); if (!(opts as Record<string, unknown>).noReverseDeps) { const changedRelPaths = new Set<string>(); for (const relPath of ctx.removed) { changedRelPaths.add(relPath); } - reverseDeps = findReverseDependencies(db, changedRelPaths, rootDir); + reverseDeps = findReverseDependencies(db, changedRelPaths, rootDir, ctx.nativeDb); } info( `Incremental: ${ctx.parseChanges.length} changed, ${ctx.removed.length} removed${reverseDeps.size > 0 ? `, ${reverseDeps.size} reverse-deps` : ''}`, diff --git a/src/domain/graph/builder/stages/finalize.ts b/src/domain/graph/builder/stages/finalize.ts index 63a001b1..c72ee42c 100644 --- a/src/domain/graph/builder/stages/finalize.ts +++ b/src/domain/graph/builder/stages/finalize.ts @@ -114,11 +114,33 @@ function persistBuildMetadata( * Run advisory checks on full builds: orphaned embeddings, stale embeddings, * and unused exports. Informational only — does not affect correctness. 
*/ -function runAdvisoryChecks( - db: PipelineContext['db'], - hasEmbeddings: boolean, - buildNow: Date, -): void { +function runAdvisoryChecks(ctx: PipelineContext, hasEmbeddings: boolean, buildNow: Date): void { + // Batched native path: single napi call for all 3 advisory checks + if (ctx.engineName === 'native' && ctx.nativeDb?.runAdvisoryChecks) { + const result = ctx.nativeDb.runAdvisoryChecks(hasEmbeddings); + if (result.orphanedEmbeddings > 0) { + warn( + `${result.orphanedEmbeddings} embeddings are orphaned (nodes changed). Run "codegraph embed" to refresh.`, + ); + } + if (result.embedBuiltAt) { + const embedTime = new Date(result.embedBuiltAt).getTime(); + if (!Number.isNaN(embedTime) && embedTime < buildNow.getTime()) { + warn( + 'Embeddings were built before the last graph rebuild. Run "codegraph embed" to update.', + ); + } + } + if (result.unusedExports > 0) { + warn( + `${result.unusedExports} exported symbol${result.unusedExports > 1 ? 's have' : ' has'} zero cross-file consumers. Run "codegraph exports --unused" to inspect.`, + ); + } + return; + } + + const { db } = ctx; + // Orphaned embeddings warning if (hasEmbeddings) { try { @@ -197,9 +219,17 @@ export async function finalize(ctx: PipelineContext): Promise<void> { // both the stale-embeddings comparison and the persisted built_at metadata. 
const buildNow = new Date(); - const nodeCount = (ctx.db.prepare('SELECT COUNT(*) as c FROM nodes').get() as { c: number }).c; - const actualEdgeCount = (ctx.db.prepare('SELECT COUNT(*) as c FROM edges').get() as { c: number }) - .c; + const useNative = ctx.engineName === 'native' && !!ctx.nativeDb?.getFinalizeCounts; + let nodeCount: number; + let actualEdgeCount: number; + if (useNative) { + const counts = ctx.nativeDb!.getFinalizeCounts!(); + nodeCount = counts.nodeCount; + actualEdgeCount = counts.edgeCount; + } else { + nodeCount = (ctx.db.prepare('SELECT COUNT(*) as c FROM nodes').get() as { c: number }).c; + actualEdgeCount = (ctx.db.prepare('SELECT COUNT(*) as c FROM edges').get() as { c: number }).c; + } info(`Graph built: ${nodeCount} nodes, ${actualEdgeCount} edges`); info(`Stored in ${ctx.dbPath}`); @@ -213,7 +243,7 @@ export async function finalize(ctx: PipelineContext): Promise { 'Finalize: skipping advisory queries (orphaned/stale embeddings, unused exports) for incremental build', ); } else { - runAdvisoryChecks(ctx.db, hasEmbeddings, buildNow); + runAdvisoryChecks(ctx, hasEmbeddings, buildNow); } // Intentionally measured before closeDb / writeJournalHeader / auto-registration: diff --git a/src/types.ts b/src/types.ts index d66e9fbd..03663797 100644 --- a/src/types.ts +++ b/src/types.ts @@ -2279,6 +2279,32 @@ export interface NativeDatabase { fanOut: number; }>; + // ── Batched build-glue queries (6.18) ──────────────────────────────── + /** All file_hashes rows + table existence + max mtime in one call. */ + getFileHashData?(): { + exists: boolean; + rows: Array<{ file: string; hash: string; mtime: number; size: number }>; + maxMtime: number; + }; + /** CFG and dataflow table counts (-1 = table missing). */ + checkPendingAnalysis?(): { cfgCount: number; dataflowCount: number }; + /** Batch upsert file_hashes for metadata healing. 
*/ + healFileMetadata?( + entries: Array<{ file: string; hash: string; mtime: number; size: number }>, + ): number; + /** Find files with edges pointing to changed files. */ + findReverseDependencies?(changedFiles: string[]): string[]; + /** Node + edge counts in one call. */ + getFinalizeCounts?(): { nodeCount: number; edgeCount: number }; + /** Orphaned embeddings, stale embeddings, unused exports in one call. */ + runAdvisoryChecks?(hasEmbeddings: boolean): { + orphanedEmbeddings: number; + embedBuiltAt: string | null; + unusedExports: number; + }; + /** File_hashes count + all file paths in one call. */ + getCollectFilesData?(): { count: number; files: string[] }; + // ── Generic query execution & version validation (6.16) ───────────── /** Execute a parameterized SELECT and return all rows as objects. */ queryAll(sql: string, params: Array<unknown>): Record<string, unknown>[];