From 12db9f659f1fdd2000800ceb67b2984e4d0dd4ce Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Fri, 24 Apr 2026 14:22:34 -0400 Subject: [PATCH 01/27] feat(api): add /api/ws canonical listener route; keep /ws as compat alias (#15) - Register GET /api/ws (canonical) alongside GET /ws (compat alias). Both delegate to the same ws.HandleListenerWS so behavior is identical. - Frontend listener client now connects to /api/ws. - Vite dev proxy covers both /api/ws and /ws with WebSocket upgrade. - Add api_test.TestListenerWSAlias asserting both routes are registered and share the same handler. - Deployment guide reverse-proxy section now lists /api/ws alongside /ws and /api/admin/ws as paths needing WS-upgrade forwarding. The /ws alias is kept for existing Trunk-Recorder / SDRTrunk / rdio- scanner-shaped clients during the legacy-API transition. --- CHANGELOG.md | 13 +++++++ .../internal/api/listener_ws_alias_test.go | 35 +++++++++++++++++++ backend/internal/api/routes.go | 7 +++- docs/deployment-guide.md | 2 +- frontend/src/services/wsClient.test.ts | 2 +- frontend/src/services/wsClient.ts | 2 +- frontend/vite.config.ts | 1 + 7 files changed, 58 insertions(+), 4 deletions(-) create mode 100644 backend/internal/api/listener_ws_alias_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 233fcbd..843ee63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,19 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Added + +- Canonical `GET /api/ws` listener WebSocket route. The existing `GET /ws` + remains as a compatibility alias that delegates to the same handler; + retirement of the alias is tracked in the native-API design plan. The + frontend now connects to `/api/ws`, and the Vite dev proxy covers both + paths. + +### Changed + +- Deployment guide reverse-proxy instructions now list `/api/ws` alongside + `/ws` and `/api/admin/ws` as paths that need WebSocket-upgrade forwarding. 
+ ## [1.1.2] — 2026-04-24 ### Security diff --git a/backend/internal/api/listener_ws_alias_test.go b/backend/internal/api/listener_ws_alias_test.go new file mode 100644 index 0000000..8fb4121 --- /dev/null +++ b/backend/internal/api/listener_ws_alias_test.go @@ -0,0 +1,35 @@ +package api_test + +import ( + "testing" +) + +// TestListenerWSAlias verifies that both /ws (legacy compat alias) and /api/ws +// (canonical) are registered and point at the same handler. +func TestListenerWSAlias(t *testing.T) { + router, _ := newTestEngine(t) + + var legacyHandler, canonicalHandler string + for _, rt := range router.Routes() { + if rt.Method != "GET" { + continue + } + switch rt.Path { + case "/ws": + legacyHandler = rt.Handler + case "/api/ws": + canonicalHandler = rt.Handler + } + } + + if legacyHandler == "" { + t.Error("GET /ws route is not registered") + } + if canonicalHandler == "" { + t.Error("GET /api/ws route is not registered") + } + if legacyHandler != "" && canonicalHandler != "" && legacyHandler != canonicalHandler { + t.Errorf("listener WS handlers differ: /ws=%q /api/ws=%q", + legacyHandler, canonicalHandler) + } +} diff --git a/backend/internal/api/routes.go b/backend/internal/api/routes.go index fa2f8c7..722dc4c 100644 --- a/backend/internal/api/routes.go +++ b/backend/internal/api/routes.go @@ -165,7 +165,12 @@ func RegisterRoutes(r *gin.Engine, deps Deps) { } // WebSocket endpoints. - r.GET("/ws", gin.WrapF(ws.HandleListenerWS(deps.Hub, deps.Queries))) + // /api/ws is the canonical OpenScanner listener route. /ws is a temporary + // compatibility alias that delegates to the same handler so existing + // rdio-scanner-shaped clients keep working during the legacy-API transition. + listenerWS := gin.WrapF(ws.HandleListenerWS(deps.Hub, deps.Queries)) + r.GET("/api/ws", listenerWS) + r.GET("/ws", listenerWS) r.GET("/api/admin/ws", gin.WrapF(ws.HandleAdminWS(deps.Hub, deps.Queries))) // Serve embedded frontend (SPA mode). 
diff --git a/docs/deployment-guide.md b/docs/deployment-guide.md index c0d3ca3..8c59bfd 100644 --- a/docs/deployment-guide.md +++ b/docs/deployment-guide.md @@ -117,7 +117,7 @@ Most people already have a web server (Caddy, nginx, Traefik) on their home serv Two rules to remember when proxying: -- **Forward WebSocket upgrades** on `/ws` and `/api/admin/ws` — the live audio stream and admin events use them. +- **Forward WebSocket upgrades** on `/api/ws`, `/ws`, and `/api/admin/ws` — the live audio stream and admin events use them. `/api/ws` is the canonical listener endpoint; `/ws` is a compatibility alias kept for legacy clients and should also be proxied. - **Send `X-Forwarded-Proto`** so OpenScanner knows whether to mark cookies as secure. If the proxy is on the same machine, it's also a good idea to bind OpenScanner to localhost only so nothing bypasses the proxy. In your compose file: diff --git a/frontend/src/services/wsClient.test.ts b/frontend/src/services/wsClient.test.ts index 78a1e90..b173ff1 100644 --- a/frontend/src/services/wsClient.test.ts +++ b/frontend/src/services/wsClient.test.ts @@ -109,7 +109,7 @@ describe("wsClient", () => { wsClient.connect(store.dispatch); expect(constructed).toHaveLength(1); const proto = window.location.protocol === "https:" ? "wss:" : "ws:"; - expect(constructed[0].url).toBe(`${proto}//${window.location.host}/ws`); + expect(constructed[0].url).toBe(`${proto}//${window.location.host}/api/ws`); }); it("sends the auth token as a JSON array after onopen", () => { diff --git a/frontend/src/services/wsClient.ts b/frontend/src/services/wsClient.ts index b896658..1e5383e 100644 --- a/frontend/src/services/wsClient.ts +++ b/frontend/src/services/wsClient.ts @@ -104,7 +104,7 @@ class WsClient { this.dispatch?.(setConnectionStatus("connecting")); const proto = window.location.protocol === "https:" ? 
"wss:" : "ws:"; - const url = `${proto}//${window.location.host}/ws`; + const url = `${proto}//${window.location.host}/api/ws`; this.ws = new WebSocket(url); this.ws.onopen = () => { diff --git a/frontend/vite.config.ts b/frontend/vite.config.ts index ded1dfc..2be8bb2 100644 --- a/frontend/vite.config.ts +++ b/frontend/vite.config.ts @@ -13,6 +13,7 @@ export default defineConfig({ }, server: { proxy: { + "/api/ws": { target: "ws://localhost:3000", ws: true }, "/api": "http://localhost:3000", "/ws": { target: "ws://localhost:3000", ws: true }, }, From 3a8e4c8847c701ba7b2ef17b4c726e426dfd6c2c Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Fri, 24 Apr 2026 15:15:11 -0400 Subject: [PATCH 02/27] refactor(backend): extract admin CRUD into internal/admin package (#16) Phase 2 of the directory restructure. The WebSocket layer is now protocol-only; admin CRUD / config / import-export business logic lives in a new transport-agnostic internal/admin package. Changes: - New internal/admin/ package with Operations struct. Files split by feature: users.go, systems.go, talkgroups.go, tags.go, groups.go, units.go, api_keys.go, dirmonitors.go, downstreams.go, webhooks.go, shared_links.go, settings.go, transcription.go, radioreference.go, filesystem.go, imports.go, exports.go. Does not import internal/ws or net/http. - internal/admin/operations.go defines the EventSink interface (BroadcastAdminEvent, BroadcastCFG, DisconnectByUser, ClientCount). ws.Hub implements it; Operations.New takes it at construction. - internal/ws/admin_router.go replaces admin_ops.go as the transport adapter. The adminOpHandlers map preserves every wire-protocol op name (users.list, systems.create, config.update, export.config, etc.) byte-identically. Live-state ops (activity.stats, activity.chart, logs.query, logs.level, activity.top-talkgroups) stay on *Client because they read hub in-memory state. - internal/ws/admin_ops.go deleted (3,201 lines removed). 
- Hub construction unchanged at the call site: NewHub(queries, version, HubDeps{...}) still works. HubDeps is now a type alias for admin.Deps. - cmd/server/main.go updated: SensitiveSettingKeys moved from ws to admin package. - Tests: admin_ops_settings_test.go split into internal/admin/settings_test.go (CRUD semantics) and internal/ws/admin_router_test.go (dispatch + error envelope). No wire-protocol, auth, or route changes. All frames, error envelopes, and action names are byte-identical to before. --- CHANGELOG.md | 4 + backend/cmd/server/main.go | 5 +- backend/internal/admin/api_keys.go | 145 + backend/internal/admin/dirmonitors.go | 171 + backend/internal/admin/downstreams.go | 160 + backend/internal/admin/exports.go | 241 ++ backend/internal/admin/filesystem.go | 78 + backend/internal/admin/groups.go | 112 + backend/internal/admin/imports.go | 396 ++ backend/internal/admin/operations.go | 466 +++ backend/internal/admin/radioreference.go | 233 ++ backend/internal/admin/settings.go | 174 + .../settings_test.go} | 90 +- backend/internal/admin/shared_links.go | 39 + backend/internal/admin/systems.go | 131 + backend/internal/admin/tags.go | 112 + backend/internal/admin/talkgroups.go | 141 + backend/internal/admin/transcription.go | 289 ++ backend/internal/admin/units.go | 146 + backend/internal/admin/users.go | 203 ++ backend/internal/admin/webhooks.go | 130 + backend/internal/ws/admin_ops.go | 3201 ----------------- backend/internal/ws/admin_router.go | 162 + backend/internal/ws/admin_router_test.go | 131 + backend/internal/ws/client.go | 18 +- backend/internal/ws/hub.go | 42 +- 26 files changed, 3724 insertions(+), 3296 deletions(-) create mode 100644 backend/internal/admin/api_keys.go create mode 100644 backend/internal/admin/dirmonitors.go create mode 100644 backend/internal/admin/downstreams.go create mode 100644 backend/internal/admin/exports.go create mode 100644 backend/internal/admin/filesystem.go create mode 100644 backend/internal/admin/groups.go create 
mode 100644 backend/internal/admin/imports.go create mode 100644 backend/internal/admin/operations.go create mode 100644 backend/internal/admin/radioreference.go create mode 100644 backend/internal/admin/settings.go rename backend/internal/{ws/admin_ops_settings_test.go => admin/settings_test.go} (60%) create mode 100644 backend/internal/admin/shared_links.go create mode 100644 backend/internal/admin/systems.go create mode 100644 backend/internal/admin/tags.go create mode 100644 backend/internal/admin/talkgroups.go create mode 100644 backend/internal/admin/transcription.go create mode 100644 backend/internal/admin/units.go create mode 100644 backend/internal/admin/users.go create mode 100644 backend/internal/admin/webhooks.go delete mode 100644 backend/internal/ws/admin_ops.go create mode 100644 backend/internal/ws/admin_router.go create mode 100644 backend/internal/ws/admin_router_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 843ee63..1d9abdb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed +- Admin CRUD business logic has been extracted from `internal/ws` into a + new transport-agnostic `internal/admin` package. The WebSocket layer + now only routes `ADM_REQ` frames to `admin.Operations` methods; the + wire protocol, action names, and response shapes are unchanged. - Deployment guide reverse-proxy instructions now list `/api/ws` alongside `/ws` and `/api/admin/ws` as paths that need WebSocket-upgrade forwarding. 
diff --git a/backend/cmd/server/main.go b/backend/cmd/server/main.go index e6277a9..bf7e7d7 100644 --- a/backend/cmd/server/main.go +++ b/backend/cmd/server/main.go @@ -36,6 +36,7 @@ import ( "github.com/gin-gonic/gin" "github.com/kardianos/service" + "github.com/openscanner/openscanner/internal/admin" "github.com/openscanner/openscanner/internal/api" "github.com/openscanner/openscanner/internal/audio" "github.com/openscanner/openscanner/internal/auth" @@ -1224,7 +1225,7 @@ func migrateSecrets(ctx context.Context, queries *db.Queries, sqlDB *sql.DB, enc // Check for encrypted values with no key configured. if encryptionKey == "" { for _, s := range settings { - if ws.SensitiveSettingKeys[s.Key] && auth.IsEncrypted(s.Value) { + if admin.SensitiveSettingKeys[s.Key] && auth.IsEncrypted(s.Value) { return fmt.Errorf("setting %q is encrypted but no encryption key is configured — set --encryption-key or OPENSCANNER_ENCRYPTION_KEY", s.Key) } } @@ -1250,7 +1251,7 @@ func migrateSecrets(ctx context.Context, queries *db.Queries, sqlDB *sql.DB, enc migrated := 0 for _, s := range settings { - if !ws.SensitiveSettingKeys[s.Key] { + if !admin.SensitiveSettingKeys[s.Key] { continue } if s.Value == "" { diff --git a/backend/internal/admin/api_keys.go b/backend/internal/admin/api_keys.go new file mode 100644 index 0000000..3f74c30 --- /dev/null +++ b/backend/internal/admin/api_keys.go @@ -0,0 +1,145 @@ +package admin + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + + "github.com/google/uuid" + "github.com/openscanner/openscanner/internal/auth" + "github.com/openscanner/openscanner/internal/db" +) + +// APIKeysList returns all API keys. +func (o *Operations) APIKeysList(ctx context.Context, _ json.RawMessage, _ int64) (any, error) { + keys, err := o.Queries.ListAPIKeys(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list API keys: %w", err) + } + return mapAPIKeys(keys), nil +} + +// APIKeysCreate creates a new API key. Returns the plaintext key once. 
+func (o *Operations) APIKeysCreate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + Key *string `json:"key"` + Ident *string `json:"ident"` + Disabled int64 `json:"disabled"` + SystemsJson *string `json:"systemsJson"` + CallRateLimit *int64 `json:"callRateLimit"` + Order int64 `json:"order"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + + plainKey := uuid.New().String() + if req.Key != nil && *req.Key != "" { + plainKey = *req.Key + } + hashedKey := auth.HashAPIKey(plainKey) + + id, err := o.Queries.CreateAPIKey(ctx, db.CreateAPIKeyParams{ + Key: hashedKey, + Ident: ptrToNullStr(req.Ident), + Disabled: req.Disabled, + SystemsJson: ptrToNullStr(req.SystemsJson), + CallRateLimit: ptrToNullInt(req.CallRateLimit), + Order: req.Order, + }) + if isUniqueViolation(err) { + return nil, UserError("API key already exists") + } + if err != nil { + return nil, fmt.Errorf("failed to create API key: %w", err) + } + + key, err := o.Queries.GetAPIKey(ctx, id) + if err != nil { + return nil, fmt.Errorf("failed to fetch created API key: %w", err) + } + slog.Info("admin: api key created", "id", key.ID, "ident", key.Ident.String, "by", callerID) + o.broadcastAdminEvent("apikeys.updated", nil) + + resp := mapAPIKey(key) + resp["createdKey"] = plainKey // Return plain key once on creation. + return resp, nil +} + +// APIKeysUpdate updates an existing API key. A blank key preserves the current one. 
+func (o *Operations) APIKeysUpdate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + Key *string `json:"key"` + Ident *string `json:"ident"` + Disabled int64 `json:"disabled"` + SystemsJson *string `json:"systemsJson"` + CallRateLimit *int64 `json:"callRateLimit"` + Order int64 `json:"order"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + + current, err := o.Queries.GetAPIKey(ctx, req.ID) + if err != nil { + return nil, UserError("API key not found") + } + + keyHash := current.Key + if req.Key != nil && *req.Key != "" { + keyHash = auth.HashAPIKey(*req.Key) + } + + err = o.Queries.UpdateAPIKey(ctx, db.UpdateAPIKeyParams{ + ID: req.ID, + Key: keyHash, + Ident: ptrToNullStr(req.Ident), + Disabled: req.Disabled, + SystemsJson: ptrToNullStr(req.SystemsJson), + CallRateLimit: ptrToNullInt(req.CallRateLimit), + Order: req.Order, + }) + if isUniqueViolation(err) { + return nil, UserError("API key already exists") + } + if err != nil { + return nil, fmt.Errorf("failed to update API key: %w", err) + } + + key, err := o.Queries.GetAPIKey(ctx, req.ID) + if err != nil { + return nil, fmt.Errorf("failed to fetch updated API key: %w", err) + } + slog.Info("admin: api key updated", "id", key.ID, "ident", key.Ident.String, "by", callerID) + o.broadcastAdminEvent("apikeys.updated", nil) + return mapAPIKey(key), nil +} + +// APIKeysDelete deletes an API key. 
+func (o *Operations) APIKeysDelete(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + + if _, err := o.Queries.GetAPIKey(ctx, req.ID); err != nil { + return nil, UserError("API key not found") + } + + if err := o.Queries.DeleteAPIKey(ctx, req.ID); err != nil { + return nil, fmt.Errorf("failed to delete API key: %w", err) + } + slog.Info("admin: api key deleted", "id", req.ID, "by", callerID) + o.broadcastAdminEvent("apikeys.updated", nil) + return map[string]bool{"ok": true}, nil +} diff --git a/backend/internal/admin/dirmonitors.go b/backend/internal/admin/dirmonitors.go new file mode 100644 index 0000000..2ef126b --- /dev/null +++ b/backend/internal/admin/dirmonitors.go @@ -0,0 +1,171 @@ +package admin + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + "os" + + "github.com/openscanner/openscanner/internal/db" +) + +// DirMonitorsList returns all dirmonitors. +func (o *Operations) DirMonitorsList(ctx context.Context, _ json.RawMessage, _ int64) (any, error) { + dms, err := o.Queries.ListDirMonitors(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list dirmonitors: %w", err) + } + return mapDirMonitors(dms), nil +} + +// DirMonitorsCreate creates a new dirmonitor and triggers a reload. 
+func (o *Operations) DirMonitorsCreate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + Directory string `json:"directory"` + Type string `json:"type"` + Mask *string `json:"mask"` + Extension *string `json:"extension"` + Frequency *int64 `json:"frequency"` + Delay *int64 `json:"delay"` + DeleteAfter int64 `json:"deleteAfter"` + UsePolling int64 `json:"usePolling"` + Disabled int64 `json:"disabled"` + SystemID *int64 `json:"systemId"` + TalkgroupID *int64 `json:"talkgroupId"` + Order int64 `json:"order"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.Directory == "" { + return nil, UserError("directory is required") + } + if info, statErr := os.Stat(req.Directory); statErr != nil { + return nil, UserError("directory does not exist or is not accessible: " + statErr.Error()) + } else if !info.IsDir() { + return nil, UserError("path is not a directory: " + req.Directory) + } + + id, err := o.Queries.CreateDirMonitor(ctx, db.CreateDirMonitorParams{ + Directory: req.Directory, + Type: req.Type, + Mask: ptrToNullStr(req.Mask), + Extension: ptrToNullStr(req.Extension), + Frequency: ptrToNullInt(req.Frequency), + Delay: ptrToNullInt(req.Delay), + DeleteAfter: req.DeleteAfter, + UsePolling: req.UsePolling, + Disabled: req.Disabled, + SystemID: ptrToNullInt(req.SystemID), + TalkgroupID: ptrToNullInt(req.TalkgroupID), + Order: req.Order, + }) + if err != nil { + return nil, fmt.Errorf("failed to create dirmonitor: %w", err) + } + + dm, err := o.Queries.GetDirMonitor(ctx, id) + if err != nil { + return nil, fmt.Errorf("failed to fetch created dirmonitor: %w", err) + } + if o.Deps.DirMonitorReload != nil { + o.Deps.DirMonitorReload.Reload() + } + slog.Info("admin: dirmonitor created", "id", dm.ID, "dir", dm.Directory, "by", callerID) + o.broadcastAdminEvent("dirmonitors.updated", nil) + return mapDirMonitor(dm), nil +} + +// DirMonitorsUpdate updates a 
dirmonitor and triggers a reload. +func (o *Operations) DirMonitorsUpdate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + Directory string `json:"directory"` + Type string `json:"type"` + Mask *string `json:"mask"` + Extension *string `json:"extension"` + Frequency *int64 `json:"frequency"` + Delay *int64 `json:"delay"` + DeleteAfter int64 `json:"deleteAfter"` + UsePolling int64 `json:"usePolling"` + Disabled int64 `json:"disabled"` + SystemID *int64 `json:"systemId"` + TalkgroupID *int64 `json:"talkgroupId"` + Order int64 `json:"order"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + if req.Directory == "" { + return nil, UserError("directory is required") + } + if info, statErr := os.Stat(req.Directory); statErr != nil { + return nil, UserError("directory does not exist or is not accessible: " + statErr.Error()) + } else if !info.IsDir() { + return nil, UserError("path is not a directory: " + req.Directory) + } + + if _, err := o.Queries.GetDirMonitor(ctx, req.ID); err != nil { + return nil, UserError("dirmonitor not found") + } + + if err := o.Queries.UpdateDirMonitor(ctx, db.UpdateDirMonitorParams{ + ID: req.ID, + Directory: req.Directory, + Type: req.Type, + Mask: ptrToNullStr(req.Mask), + Extension: ptrToNullStr(req.Extension), + Frequency: ptrToNullInt(req.Frequency), + Delay: ptrToNullInt(req.Delay), + DeleteAfter: req.DeleteAfter, + UsePolling: req.UsePolling, + Disabled: req.Disabled, + SystemID: ptrToNullInt(req.SystemID), + TalkgroupID: ptrToNullInt(req.TalkgroupID), + Order: req.Order, + }); err != nil { + return nil, fmt.Errorf("failed to update dirmonitor: %w", err) + } + + dm, err := o.Queries.GetDirMonitor(ctx, req.ID) + if err != nil { + return nil, fmt.Errorf("failed to fetch updated dirmonitor: %w", err) + } + if o.Deps.DirMonitorReload != nil { + 
o.Deps.DirMonitorReload.Reload() + } + slog.Info("admin: dirmonitor updated", "id", dm.ID, "dir", dm.Directory, "by", callerID) + o.broadcastAdminEvent("dirmonitors.updated", nil) + return mapDirMonitor(dm), nil +} + +// DirMonitorsDelete deletes a dirmonitor and triggers a reload. +func (o *Operations) DirMonitorsDelete(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + + if _, err := o.Queries.GetDirMonitor(ctx, req.ID); err != nil { + return nil, UserError("dirmonitor not found") + } + + if err := o.Queries.DeleteDirMonitor(ctx, req.ID); err != nil { + return nil, fmt.Errorf("failed to delete dirmonitor: %w", err) + } + if o.Deps.DirMonitorReload != nil { + o.Deps.DirMonitorReload.Reload() + } + slog.Info("admin: dirmonitor deleted", "id", req.ID, "by", callerID) + o.broadcastAdminEvent("dirmonitors.updated", nil) + return map[string]bool{"ok": true}, nil +} diff --git a/backend/internal/admin/downstreams.go b/backend/internal/admin/downstreams.go new file mode 100644 index 0000000..ae50e3b --- /dev/null +++ b/backend/internal/admin/downstreams.go @@ -0,0 +1,160 @@ +package admin + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + + "github.com/openscanner/openscanner/internal/auth" + "github.com/openscanner/openscanner/internal/db" +) + +// DownstreamsList returns all downstreams. +func (o *Operations) DownstreamsList(ctx context.Context, _ json.RawMessage, _ int64) (any, error) { + ds, err := o.Queries.ListDownstreams(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list downstreams: %w", err) + } + return mapDownstreams(ds), nil +} + +// DownstreamsCreate creates a new downstream and triggers a reload. 
+func (o *Operations) DownstreamsCreate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + Url string `json:"url"` + ApiKey string `json:"apiKey"` + SystemsJson *string `json:"systemsJson"` + Disabled int64 `json:"disabled"` + Order int64 `json:"order"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.Url == "" { + return nil, UserError("url is required") + } + if !validHTTPURL(req.Url) { + return nil, UserError("url must use http or https scheme") + } + + apiKey := req.ApiKey + if o.Deps.EncryptionKey != "" && apiKey != "" { + enc, err := auth.EncryptString(apiKey, o.Deps.EncryptionKey) + if err != nil { + return nil, fmt.Errorf("encrypt downstream API key: %w", err) + } + apiKey = enc + } + + id, err := o.Queries.CreateDownstream(ctx, db.CreateDownstreamParams{ + Url: req.Url, + ApiKey: apiKey, + SystemsJson: ptrToNullStr(req.SystemsJson), + Disabled: req.Disabled, + Order: req.Order, + }) + if err != nil { + return nil, fmt.Errorf("failed to create downstream: %w", err) + } + + ds, err := o.Queries.GetDownstream(ctx, id) + if err != nil { + return nil, fmt.Errorf("failed to fetch created downstream: %w", err) + } + if o.Deps.DownstreamReload != nil { + o.Deps.DownstreamReload.Reload() + } + slog.Info("admin: downstream created", "id", ds.ID, "url", ds.Url, "by", callerID) + o.broadcastAdminEvent("downstreams.updated", nil) + return mapDownstream(ds), nil +} + +// DownstreamsUpdate updates a downstream. Blank apiKey preserves the current one. 
+func (o *Operations) DownstreamsUpdate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + Url string `json:"url"` + ApiKey string `json:"apiKey"` + SystemsJson *string `json:"systemsJson"` + Disabled int64 `json:"disabled"` + Order int64 `json:"order"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + if req.Url != "" && !validHTTPURL(req.Url) { + return nil, UserError("url must use http or https scheme") + } + + existing, err := o.Queries.GetDownstream(ctx, req.ID) + if err != nil { + return nil, UserError("downstream not found") + } + + // Preserve existing API key if none provided (key is never sent to clients). + apiKey := existing.ApiKey + if req.ApiKey != "" { + if o.Deps.EncryptionKey != "" { + enc, err := auth.EncryptString(req.ApiKey, o.Deps.EncryptionKey) + if err != nil { + return nil, fmt.Errorf("encrypt downstream API key: %w", err) + } + apiKey = enc + } else { + apiKey = req.ApiKey + } + } + + if err := o.Queries.UpdateDownstream(ctx, db.UpdateDownstreamParams{ + ID: req.ID, + Url: req.Url, + ApiKey: apiKey, + SystemsJson: ptrToNullStr(req.SystemsJson), + Disabled: req.Disabled, + Order: req.Order, + }); err != nil { + return nil, fmt.Errorf("failed to update downstream: %w", err) + } + + ds, err := o.Queries.GetDownstream(ctx, req.ID) + if err != nil { + return nil, fmt.Errorf("failed to fetch updated downstream: %w", err) + } + if o.Deps.DownstreamReload != nil { + o.Deps.DownstreamReload.Reload() + } + slog.Info("admin: downstream updated", "id", ds.ID, "url", ds.Url, "by", callerID) + o.broadcastAdminEvent("downstreams.updated", nil) + return mapDownstream(ds), nil +} + +// DownstreamsDelete deletes a downstream and triggers a reload. 
+func (o *Operations) DownstreamsDelete(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + + if _, err := o.Queries.GetDownstream(ctx, req.ID); err != nil { + return nil, UserError("downstream not found") + } + + if err := o.Queries.DeleteDownstream(ctx, req.ID); err != nil { + return nil, fmt.Errorf("failed to delete downstream: %w", err) + } + if o.Deps.DownstreamReload != nil { + o.Deps.DownstreamReload.Reload() + } + slog.Info("admin: downstream deleted", "id", req.ID, "by", callerID) + o.broadcastAdminEvent("downstreams.updated", nil) + return map[string]bool{"ok": true}, nil +} diff --git a/backend/internal/admin/exports.go b/backend/internal/admin/exports.go new file mode 100644 index 0000000..2ac8c3c --- /dev/null +++ b/backend/internal/admin/exports.go @@ -0,0 +1,241 @@ +package admin + +import ( + "context" + "encoding/csv" + "encoding/json" + "fmt" + "strconv" + "strings" +) + +// ExportConfig returns the full config (settings, systems, talkgroups, …) +// shaped for import.go to round-trip. 
+func (o *Operations) ExportConfig(ctx context.Context, _ json.RawMessage, _ int64) (any, error) {
+ settings, err := o.Queries.ListSettings(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to export settings: %w", err)
+ }
+ users, err := o.Queries.ListUsers(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to export users: %w", err)
+ }
+ systems, err := o.Queries.ListSystems(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to export systems: %w", err)
+ }
+ talkgroups, err := o.Queries.ListAllTalkgroups(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to export talkgroups: %w", err)
+ }
+ units, err := o.Queries.ListAllUnits(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to export units: %w", err)
+ }
+ groups, err := o.Queries.ListGroups(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to export groups: %w", err)
+ }
+ tags, err := o.Queries.ListTags(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to export tags: %w", err)
+ }
+ apiKeys, err := o.Queries.ListAPIKeys(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to export api keys: %w", err)
+ }
+ dirmonitors, err := o.Queries.ListDirMonitors(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to export dirmonitors: %w", err)
+ }
+ downstreams, err := o.Queries.ListDownstreams(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to export downstreams: %w", err)
+ }
+ webhooks, err := o.Queries.ListWebhooks(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to export webhooks: %w", err)
+ }
+
+ // Export all fields — use snake_case keys to match db struct JSON tags.
+ // API keys include the hashed key so import can restore authentication.
+ // Downstream API keys and webhook secrets are included for full backup.
+ // The exported JSON file should be treated as sensitive.
+ // nullStr/nullInt flatten sql.Null* wrappers to plain value-or-null so
+ // ImportConfig can unmarshal them back without custom JSON decoders.
+ exportAPIKeys := make([]map[string]any, len(apiKeys))
+ for i, k := range apiKeys {
+ exportAPIKeys[i] = map[string]any{
+ "id": k.ID,
+ "key": k.Key,
+ "ident": nullStr(k.Ident),
+ "disabled": k.Disabled,
+ "systems_json": nullStr(k.SystemsJson),
+ "call_rate_limit": nullInt(k.CallRateLimit),
+ "order": k.Order,
+ }
+ }
+ exportDownstreams := make([]map[string]any, len(downstreams))
+ for i, d := range downstreams {
+ exportDownstreams[i] = map[string]any{
+ "id": d.ID,
+ "url": d.Url,
+ "api_key": d.ApiKey,
+ "systems_json": nullStr(d.SystemsJson),
+ "disabled": d.Disabled,
+ "order": d.Order,
+ }
+ }
+ exportWebhooks := make([]map[string]any, len(webhooks))
+ for i, w := range webhooks {
+ exportWebhooks[i] = map[string]any{
+ "id": w.ID,
+ "url": w.Url,
+ "type": w.Type,
+ "secret": nullStr(w.Secret),
+ "systems_json": nullStr(w.SystemsJson),
+ "disabled": w.Disabled,
+ "order": w.Order,
+ }
+ }
+
+ return map[string]any{
+ "settings": settings,
+ "users": users,
+ "systems": systems,
+ "talkgroups": talkgroups,
+ "units": units,
+ "groups": groups,
+ "tags": tags,
+ "apiKeys": exportAPIKeys,
+ "dirmonitors": dirmonitors,
+ "downstreams": exportDownstreams,
+ "webhooks": exportWebhooks,
+ }, nil
+}
+
+// ExportTalkgroups returns a CSV export of talkgroups for a given system.
+func (o *Operations) ExportTalkgroups(ctx context.Context, params json.RawMessage, _ int64) (any, error) {
+ var req struct {
+ SystemID *int64 `json:"systemId"`
+ }
+ // Best-effort parse: malformed params leave SystemID nil and are
+ // rejected just below.
+ if params != nil {
+ _ = json.Unmarshal(params, &req)
+ }
+ if req.SystemID == nil {
+ return nil, fmt.Errorf("systemId is required")
+ }
+
+ talkgroups, err := o.Queries.ListTalkgroupsBySystem(ctx, *req.SystemID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list talkgroups: %w", err)
+ }
+
+ // Build ID→label maps so we can emit portable text names instead of
+ // PK integers (PKs are not stable across instances).
+ // Lookup failures are tolerated: the export then simply omits
+ // group/tag names rather than failing outright.
+ groupMap := make(map[int64]string)
+ if gs, err := o.Queries.ListGroups(ctx); err == nil {
+ for _, g := range gs {
+ groupMap[g.ID] = g.Label
+ }
+ }
+ tagMap := make(map[int64]string)
+ if ts, err := o.Queries.ListTags(ctx); err == nil {
+ for _, t := range ts {
+ tagMap[t.ID] = t.Label
+ }
+ }
+
+ // Write errors are ignored: the csv writer targets an in-memory
+ // strings.Builder, so writes cannot fail at the I/O layer.
+ var buf strings.Builder
+ w := csv.NewWriter(&buf)
+ _ = w.Write([]string{"talkgroup_id", "label", "name", "tag", "group", "frequency", "led", "order"})
+ // sql.Null* fields render as empty cells when NULL.
+ for _, tg := range talkgroups {
+ freq := ""
+ if tg.Frequency.Valid {
+ freq = strconv.FormatInt(tg.Frequency.Int64, 10)
+ }
+ groupLabel := ""
+ if tg.GroupID.Valid {
+ groupLabel = groupMap[tg.GroupID.Int64]
+ }
+ tagLabel := ""
+ if tg.TagID.Valid {
+ tagLabel = tagMap[tg.TagID.Int64]
+ }
+ _ = w.Write([]string{
+ strconv.FormatInt(tg.TalkgroupID, 10),
+ tg.Label.String,
+ tg.Name.String,
+ tagLabel,
+ groupLabel,
+ freq,
+ tg.Led.String,
+ strconv.FormatInt(tg.Order, 10),
+ })
+ }
+ w.Flush()
+
+ return buf.String(), nil
+}
+
+// ExportUnits returns a CSV export of units for a given system.
+func (o *Operations) ExportUnits(ctx context.Context, params json.RawMessage, _ int64) (any, error) {
+ var req struct {
+ SystemID *int64 `json:"systemId"`
+ }
+ // Best-effort parse: malformed params leave SystemID nil and are
+ // rejected just below.
+ if params != nil {
+ _ = json.Unmarshal(params, &req)
+ }
+ if req.SystemID == nil {
+ return nil, fmt.Errorf("systemId is required")
+ }
+
+ units, err := o.Queries.ListUnitsBySystem(ctx, *req.SystemID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list units: %w", err)
+ }
+
+ // Writes target an in-memory strings.Builder; errors are not possible.
+ var buf strings.Builder
+ w := csv.NewWriter(&buf)
+ _ = w.Write([]string{"unit_id", "label", "order"})
+ for _, u := range units {
+ _ = w.Write([]string{
+ strconv.FormatInt(u.UnitID, 10),
+ u.Label.String,
+ strconv.FormatInt(u.Order, 10),
+ })
+ }
+ w.Flush()
+
+ return buf.String(), nil
+}
+
+// ExportGroups returns a CSV export of groups.
+func (o *Operations) ExportGroups(ctx context.Context, _ json.RawMessage, _ int64) (any, error) {
+ groups, err := o.Queries.ListGroups(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list groups: %w", err)
+ }
+ // Single "label" column: group PKs are not portable across instances.
+ var buf strings.Builder
+ w := csv.NewWriter(&buf)
+ _ = w.Write([]string{"label"})
+ for _, g := range groups {
+ _ = w.Write([]string{g.Label})
+ }
+ w.Flush()
+ return buf.String(), nil
+}
+
+// ExportTags returns a CSV export of tags.
+func (o *Operations) ExportTags(ctx context.Context, _ json.RawMessage, _ int64) (any, error) {
+ tags, err := o.Queries.ListTags(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list tags: %w", err)
+ }
+ // Single "label" column, mirroring ExportGroups.
+ var buf strings.Builder
+ w := csv.NewWriter(&buf)
+ _ = w.Write([]string{"label"})
+ for _, t := range tags {
+ _ = w.Write([]string{t.Label})
+ }
+ w.Flush()
+ return buf.String(), nil
+}
diff --git a/backend/internal/admin/filesystem.go b/backend/internal/admin/filesystem.go
new file mode 100644
index 0000000..c2599c0
--- /dev/null
+++ b/backend/internal/admin/filesystem.go
+package admin
+
+import (
+ "context"
+ "encoding/json"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+// FSDirectories lists directories under the given path for the dirmonitor
+// picker UI. Path must be absolute; hidden (dotfile) and top-level system
+// directories are filtered out.
+func (o *Operations) FSDirectories(_ context.Context, params json.RawMessage, _ int64) (any, error) {
+ var req struct {
+ Path string `json:"path"`
+ }
+ if params != nil {
+ _ = json.Unmarshal(params, &req)
+ }
+ // No path provided → start browsing at the filesystem root.
+ if req.Path == "" {
+ req.Path = "/"
+ }
+
+ clean := filepath.Clean(req.Path)
+ if !filepath.IsAbs(clean) {
+ return nil, UserError("path must be absolute")
+ }
+
+ info, err := os.Stat(clean)
+ if err != nil {
+ return nil, UserError("directory does not exist or is not accessible: " + err.Error())
+ }
+ if !info.IsDir() {
+ return nil, UserError("path is not a directory: " + clean)
+ }
+
+ entries, err := os.ReadDir(clean)
+ if err != nil {
+ return nil, UserError("failed to read directory: " + err.Error())
+ }
+
+ type dirEntry struct {
+ Name string `json:"name"`
+ Path string `json:"path"`
+ }
+
+ dirs := make([]dirEntry, 0, len(entries))
+ for _, e := range entries {
+ if !e.IsDir() {
+ continue
+ }
+ name := e.Name()
+ // At the root, suppress common system directories (see
+ // hiddenTopLevelDirs) to declutter the picker.
+ if clean == "/" && hiddenTopLevelDirs[name] {
+ continue
+ }
+ // Dotfile directories are hidden at every level.
+ if strings.HasPrefix(name, ".") {
+ continue
+ }
+ dirs = append(dirs, dirEntry{Name: name, Path: filepath.Join(clean, name)})
+ }
+ // Case-insensitive sort for a stable, human-friendly listing.
+ sort.Slice(dirs, func(i, j int) bool {
+ return strings.ToLower(dirs[i].Name) < strings.ToLower(dirs[j].Name)
+ })
+
+ // parent is nil at the root; there is nowhere further up to navigate.
+ var parent *string
+ if clean != "/" {
+ p := filepath.Dir(clean)
+ parent = &p
+ }
+
+ return map[string]any{
+ "path": clean,
+ "parent": parent,
+ "directories": dirs,
+ }, nil
+}
diff --git a/backend/internal/admin/groups.go b/backend/internal/admin/groups.go
new file mode 100644
index 0000000..0527012
--- /dev/null
+++ b/backend/internal/admin/groups.go
+package admin
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+
+ "github.com/openscanner/openscanner/internal/db"
+)
+
+// GroupsList returns all groups.
+func (o *Operations) GroupsList(ctx context.Context, _ json.RawMessage, _ int64) (any, error) {
+ groups, err := o.Queries.ListGroups(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list groups: %w", err)
+ }
+ return groups, nil
+}
+
+// GroupsCreate creates a new group.
+func (o *Operations) GroupsCreate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) {
+ var req struct {
+ Label string `json:"label"`
+ }
+ if err := json.Unmarshal(params, &req); err != nil {
+ return nil, UserError("invalid request body")
+ }
+ if req.Label == "" {
+ return nil, UserError("label is required")
+ }
+
+ id, err := o.Queries.CreateGroup(ctx, req.Label)
+ // Map the duplicate-label constraint to a friendly validation error.
+ if isUniqueViolation(err) {
+ return nil, UserError("group label already exists")
+ }
+ if err != nil {
+ return nil, fmt.Errorf("failed to create group: %w", err)
+ }
+
+ // Re-read the row so the response reflects exactly what was stored.
+ group, err := o.Queries.GetGroup(ctx, id)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch created group: %w", err)
+ }
+ slog.Info("admin: group created", "id", group.ID, "label", group.Label, "by", callerID)
+ // Notify connected clients that group data changed.
+ o.broadcastAdminEvent("groups.updated", nil)
+ o.broadcastCFG(ctx)
+ return group, nil
+}
+
+// GroupsUpdate updates an existing group.
+func (o *Operations) GroupsUpdate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) {
+ var req struct {
+ ID int64 `json:"id"`
+ Label string `json:"label"`
+ }
+ if err := json.Unmarshal(params, &req); err != nil {
+ return nil, UserError("invalid request body")
+ }
+ if req.ID <= 0 {
+ return nil, UserError("id is required")
+ }
+ if req.Label == "" {
+ return nil, UserError("label is required")
+ }
+
+ // Surface a clean "not found" instead of a generic update failure.
+ if _, err := o.Queries.GetGroup(ctx, req.ID); err != nil {
+ return nil, UserError("group not found")
+ }
+
+ err := o.Queries.UpdateGroup(ctx, db.UpdateGroupParams{ID: req.ID, Label: req.Label})
+ if isUniqueViolation(err) {
+ return nil, UserError("group label already exists")
+ }
+ if err != nil {
+ return nil, fmt.Errorf("failed to update group: %w", err)
+ }
+
+ // Re-read the row so the response reflects exactly what was stored.
+ group, err := o.Queries.GetGroup(ctx, req.ID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch updated group: %w", err)
+ }
+ slog.Info("admin: group updated", "id", group.ID, "label", group.Label, "by", callerID)
+ o.broadcastAdminEvent("groups.updated", nil)
+ o.broadcastCFG(ctx)
+ return group, nil
+}
+
+// GroupsDelete deletes a group.
+func (o *Operations) GroupsDelete(ctx context.Context, params json.RawMessage, callerID int64) (any, error) {
+ var req struct {
+ ID int64 `json:"id"`
+ }
+ if err := json.Unmarshal(params, &req); err != nil {
+ return nil, UserError("invalid request body")
+ }
+ if req.ID <= 0 {
+ return nil, UserError("id is required")
+ }
+
+ // Surface a clean "not found" instead of a generic delete failure.
+ if _, err := o.Queries.GetGroup(ctx, req.ID); err != nil {
+ return nil, UserError("group not found")
+ }
+
+ if err := o.Queries.DeleteGroup(ctx, req.ID); err != nil {
+ return nil, fmt.Errorf("failed to delete group: %w", err)
+ }
+ slog.Info("admin: group deleted", "id", req.ID, "by", callerID)
+ o.broadcastAdminEvent("groups.updated", nil)
+ o.broadcastCFG(ctx)
+ return map[string]bool{"ok": true}, nil
+}
diff --git a/backend/internal/admin/imports.go b/backend/internal/admin/imports.go
new file mode 100644
index 0000000..30dcaa5
--- /dev/null
+++ b/backend/internal/admin/imports.go
+package admin
+
+import (
+ "context"
+ "database/sql"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+
+ "github.com/openscanner/openscanner/internal/auth"
+ "github.com/openscanner/openscanner/internal/db"
+)
+
+// importAPIKey, importDownstream, and importWebhook mirror the flat shape
+// emitted by ExportConfig (plain string/null instead of {String,Valid}
+// blobs). Unmarshalling directly into the db.* structs would fail for any
+// non-null nullable field because sql.NullString has no JSON unmarshaler.
+type importAPIKey struct {
+ Key string `json:"key"`
+ Ident *string `json:"ident"`
+ Disabled int64 `json:"disabled"`
+ SystemsJson *string `json:"systems_json"`
+ CallRateLimit *int64 `json:"call_rate_limit"`
+ Order int64 `json:"order"`
+}
+
+type importDownstream struct {
+ Url string `json:"url"`
+ ApiKey string `json:"api_key"`
+ SystemsJson *string `json:"systems_json"`
+ Disabled int64 `json:"disabled"`
+ Order int64 `json:"order"`
+}
+
+type importWebhook struct {
+ Url string `json:"url"`
+ Type string `json:"type"`
+ Secret *string `json:"secret"`
+ SystemsJson *string `json:"systems_json"`
+ Disabled int64 `json:"disabled"`
+ Order int64 `json:"order"`
+}
+
+// ImportConfig applies a full config backup atomically, remapping foreign
+// keys between the source and destination databases.
+func (o *Operations) ImportConfig(ctx context.Context, params json.RawMessage, callerID int64) (any, error) {
+ var data struct {
+ Settings []db.Setting `json:"settings"`
+ Groups []db.Group `json:"groups"`
+ Tags []db.Tag `json:"tags"`
+ Systems []db.System `json:"systems"`
+ Talkgroups []db.Talkgroup `json:"talkgroups"`
+ Units []db.Unit `json:"units"`
+ APIKeys []importAPIKey `json:"apiKeys"`
+ DirMonitors []db.Dirmonitor `json:"dirmonitors"`
+ Downstreams []importDownstream `json:"downstreams"`
+ Webhooks []importWebhook `json:"webhooks"`
+ }
+ if err := json.Unmarshal(params, &data); err != nil {
+ slog.Warn("import config: failed to parse payload", "error", err)
+ return nil, UserError("invalid backup file: " + err.Error())
+ }
+
+ // Validate encrypted values: reject if no key configured, or if the wrong key is configured.
+ // Done up front — before any DB writes — so a bad key fails fast.
+ encKey := o.Deps.EncryptionKey
+ for _, s := range data.Settings {
+ if SensitiveSettingKeys[s.Key] && auth.IsEncrypted(s.Value) {
+ if encKey == "" {
+ return nil, UserError("backup contains encrypted settings but no encryption key is configured — set --encryption-key before importing")
+ }
+ if _, err := auth.DecryptString(s.Value, encKey); err != nil {
+ return nil, UserError("backup contains encrypted settings that cannot be decrypted with the current encryption key — check that --encryption-key matches the key used when the backup was created")
+ }
+ }
+ }
+ for _, d := range data.Downstreams {
+ if auth.IsEncrypted(d.ApiKey) {
+ if encKey == "" {
+ return nil, UserError("backup contains encrypted downstream API keys but no encryption key is configured — set --encryption-key before importing")
+ }
+ if _, err := auth.DecryptString(d.ApiKey, encKey); err != nil {
+ return nil, UserError("backup contains encrypted downstream API keys that cannot be decrypted with the current encryption key — check that --encryption-key matches the key used when the backup was created")
+ }
+ }
+ }
+
+ // The whole import runs in one transaction so a half-applied backup
+ // can never persist.
+ sqlDB := o.Deps.SQLDB
+ if sqlDB == nil {
+ return nil, fmt.Errorf("transaction support not available")
+ }
+
+ tx, err := sqlDB.BeginTx(ctx, nil)
+ if err != nil {
+ return nil, fmt.Errorf("database error: %w", err)
+ }
+ defer tx.Rollback() //nolint:errcheck
+
+ qtx := o.Queries.WithTx(tx)
+
+ // Settings
+ for _, s := range data.Settings {
+ if !allowedSettingKeys[s.Key] {
+ slog.Warn("import config: skipping unknown setting key", "key", s.Key)
+ continue
+ }
+ if err := qtx.UpsertSetting(ctx, db.UpsertSettingParams(s)); err != nil {
+ return nil, fmt.Errorf("failed to import settings: %w", err)
+ }
+ }
+
+ // Groups — capture old→new id remap so talkgroups can rewrite their
+ // group_id FKs (the export carries the source DB's PKs, but on a fresh
+ // install those PKs don't exist yet).
+ groupRemap := make(map[int64]int64, len(data.Groups))
+ for _, g := range data.Groups {
+ newID, err := qtx.CreateGroup(ctx, g.Label)
+ if err != nil {
+ if !isUniqueViolation(err) {
+ return nil, fmt.Errorf("failed to import groups: %w", err)
+ }
+ // Label already present — reuse the existing row's PK.
+ existing, gerr := qtx.GetGroupByLabel(ctx, g.Label)
+ if gerr != nil {
+ return nil, fmt.Errorf("failed to look up existing group %q: %w", g.Label, gerr)
+ }
+ newID = existing.ID
+ }
+ groupRemap[g.ID] = newID
+ }
+
+ // Tags — same remap pattern as groups.
+ tagRemap := make(map[int64]int64, len(data.Tags))
+ for _, t := range data.Tags {
+ newID, err := qtx.CreateTag(ctx, t.Label)
+ if err != nil {
+ if !isUniqueViolation(err) {
+ return nil, fmt.Errorf("failed to import tags: %w", err)
+ }
+ existing, gerr := qtx.GetTagByLabel(ctx, t.Label)
+ if gerr != nil {
+ return nil, fmt.Errorf("failed to look up existing tag %q: %w", t.Label, gerr)
+ }
+ newID = existing.ID
+ }
+ tagRemap[t.ID] = newID
+ }
+
+ // Systems — remap by old PK → new PK. The natural key is SystemID
+ // (the radio-system ID, e.g. 1, 100), which the schema declares UNIQUE.
+ systemRemap := make(map[int64]int64, len(data.Systems))
+ for _, s := range data.Systems {
+ newID, err := qtx.CreateSystem(ctx, db.CreateSystemParams{
+ SystemID: s.SystemID,
+ Label: s.Label,
+ AutoPopulateTalkgroups: s.AutoPopulateTalkgroups,
+ BlacklistsJson: s.BlacklistsJson,
+ Led: s.Led,
+ Order: s.Order,
+ })
+ if err != nil {
+ if !isUniqueViolation(err) {
+ return nil, fmt.Errorf("failed to import systems: %w", err)
+ }
+ existing, gerr := qtx.GetSystemBySystemID(ctx, s.SystemID)
+ if gerr != nil {
+ return nil, fmt.Errorf("failed to look up existing system %d: %w", s.SystemID, gerr)
+ }
+ newID = existing.ID
+ }
+ systemRemap[s.ID] = newID
+ }
+
+ // Talkgroups — translate FKs (system_id, group_id, tag_id) through the
+ // remaps built above, then upsert. Capture the new PK so dirmonitors
+ // can rewrite their talkgroup_id FKs.
+ tgRemap := make(map[int64]int64, len(data.Talkgroups))
+ for _, tg := range data.Talkgroups {
+ newSystemID, ok := systemRemap[tg.SystemID]
+ if !ok {
+ slog.Warn("import config: skipping talkgroup with unknown system_id",
+ "talkgroup_id", tg.TalkgroupID, "system_id", tg.SystemID)
+ continue
+ }
+ groupID := tg.GroupID
+ if groupID.Valid {
+ if mapped, ok := groupRemap[groupID.Int64]; ok {
+ groupID.Int64 = mapped
+ } else {
+ // Group wasn't in the export — drop the FK rather than fail.
+ groupID = sql.NullInt64{}
+ }
+ }
+ tagID := tg.TagID
+ if tagID.Valid {
+ if mapped, ok := tagRemap[tagID.Int64]; ok {
+ tagID.Int64 = mapped
+ } else {
+ tagID = sql.NullInt64{}
+ }
+ }
+ if err := qtx.UpsertTalkgroup(ctx, db.UpsertTalkgroupParams{
+ SystemID: newSystemID,
+ TalkgroupID: tg.TalkgroupID,
+ Label: tg.Label,
+ Name: tg.Name,
+ Frequency: tg.Frequency,
+ Led: tg.Led,
+ GroupID: groupID,
+ TagID: tagID,
+ Order: tg.Order,
+ }); err != nil {
+ return nil, fmt.Errorf("failed to import talkgroups: %w", err)
+ }
+ // Re-read to learn the upserted row's PK for the dirmonitor remap.
+ row, err := qtx.GetTalkgroupBySystemAndTGID(ctx, db.GetTalkgroupBySystemAndTGIDParams{
+ SystemID: newSystemID,
+ TalkgroupID: tg.TalkgroupID,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to look up imported talkgroup (system=%d tg=%d): %w",
+ newSystemID, tg.TalkgroupID, err)
+ }
+ tgRemap[tg.ID] = row.ID
+ }
+
+ // Units — translate system_id.
+ for _, u := range data.Units {
+ newSystemID, ok := systemRemap[u.SystemID]
+ if !ok {
+ slog.Warn("import config: skipping unit with unknown system_id",
+ "unit_id", u.UnitID, "system_id", u.SystemID)
+ continue
+ }
+ if err := qtx.UpsertUnit(ctx, db.UpsertUnitParams{
+ SystemID: newSystemID,
+ UnitID: u.UnitID,
+ Label: u.Label,
+ Order: u.Order,
+ }); err != nil {
+ return nil, fmt.Errorf("failed to import units: %w", err)
+ }
+ }
+
+ // API Keys — remap any system PKs embedded in systems_json.
+ // Duplicate keys (already present locally) are silently skipped.
+ for _, k := range data.APIKeys {
+ if _, err := qtx.CreateAPIKey(ctx, db.CreateAPIKeyParams{
+ Key: k.Key,
+ Ident: ptrToNullStr(k.Ident),
+ Disabled: k.Disabled,
+ SystemsJson: ptrToNullStr(remapSystemsJSON(k.SystemsJson, systemRemap)),
+ CallRateLimit: ptrToNullInt(k.CallRateLimit),
+ Order: k.Order,
+ }); err != nil && !isUniqueViolation(err) {
+ return nil, fmt.Errorf("failed to import api keys: %w", err)
+ }
+ }
+
+ // DirMonitors — translate system_id and talkgroup_id FKs.
+ for _, d := range data.DirMonitors {
+ sysID := d.SystemID
+ if sysID.Valid {
+ if mapped, ok := systemRemap[sysID.Int64]; ok {
+ sysID.Int64 = mapped
+ } else {
+ slog.Warn("import config: dirmonitor system_id not found in import; dropping FK",
+ "directory", d.Directory, "system_id", sysID.Int64)
+ sysID = sql.NullInt64{}
+ }
+ }
+ tgID := d.TalkgroupID
+ if tgID.Valid {
+ if mapped, ok := tgRemap[tgID.Int64]; ok {
+ tgID.Int64 = mapped
+ } else {
+ slog.Warn("import config: dirmonitor talkgroup_id not found in import; dropping FK",
+ "directory", d.Directory, "talkgroup_id", tgID.Int64)
+ tgID = sql.NullInt64{}
+ }
+ }
+ if _, err := qtx.CreateDirMonitor(ctx, db.CreateDirMonitorParams{
+ Directory: d.Directory,
+ Type: d.Type,
+ Mask: d.Mask,
+ Extension: d.Extension,
+ Frequency: d.Frequency,
+ Delay: d.Delay,
+ DeleteAfter: d.DeleteAfter,
+ UsePolling: d.UsePolling,
+ Disabled: d.Disabled,
+ SystemID: sysID,
+ TalkgroupID: tgID,
+ Order: d.Order,
+ }); err != nil && !isUniqueViolation(err) {
+ return nil, fmt.Errorf("failed to import dirmonitors: %w", err)
+ }
+ }
+
+ // Downstreams — remap embedded system PKs.
+ for _, d := range data.Downstreams {
+ if !validHTTPURL(d.Url) {
+ slog.Warn("import config: skipping downstream with invalid URL", "url", d.Url)
+ continue
+ }
+ if _, err := qtx.CreateDownstream(ctx, db.CreateDownstreamParams{
+ Url: d.Url,
+ ApiKey: d.ApiKey,
+ SystemsJson: ptrToNullStr(remapSystemsJSON(d.SystemsJson, systemRemap)),
+ Disabled: d.Disabled,
+ Order: d.Order,
+ }); err != nil && !isUniqueViolation(err) {
+ return nil, fmt.Errorf("failed to import downstreams: %w", err)
+ }
+ }
+
+ // Webhooks — remap embedded system PKs.
+ for _, w := range data.Webhooks {
+ if !validHTTPURL(w.Url) {
+ slog.Warn("import config: skipping webhook with invalid URL", "url", w.Url)
+ continue
+ }
+ if _, err := qtx.CreateWebhook(ctx, db.CreateWebhookParams{
+ Url: w.Url,
+ Type: w.Type,
+ Secret: ptrToNullStr(w.Secret),
+ SystemsJson: ptrToNullStr(remapSystemsJSON(w.SystemsJson, systemRemap)),
+ Disabled: w.Disabled,
+ Order: w.Order,
+ }); err != nil && !isUniqueViolation(err) {
+ return nil, fmt.Errorf("failed to import webhooks: %w", err)
+ }
+ }
+
+ if err := tx.Commit(); err != nil {
+ return nil, fmt.Errorf("failed to commit import: %w", err)
+ }
+
+ // Hot-reload subsystems whose live state derives from the rows or
+ // settings we just rewrote. Without these, the in-process worker
+ // pools, downstream forwarders, and dirmonitor watchers keep using
+ // their pre-import config — symptom: transcription stops, downstream
+ // forwarding goes silent, dirmonitors don't pick up new directories
+ // until the operator restarts the server.
+ // Settings are re-read via o.Queries (not qtx): the tx is committed.
+ if o.Deps.TranscriberReload != nil && len(data.Settings) > 0 {
+ tEnabled, _ := o.Queries.GetSetting(ctx, "transcriptionEnabled")
+ tURL, _ := o.Queries.GetSetting(ctx, "transcriptionUrl")
+ tModel, _ := o.Queries.GetSetting(ctx, "transcriptionModel")
+ tLang, _ := o.Queries.GetSetting(ctx, "transcriptionLanguage")
+ tDiarize, _ := o.Queries.GetSetting(ctx, "transcriptionDiarize")
+
+ ok := o.Deps.TranscriberReload.Reload(
+ tEnabled.Value == "true",
+ tURL.Value,
+ tModel.Value,
+ tLang.Value,
+ tDiarize.Value == "true",
+ )
+ // Available only if the reload succeeded AND transcription is enabled.
+ o.Deps.WhisperAvailable = ok && tEnabled.Value == "true"
+ }
+ if o.Deps.DirMonitorReload != nil && len(data.DirMonitors) > 0 {
+ o.Deps.DirMonitorReload.Reload()
+ }
+ if o.Deps.DownstreamReload != nil && len(data.Downstreams) > 0 {
+ o.Deps.DownstreamReload.Reload()
+ }
+
+ // Notify all admin/listener clients to refetch — without these the
+ // admin UI shows stale (empty) lists and the user thinks the import
+ // silently failed. Order doesn't matter; events are fire-and-forget.
+ for _, topic := range []string{
+ "groups.updated",
+ "tags.updated",
+ "systems.updated",
+ "talkgroups.updated",
+ "units.updated",
+ "apikeys.updated",
+ "dirmonitors.updated",
+ "downstreams.updated",
+ "webhooks.updated",
+ } {
+ o.broadcastAdminEvent(topic, nil)
+ }
+ o.broadcastCFG(ctx)
+
+ slog.Info("config imported successfully via WS",
+ "by", callerID,
+ "settings", len(data.Settings),
+ "groups", len(data.Groups),
+ "tags", len(data.Tags),
+ "systems", len(data.Systems),
+ "talkgroups", len(data.Talkgroups),
+ "units", len(data.Units),
+ "apiKeys", len(data.APIKeys),
+ "dirmonitors", len(data.DirMonitors),
+ "downstreams", len(data.Downstreams),
+ "webhooks", len(data.Webhooks),
+ )
+ return map[string]bool{"ok": true}, nil
+}
diff --git a/backend/internal/admin/operations.go b/backend/internal/admin/operations.go
new file mode 100644
index 0000000..8612e47
--- /dev/null
+++ b/backend/internal/admin/operations.go
+// Package admin holds the transport-agnostic CRUD / config / import-export
+// business logic for OpenScanner's admin surface.
+//
+// Every method on Operations takes (ctx, params, callerID) and returns
+// (any, error); callers (currently internal/ws) are responsible for
+// transport framing, authentication, authorization, and error envelope.
+// This package MUST NOT import internal/ws or net/http-transport packages.
+package admin
+
+import (
+ "context"
+ "database/sql"
+ "encoding/json"
+ "log/slog"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/openscanner/openscanner/internal/auth"
+ "github.com/openscanner/openscanner/internal/db"
+)
+
+// ── Public helper types ──
+
+// UserError is returned by Operations methods for validation errors that
+// should be shown verbatim to the client. Callers can use errors.As to
+// distinguish these from internal errors.
+type UserError string
+
+func (e UserError) Error() string { return string(e) }
+
+// Reloader triggers a service config reload (dirmonitor, downstream).
+type Reloader interface {
+ Reload()
+}
+
+// TranscriberReloader can hot-reload the transcription subsystem.
+type TranscriberReloader interface {
+ Reload(enabled bool, baseURL, model, language string, diarize bool) bool
+ Enabled() bool
+ BaseURL() string
+ QueueDepth() int
+}
+
+// EventSink is the interface Operations uses to push admin events and
+// broadcast config refreshes without importing the WebSocket package. The
+// WS hub implements all four methods today, so the interface is satisfied
+// automatically; tests can provide a no-op implementation.
+type EventSink interface {
+ BroadcastAdminEvent(topic string, data any)
+ BroadcastCFG(ctx context.Context)
+ DisconnectByUser(userID int64)
+ ClientCount() int
+}
+
+// Deps are the optional dependencies used by admin operations. Any field
+// left zero disables the corresponding feature path at runtime (matches
+// the prior ws.HubDeps behaviour exactly).
+type Deps struct {
+ SQLDB *sql.DB
+ DirMonitorReload Reloader
+ DownstreamReload Reloader
+ TranscriberReload TranscriberReloader
+ FFmpegAvailable bool
+ FDKAACAvailable bool
+ WhisperAvailable bool
+ RecordingsDir string
+ EncryptionKey string
+}
+
+// Operations owns the admin CRUD business logic. It is transport-agnostic —
+// callers wrap its methods in whatever RPC / WS / HTTP envelope they use.
+type Operations struct {
+ Queries *db.Queries
+ Deps Deps
+ Events EventSink
+
+ // StartTime is used by activity-stats and uptime calculations. It
+ // defaults to time.Now() on New() but can be overridden for tests.
+ StartTime time.Time
+}
+
+// New constructs a new Operations bound to the given queries, deps, and
+// event sink. The event sink may be nil for test fixtures that don't
+// exercise broadcast-triggering paths.
+func New(queries *db.Queries, deps Deps, events EventSink) *Operations {
+ return &Operations{
+ Queries: queries,
+ Deps: deps,
+ Events: events,
+ StartTime: time.Now(),
+ }
+}
+
+// SetWhisperAvailable updates the cached ffmpeg/whisper capability after a
+// transcription hot-reload. Kept for the config-update and import flows
+// that mutate the live pool state.
+func (o *Operations) SetWhisperAvailable(v bool) { o.Deps.WhisperAvailable = v }
+
+// broadcastAdminEvent is a nil-safe wrapper around Events.BroadcastAdminEvent.
+func (o *Operations) broadcastAdminEvent(topic string, data any) {
+ if o.Events != nil {
+ o.Events.BroadcastAdminEvent(topic, data)
+ }
+}
+
+// broadcastCFG is a nil-safe wrapper around Events.BroadcastCFG.
+func (o *Operations) broadcastCFG(ctx context.Context) {
+ if o.Events != nil {
+ o.Events.BroadcastCFG(ctx)
+ }
+}
+
+// disconnectByUser is a nil-safe wrapper around Events.DisconnectByUser.
+func (o *Operations) disconnectByUser(userID int64) {
+ if o.Events != nil {
+ o.Events.DisconnectByUser(userID)
+ }
+}
+
+// ── Helpers ──
+
+// ptrToNullStr converts an optional JSON string to its sql.NullString form.
+func ptrToNullStr(p *string) sql.NullString {
+ if p == nil {
+ return sql.NullString{}
+ }
+ return sql.NullString{String: *p, Valid: true}
+}
+
+// ptrToNullInt converts an optional JSON integer to its sql.NullInt64 form.
+func ptrToNullInt(p *int64) sql.NullInt64 {
+ if p == nil {
+ return sql.NullInt64{}
+ }
+ return sql.NullInt64{Int64: *p, Valid: true}
+}
+
+// nullStr flattens sql.NullString to *string (nil when NULL) for JSON export.
+func nullStr(n sql.NullString) *string {
+ if !n.Valid {
+ return nil
+ }
+ return &n.String
+}
+
+// nullInt flattens sql.NullInt64 to *int64 (nil when NULL) for JSON export.
+func nullInt(n sql.NullInt64) *int64 {
+ if !n.Valid {
+ return nil
+ }
+ return &n.Int64
+}
+
+// isUniqueViolation detects a uniqueness-constraint failure by substring
+// match on the driver's error text.
+func isUniqueViolation(err error) bool {
+ return err != nil && strings.Contains(err.Error(), "UNIQUE")
+}
+
+// validHTTPURL reports whether raw parses as an http(s) URL.
+func validHTTPURL(raw string) bool {
+ u, err := url.Parse(raw)
+ if err != nil {
+ return false
+ }
+ return u.Scheme == "http" || u.Scheme == "https"
+}
+
+// remapSystemsJSON rewrites the system PKs embedded in a systems_json column
+// (used by api_keys, downstreams, webhooks, and users) so that grants
+// referring to a system by its old PK end up referring to the freshly
+// inserted row's PK after import. Accepts and returns a *string mirroring
+// the export shape (nil = "all systems"). Any system PK that doesn't appear
+// in the remap is dropped from the grant rather than silently broken.
+//
+// Shape: `[{"id": <system PK>, "talkgroups": [...]}]` per
+// auth.SystemGrant.
+func remapSystemsJSON(in *string, systemRemap map[int64]int64) *string {
+ if in == nil || strings.TrimSpace(*in) == "" {
+ return in
+ }
+ var grants []auth.SystemGrant
+ if err := json.Unmarshal([]byte(*in), &grants); err != nil {
+ // Fall through with nil grants — try the legacy flat-id form.
+ var ids []int64
+ if jerr := json.Unmarshal([]byte(*in), &ids); jerr != nil {
+ slog.Warn("import config: systems_json not recognised; preserving as-is",
+ "error", err)
+ return in
+ }
+ mapped := make([]int64, 0, len(ids))
+ for _, id := range ids {
+ if newID, ok := systemRemap[id]; ok {
+ mapped = append(mapped, newID)
+ } else {
+ slog.Warn("import config: dropping unknown system grant", "system_pk", id)
+ }
+ }
+ out, _ := json.Marshal(mapped)
+ s := string(out)
+ return &s
+ }
+ mapped := make([]auth.SystemGrant, 0, len(grants))
+ for _, g := range grants {
+ newID, ok := systemRemap[g.ID]
+ if !ok {
+ slog.Warn("import config: dropping unknown system grant", "system_pk", g.ID)
+ continue
+ }
+ mapped = append(mapped, auth.SystemGrant{ID: newID, Talkgroups: g.Talkgroups})
+ }
+ out, _ := json.Marshal(mapped)
+ s := string(out)
+ return &s
+}
+
+// validRoles is the set of allowed user roles.
+var validRoles = map[string]bool{
+ auth.RoleAdmin: true,
+ auth.RoleListener: true,
+}
+
+// SensitiveSettingKeys are settings whose values are encrypted at rest.
+// Exported because cmd/server/main.go consults it during secret migration.
+var SensitiveSettingKeys = map[string]bool{
+ "vapidPrivateKey": true,
+ "jwtSecret": true,
+}
+
+// allowedSettingKeys mirrors the allowed setting keys from config.go.
+var allowedSettingKeys = map[string]bool{
+ "activityDashboard": true,
+ "afsSystems": true,
+ "apiKeyCallRate": true,
+ "audioConversion": true,
+ "audioEncodingPreset": true,
+ "autoPopulateSystems": true,
+ "branding": true,
+ "disableDuplicateDetection": true,
+ "duplicateDetectionTimeFrame": true,
+ "email": true,
+ "keypadBeeps": true,
+ "logLevel": true,
+ "maxClients": true,
+ "playbackGoesLive": true,
+ "pruneDays": true,
+ "publicAccess": true,
+ "pushNotifications": true,
+ "searchPatchedTalkgroups": true,
+ "shareableLinks": true,
+ "sharedLinkExpiry": true,
+ "showListenersCount": true,
+ "sortTalkgroups": true,
+ "tagsToggle": true,
+ "time12hFormat": true,
+ "transcriptionDiarize": true,
+ "transcriptionEnabled": true,
+ "transcriptionLanguage": true,
+ "liveTranscriptDisplay": true,
+ "transcriptionModel": true,
+ "transcriptionUrl": true,
+ "vapidPrivateKey": true,
+ "vapidPublicKey": true,
+ "webhooksEnabled": true,
+}
+
+// AllowedSettingKeys reports whether a settings key is allowed to be mutated
+// via the admin API. Exposed for tests. Unknown keys are skipped during
+// config import (see ImportConfig).
+func AllowedSettingKeys(key string) bool { return allowedSettingKeys[key] }
+
+// hiddenTopLevelDirs for FS browsing.
+var hiddenTopLevelDirs = map[string]bool{
+ "bin": true, "boot": true, "dev": true, "lib": true,
+ "lib32": true, "lib64": true, "libx32": true,
+ "proc": true, "run": true, "sbin": true, "sys": true,
+ "usr": true, "etc": true, "snap": true, "lost+found": true,
+}
+
+// ── Response mappers (unexported; used by transport layers and tests) ──
+
+// mapUser shapes a db.User row for admin-API responses (camelCase keys,
+// sql.Null* flattened to value-or-null).
+func mapUser(u db.User) map[string]any {
+ return map[string]any{
+ "id": u.ID,
+ "username": u.Username,
+ "role": u.Role,
+ "disabled": u.Disabled,
+ "systemsJson": nullStr(u.SystemsJson),
+ "expiration": nullInt(u.Expiration),
+ "limit": nullInt(u.Limit),
+ "createdAt": u.CreatedAt,
+ "updatedAt": u.UpdatedAt,
+ }
+}
+
+func mapUsers(users []db.User) []map[string]any {
+ out := make([]map[string]any, len(users))
+ for i, u := range users {
+ out[i] = mapUser(u)
+ }
+ return out
+}
+
+func mapSystem(s db.System) map[string]any {
+ return map[string]any{
+ "id": s.ID,
+ "systemId": s.SystemID,
+ "label": s.Label,
+ "autoPopulateTalkgroups": s.AutoPopulateTalkgroups,
+ "blacklistsJson": nullStr(s.BlacklistsJson),
+ "led": nullStr(s.Led),
+ "order": s.Order,
+ }
+}
+
+func mapSystems(systems []db.System) []map[string]any {
+ out := make([]map[string]any, len(systems))
+ for i, s := range systems {
+ out[i] = mapSystem(s)
+ }
+ return out
+}
+
+func mapTalkgroup(t db.Talkgroup) map[string]any {
+ return map[string]any{
+ "id": t.ID,
+ "systemId": t.SystemID,
+ "talkgroupId": t.TalkgroupID,
+ "label": nullStr(t.Label),
+ "name": nullStr(t.Name),
+ "frequency": nullInt(t.Frequency),
+ "led": nullStr(t.Led),
+ "groupId": nullInt(t.GroupID),
+ "tagId": nullInt(t.TagID),
+ "order": t.Order,
+ }
+}
+
+func mapTalkgroups(tgs []db.Talkgroup) []map[string]any {
+ out := make([]map[string]any, len(tgs))
+ for i, t := range tgs {
+ out[i] = mapTalkgroup(t)
+ }
+ return out
+}
+
+func mapUnit(u db.Unit) map[string]any {
+ return map[string]any{
+ "id": u.ID,
+ "systemId": u.SystemID,
+ "unitId": u.UnitID,
+ "label": nullStr(u.Label),
+ "order": u.Order,
+ }
+}
+
+func mapUnits(units []db.Unit) []map[string]any {
+ out := make([]map[string]any, len(units))
+ for i, u := range units {
+ out[i] = mapUnit(u)
+ }
+ return out
+}
+
+// mapAPIKey exposes only a short fingerprint of the key hash — never the
+// raw key — to admin clients.
+func mapAPIKey(k db.ApiKey) map[string]any {
+ fingerprint := auth.HashAPIKey(k.Key)
+ if len(fingerprint) > 12 {
+ fingerprint = fingerprint[:12]
+ }
+ return map[string]any{
+ "id": k.ID,
+ "fingerprint": fingerprint,
+ "ident": nullStr(k.Ident),
+ "disabled": k.Disabled,
+ "systemsJson": nullStr(k.SystemsJson),
+ "callRateLimit": nullInt(k.CallRateLimit),
+ "order": k.Order,
+ }
+}
+
+func mapAPIKeys(keys []db.ApiKey) []map[string]any {
+ out := make([]map[string]any, len(keys))
+ for i, k := range keys {
+ out[i] = mapAPIKey(k)
+ }
+ return out
+}
+
+func mapDirMonitor(d db.Dirmonitor) map[string]any {
+ return map[string]any{
+ "id": d.ID,
+ "directory": d.Directory,
+ "type": d.Type,
+ "mask": nullStr(d.Mask),
+ "extension": nullStr(d.Extension),
+ "frequency": nullInt(d.Frequency),
+ "delay": nullInt(d.Delay),
+ "deleteAfter": d.DeleteAfter,
+ "usePolling": d.UsePolling,
+ "disabled": d.Disabled,
+ "systemId": nullInt(d.SystemID),
+ "talkgroupId": nullInt(d.TalkgroupID),
+ "order": d.Order,
+ }
+}
+
+func mapDirMonitors(dms []db.Dirmonitor) []map[string]any {
+ out := make([]map[string]any, len(dms))
+ for i, d := range dms {
+ out[i] = mapDirMonitor(d)
+ }
+ return out
+}
+
+// mapDownstream reveals only whether an API key is set ("hasApiKey"),
+// never the key itself.
+func mapDownstream(d db.Downstream) map[string]any {
+ return map[string]any{
+ "id": d.ID,
+ "url": d.Url,
+ "hasApiKey": d.ApiKey != "",
+ "systemsJson": nullStr(d.SystemsJson),
+ "disabled": d.Disabled,
+ "order": d.Order,
+ }
+}
+
+func mapDownstreams(ds []db.Downstream) []map[string]any {
+ out := make([]map[string]any, len(ds))
+ for i, d := range ds {
+ out[i] = mapDownstream(d)
+ }
+ return out
+}
+
+func mapWebhook(w db.Webhook) map[string]any {
+ return map[string]any{
+ "id": w.ID,
+ "url": w.Url,
+ "type": w.Type,
+ "secret": nullStr(w.Secret),
+ "systemsJson": nullStr(w.SystemsJson),
+ "disabled": w.Disabled,
+ "order": w.Order,
+ }
+}
+
+func mapWebhooks(ws []db.Webhook) []map[string]any {
+ out := make([]map[string]any, len(ws))
+ for i, w := range ws {
+ out[i] = mapWebhook(w)
+ }
+ return out
+}
+
+func mapSharedLink(r db.ListSharedLinksRow) map[string]any {
+ m := map[string]any{
+ "id": r.ID,
+ "callId": r.CallID,
+ "token": r.Token,
+ "createdAt": r.CreatedAt,
+ "sharedBy": r.SharedBy,
+ "dateTime": r.DateTime,
+ "duration": r.Duration.Int64,
+ "systemLabel": r.SystemLabel.String,
+ "talkgroupLabel": r.TalkgroupLabel.String,
+ "talkgroupName": r.TalkgroupName.String,
+ }
+ // A NULL expires_at is surfaced as an explicit JSON null.
+ if r.ExpiresAt.Valid {
+ m["expiresAt"] = r.ExpiresAt.Int64
+ } else {
+ m["expiresAt"] = nil
+ }
+ return m
+}
diff --git a/backend/internal/admin/radioreference.go b/backend/internal/admin/radioreference.go
new file mode 100644
index 0000000..e69f0ff
--- /dev/null
+++ b/backend/internal/admin/radioreference.go
+package admin
+
+import (
+ "context"
+ "database/sql"
+ "encoding/json"
+ "errors"
+ "strings"
+
+ "github.com/openscanner/openscanner/internal/db"
+)
+
+// RadioReferenceApply merges RadioReference-sourced talkgroup metadata into
+// the local DB, using either fill-missing or overwrite-selected semantics.
func (o *Operations) RadioReferenceApply(ctx context.Context, params json.RawMessage, _ int64) (any, error) {
	// rrCandidate is one imported RadioReference row. Pointer fields
	// distinguish "not provided" (nil) from "provided but empty". Row is the
	// client-side row index, echoed back in rowErrors.
	type rrCandidate struct {
		Row         int     `json:"row"`
		TalkgroupID int64   `json:"talkgroupId"`
		Label       *string `json:"label,omitempty"`
		Name        *string `json:"name,omitempty"`
		Group       *string `json:"group,omitempty"`
		Tag         *string `json:"tag,omitempty"`
		Led         *string `json:"led,omitempty"`
		Order       *int64  `json:"order,omitempty"`
	}

	var req struct {
		SystemID       int64         `json:"systemId"`
		Candidates     []rrCandidate `json:"candidates"`
		MergeMode      string        `json:"mergeMode"`
		SelectedFields []string      `json:"selectedFields"`
	}
	if err := json.Unmarshal(params, &req); err != nil {
		return nil, UserError("invalid request body")
	}
	if req.SystemID <= 0 {
		return nil, UserError("systemId is required")
	}
	if len(req.Candidates) == 0 {
		return nil, UserError("candidates are required")
	}
	// Hard cap bounds the per-request DB work.
	if len(req.Candidates) > 100_000 {
		return nil, UserError("too many candidates")
	}
	// fill_missing (the default): populate only fields the local row lacks.
	// overwrite_selected: replace exactly the fields listed in selectedFields.
	if req.MergeMode == "" {
		req.MergeMode = "fill_missing"
	}
	if req.MergeMode != "fill_missing" && req.MergeMode != "overwrite_selected" {
		return nil, UserError("mergeMode must be 'fill_missing' or 'overwrite_selected'")
	}
	if _, err := o.Queries.GetSystem(ctx, req.SystemID); err != nil {
		return nil, UserError("system not found")
	}

	// Sanitize selected fields: lowercase, trim, and silently drop anything
	// outside the updatable set.
	rrUpdatable := map[string]bool{"label": true, "name": true, "group": true, "tag": true, "led": true, "order": true}
	selected := make([]string, 0, len(req.SelectedFields))
	for _, f := range req.SelectedFields {
		v := strings.ToLower(strings.TrimSpace(f))
		if rrUpdatable[v] {
			selected = append(selected, v)
		}
	}

	// Per-row outcome counters. resp is seeded with zero placeholders and
	// overwritten with the final tallies after the loop.
	type rowErr struct {
		Row    int    `json:"row"`
		Reason string `json:"reason"`
	}
	resp := map[string]any{
		"processed": 0,
		"matched":   0,
		"updated":   0,
		"skipped":   0,
		"errors":    0,
		"rowErrors": []rowErr{},
	}
	processed, matched, updated, skippedCount, errCount := 0, 0, 0, 0, 0
	rowErrors := make([]rowErr, 0)

	for _, candidate := range req.Candidates {
		processed++

		tg, tgErr := o.Queries.GetTalkgroupBySystemAndTGID(ctx, db.GetTalkgroupBySystemAndTGIDParams{
			SystemID:    req.SystemID,
			TalkgroupID: candidate.TalkgroupID,
		})
		if tgErr != nil {
			// Unknown talkgroup is a per-row skip; any other DB failure is a
			// per-row error. Both move on to the next candidate.
			if errors.Is(tgErr, sql.ErrNoRows) {
				skippedCount++
				rowErrors = append(rowErrors, rowErr{Row: candidate.Row, Reason: "talkgroup not found in selected system"})
				continue
			}
			errCount++
			rowErrors = append(rowErrors, rowErr{Row: candidate.Row, Reason: "database error"})
			continue
		}
		matched++

		// Start from the row's current values so fields we don't touch are
		// written back unchanged by UpdateTalkgroup.
		p := db.UpdateTalkgroupParams{
			ID:          tg.ID,
			TalkgroupID: tg.TalkgroupID,
			Label:       tg.Label,
			Name:        tg.Name,
			Frequency:   tg.Frequency,
			Led:         tg.Led,
			GroupID:     tg.GroupID,
			TagID:       tg.TagID,
			Order:       tg.Order,
		}

		// Determine which fields to apply.
		allow := map[string]bool{}
		if req.MergeMode == "overwrite_selected" {
			for _, f := range selected {
				allow[f] = true
			}
		}

		// check marks field applicable when the candidate provides it and
		// either (overwrite mode) it is in the selected set or (fill mode)
		// the local row's value is empty/unset.
		applyFields := make([]string, 0, 6)
		check := func(field string, hasCand bool, targetEmpty bool) {
			if !hasCand {
				return
			}
			if req.MergeMode == "overwrite_selected" {
				if allow[field] {
					applyFields = append(applyFields, field)
				}
				return
			}
			if targetEmpty {
				applyFields = append(applyFields, field)
			}
		}
		check("label", candidate.Label != nil, !tg.Label.Valid || strings.TrimSpace(tg.Label.String) == "")
		check("name", candidate.Name != nil, !tg.Name.Valid || strings.TrimSpace(tg.Name.String) == "")
		check("group", candidate.Group != nil, !tg.GroupID.Valid)
		check("tag", candidate.Tag != nil, !tg.TagID.Valid)
		check("led", candidate.Led != nil, !tg.Led.Valid || strings.TrimSpace(tg.Led.String) == "")
		// NOTE(review): Order == 0 is treated as "unset" here, so a talkgroup
		// deliberately ordered 0 would be overwritten in fill mode — confirm
		// that 0 is never a meaningful order value.
		check("order", candidate.Order != nil, tg.Order == 0)

		if len(applyFields) == 0 {
			skippedCount++
			continue
		}

		// Apply field updates. group/tag candidates are resolved by label
		// with get-or-create semantics. A `break` inside a case exits only
		// the switch; applyErr is what actually suppresses the row's UPDATE
		// after the field loop completes.
		applyErr := false
		for _, field := range applyFields {
			switch field {
			case "label":
				if candidate.Label != nil {
					p.Label = sql.NullString{String: *candidate.Label, Valid: true}
				}
			case "name":
				if candidate.Name != nil {
					p.Name = sql.NullString{String: *candidate.Name, Valid: true}
				}
			case "group":
				if candidate.Group != nil {
					g, err := o.Queries.GetGroupByLabel(ctx, *candidate.Group)
					if err != nil {
						if errors.Is(err, sql.ErrNoRows) {
							// Group does not exist yet — create it on the fly.
							newID, createErr := o.Queries.CreateGroup(ctx, *candidate.Group)
							if createErr != nil {
								errCount++
								rowErrors = append(rowErrors, rowErr{Row: candidate.Row, Reason: "database error"})
								applyErr = true
								break
							}
							p.GroupID = sql.NullInt64{Int64: newID, Valid: true}
						} else {
							errCount++
							rowErrors = append(rowErrors, rowErr{Row: candidate.Row, Reason: "database error"})
							applyErr = true
							break
						}
					} else {
						p.GroupID = sql.NullInt64{Int64: g.ID, Valid: true}
					}
				}
			case "tag":
				if candidate.Tag != nil {
					t, err := o.Queries.GetTagByLabel(ctx, *candidate.Tag)
					if err != nil {
						if errors.Is(err, sql.ErrNoRows) {
							// Tag does not exist yet — create it on the fly.
							newID, createErr := o.Queries.CreateTag(ctx, *candidate.Tag)
							if createErr != nil {
								errCount++
								rowErrors = append(rowErrors, rowErr{Row: candidate.Row, Reason: "database error"})
								applyErr = true
								break
							}
							p.TagID = sql.NullInt64{Int64: newID, Valid: true}
						} else {
							errCount++
							rowErrors = append(rowErrors, rowErr{Row: candidate.Row, Reason: "database error"})
							applyErr = true
							break
						}
					} else {
						p.TagID = sql.NullInt64{Int64: t.ID, Valid: true}
					}
				}
			case "led":
				if candidate.Led != nil {
					p.Led = sql.NullString{String: *candidate.Led, Valid: true}
				}
			case "order":
				if candidate.Order != nil {
					p.Order = *candidate.Order
				}
			}
		}
		if applyErr {
			continue
		}

		if err := o.Queries.UpdateTalkgroup(ctx, p); err != nil {
			errCount++
			rowErrors = append(rowErrors, rowErr{Row: candidate.Row, Reason: "database error"})
			continue
		}

		updated++
	}

	resp["processed"] = processed
	resp["matched"] = matched
	resp["updated"] = updated
	resp["skipped"] = skippedCount
	resp["errors"] = errCount
	resp["rowErrors"] = rowErrors
	return resp, nil
}

// ── file: backend/internal/admin/settings.go (new) ──

package admin

import (
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"strconv"

	"github.com/openscanner/openscanner/internal/audio"
	"github.com/openscanner/openscanner/internal/auth"
	"github.com/openscanner/openscanner/internal/db"
	"github.com/openscanner/openscanner/internal/logging"
)

// ConfigGet returns the current settings (sensitive values decrypted) along
// with server capabilities.
func (o *Operations) ConfigGet(ctx context.Context, _ json.RawMessage, _ int64) (any, error) {
	settings, err := o.Queries.ListSettings(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to list settings: %w", err)
	}

	settingsList := make([]map[string]string, len(settings))
	for i, s := range settings {
		val := s.Value
		// Sensitive values are stored encrypted when an encryption key is
		// configured; decrypt for the admin response. On decrypt failure the
		// stored value is returned as-is.
		if SensitiveSettingKeys[s.Key] && o.Deps.EncryptionKey != "" {
			if plain, err := auth.DecryptString(val, o.Deps.EncryptionKey); err == nil {
				val = plain
			}
		}
		settingsList[i] = map[string]string{"key": s.Key, "value": val}
	}

	return map[string]any{
		"settings": settingsList,
		// Capabilities let the UI disable features this binary can't serve.
		"capabilities": map[string]bool{
			"ffmpeg":  o.Deps.FFmpegAvailable,
			"fdkAac":  o.Deps.FDKAACAvailable,
			"whisper": o.Deps.WhisperAvailable,
		},
	}, nil
}

// ConfigUpdate applies a batch of settings atomically, encrypting sensitive
// values, hot-reloading transcription if touched, and rebroadcasting CFG.
func (o *Operations) ConfigUpdate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) {
	var body struct {
		Settings []struct {
			Key   string `json:"key"`
			Value string `json:"value"`
		} `json:"settings"`
	}
	if err := json.Unmarshal(params, &body); err != nil {
		return nil, UserError("invalid request body")
	}
	settings := body.Settings

	// Validate all keys first, so the transaction below applies the whole
	// batch or nothing.
	for _, s := range settings {
		if !allowedSettingKeys[s.Key] {
			return nil, UserError("unknown setting key: " + s.Key)
		}
		if s.Key == "logLevel" {
			if _, ok := logging.ParseLevel(s.Value); !ok {
				return nil, UserError("invalid logLevel; expected debug, info, warn, or error")
			}
		}
		if s.Key == "audioEncodingPreset" {
			if !audio.IsValidEncodingPreset(s.Value) {
				return nil, UserError("invalid audioEncodingPreset value")
			}
			if audio.IsHEEncodingPreset(s.Value) && !o.Deps.FDKAACAvailable {
				return nil, UserError("selected HE-AAC preset requires libfdk_aac support in ffmpeg")
			}
		}
		if s.Key == "audioConversion" {
			// Any non-zero value turns conversion on, which needs ffmpeg.
			if v, err := strconv.Atoi(s.Value); err == nil && v != 0 && !o.Deps.FFmpegAvailable {
				return nil, UserError("ffmpeg is not installed — install it and restart the service to enable audio conversion")
			}
		}
	}

	sqlDB := o.Deps.SQLDB
	if sqlDB == nil {
		return nil, fmt.Errorf("transaction support not available")
	}

	tx, err := sqlDB.BeginTx(ctx, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to begin transaction: %w", err)
	}
	// Rollback is a no-op after a successful Commit.
	defer tx.Rollback() //nolint:errcheck

	qtx := o.Queries.WithTx(tx)
	for _, s := range settings {
		val := s.Value
		// Encrypt sensitive, non-empty values at rest when a key is set.
		if SensitiveSettingKeys[s.Key] && o.Deps.EncryptionKey != "" && val != "" {
			enc, err := auth.EncryptString(val, o.Deps.EncryptionKey)
			if err != nil {
				return nil, fmt.Errorf("encrypt setting %q: %w", s.Key, err)
			}
			val = enc
		}
		if err := qtx.UpsertSetting(ctx, db.UpsertSettingParams{Key: s.Key, Value: val}); err != nil {
			return nil, fmt.Errorf("failed to save config: %w", err)
		}
	}

	if err := tx.Commit(); err != nil {
		return nil, fmt.Errorf("failed to commit config: %w", err)
	}

	// Log each changed setting, redacting sensitive keys.
	for _, s := range settings {
		v := s.Value
		if s.Key == "vapidPrivateKey" {
			v = "[REDACTED]"
		}
		slog.Info("admin: config updated", "key", s.Key, "value", v, "by", callerID)
	}

	// Apply log level change at runtime.
	for _, s := range settings {
		if s.Key == "logLevel" {
			if err := logging.SetLevel(s.Value); err != nil {
				slog.Warn("invalid logLevel setting, keeping previous runtime level", "value", s.Value, "error", err)
			}
			break
		}
	}

	// Hot-reload transcription if any transcription setting changed.
	if o.Deps.TranscriberReload != nil {
		transcriptionKeys := map[string]bool{
			"transcriptionEnabled":  true,
			"transcriptionUrl":      true,
			"transcriptionModel":    true,
			"transcriptionLanguage": true,
			"transcriptionDiarize":  true,
		}
		needsReload := false
		for _, s := range settings {
			if transcriptionKeys[s.Key] {
				needsReload = true
				break
			}
		}
		if needsReload {
			// Read current settings from DB (just committed). Lookup errors
			// are deliberately ignored; a missing setting reads as zero value
			// and the corresponding option is treated as off.
			tEnabled, _ := o.Queries.GetSetting(ctx, "transcriptionEnabled")
			tURL, _ := o.Queries.GetSetting(ctx, "transcriptionUrl")
			tModel, _ := o.Queries.GetSetting(ctx, "transcriptionModel")
			tLang, _ := o.Queries.GetSetting(ctx, "transcriptionLanguage")
			tDiarize, _ := o.Queries.GetSetting(ctx, "transcriptionDiarize")

			ok := o.Deps.TranscriberReload.Reload(
				tEnabled.Value == "true",
				tURL.Value,
				tModel.Value,
				tLang.Value,
				tDiarize.Value == "true",
			)
			o.Deps.WhisperAvailable = ok && tEnabled.Value == "true"
		}
	}

	// Broadcast updated config to all WS clients using the safe,
	// curated CFG builder (excludes secrets like VAPID keys).
	o.broadcastCFG(ctx)

	o.broadcastAdminEvent("config.updated", nil)
	return map[string]bool{"ok": true}, nil
}

// ── renamed file: backend/internal/ws/admin_ops_settings_test.go →
// backend/internal/admin/settings_test.go. Shown as a unified diff; unchanged
// context between hunks is not present in this patch and is left as-is. ──

diff --git a/backend/internal/ws/admin_ops_settings_test.go b/backend/internal/admin/settings_test.go
similarity index 60%
rename from backend/internal/ws/admin_ops_settings_test.go
rename to backend/internal/admin/settings_test.go
index 5c7474d..2e5967c 100644
--- a/backend/internal/ws/admin_ops_settings_test.go
+++ b/backend/internal/admin/settings_test.go
@@ -1,14 +1,12 @@
-// Tests for the settings encryption round-trip in opConfigGet / opConfigUpdate.
+// Tests for the settings encryption round-trip in ConfigGet / ConfigUpdate.
 //
-// The handlers are methods on *Client; rather than standing up a full
-// WebSocket connection, we construct a minimal Client with only the fields
-// the handlers touch (hub, userID, isAdmin) and call the methods directly.
-// This is an internal test (package ws) so it can reach unexported fields.
-package ws
+// These live in the admin package (after the Phase-2 restructure) because
+// the CRUD semantics belong here; the WebSocket framing layer is tested
+// separately in internal/ws/admin_router_test.go.
+package admin
 
 import (
 	"context"
-	"database/sql"
 	"encoding/json"
 	"strings"
 	"testing"
@@ -18,9 +16,8 @@ import (
 	_ "modernc.org/sqlite"
 )
 
-// newAdminClientForSettings builds a minimal Client/Hub wired against an
-// in-memory SQLite instance and returns everything the tests need.
-func newAdminClientForSettings(t *testing.T, encryptionKey string) (*Client, *db.Queries, *sql.DB) {
+// newTestOperations builds an Operations bound to an in-memory SQLite DB.
+func newTestOperations(t *testing.T, encryptionKey string) (*Operations, *db.Queries) {
 	t.Helper()
 	sqlDB, err := db.Open(":memory:")
 	if err != nil {
@@ -29,29 +26,23 @@ func newAdminClientForSettings(t *testing.T, encryptionKey string) (*Client, *db
 	t.Cleanup(func() { _ = sqlDB.Close() })
 
 	queries := db.New(sqlDB)
-	hub := NewHub(queries, "test", HubDeps{
+	ops := New(queries, Deps{
 		SQLDB:         sqlDB,
 		EncryptionKey: encryptionKey,
-	})
-
-	c := &Client{
-		hub:     hub,
-		userID:  1,
-		isAdmin: true,
-	}
-	return c, queries, sqlDB
+	}, nil)
+	return ops, queries
 }
 
-func TestAdminOps_SettingsUpsert_EncryptsSensitiveKey(t *testing.T) {
+func TestConfigUpdate_EncryptsSensitiveKey(t *testing.T) {
 	const encKey = "test-encryption-key"
-	c, queries, _ := newAdminClientForSettings(t, encKey)
+	ops, queries := newTestOperations(t, encKey)
 
 	params, _ := json.Marshal(map[string]any{
 		"settings": []map[string]string{{"key": "vapidPrivateKey", "value": "secret123"}},
 	})
-	if _, err := c.opConfigUpdate(context.Background(), params); err != nil {
-		t.Fatalf("opConfigUpdate: %v", err)
+	if _, err := ops.ConfigUpdate(context.Background(), params, 1); err != nil {
+		t.Fatalf("ConfigUpdate: %v", err)
 	}
 
 	stored, err := queries.GetSetting(context.Background(), "vapidPrivateKey")
@@ -61,7 +52,6 @@ func TestAdminOps_SettingsUpsert_EncryptsSensitiveKey(t *testing.T) {
 	if !auth.IsEncrypted(stored.Value) {
 		t.Errorf("stored value should start with enc::; got %q", stored.Value)
 	}
-	// Decrypt and confirm round-trip.
 	plain, err := auth.DecryptString(stored.Value, encKey)
 	if err != nil {
 		t.Fatalf("DecryptString: %v", err)
@@ -71,44 +61,42 @@ func TestAdminOps_SettingsUpsert_EncryptsSensitiveKey(t *testing.T) {
 	}
 }
 
-// TestAdminOps_SettingsUpsert_JwtSecret_NotUserMutable asserts that jwtSecret
-// is marked sensitive (so ListSettings decrypts it when returning) BUT is not
-// in the admin-allowed mutation set — it is managed exclusively by
-// auth.InitJWTSecret at startup.
-func TestAdminOps_SettingsUpsert_JwtSecret_NotUserMutable(t *testing.T) {
+// TestConfigUpdate_JwtSecret_NotUserMutable asserts that jwtSecret is marked
+// sensitive (so ConfigGet decrypts it) BUT is not in the admin-allowed
+// mutation set — it is managed exclusively by auth.InitJWTSecret at startup.
+func TestConfigUpdate_JwtSecret_NotUserMutable(t *testing.T) {
 	const encKey = "another-test-key"
-	c, _, _ := newAdminClientForSettings(t, encKey)
+	ops, _ := newTestOperations(t, encKey)
 
 	if !SensitiveSettingKeys["jwtSecret"] {
 		t.Error("jwtSecret must be in SensitiveSettingKeys")
 	}
-	if wsAllowedSettingKeys["jwtSecret"] {
-		t.Error("jwtSecret must NOT be in wsAllowedSettingKeys (managed by InitJWTSecret)")
+	if AllowedSettingKeys("jwtSecret") {
+		t.Error("jwtSecret must NOT be in allowedSettingKeys (managed by InitJWTSecret)")
 	}
 
-	// Confirm that attempting to mutate it via the admin op is rejected.
 	params, _ := json.Marshal(map[string]any{
 		"settings": []map[string]string{{"key": "jwtSecret", "value": "raw-signing-secret"}},
 	})
-	_, err := c.opConfigUpdate(context.Background(), params)
+	_, err := ops.ConfigUpdate(context.Background(), params, 1)
 	if err == nil {
-		t.Fatal("opConfigUpdate should reject jwtSecret as an unknown key")
+		t.Fatal("ConfigUpdate should reject jwtSecret as an unknown key")
 	}
 	if !strings.Contains(err.Error(), "jwtSecret") {
 		t.Errorf("error should mention 'jwtSecret'; got: %v", err)
 	}
 }
 
-func TestAdminOps_SettingsUpsert_NonSensitiveNotEncrypted(t *testing.T) {
+func TestConfigUpdate_NonSensitiveNotEncrypted(t *testing.T) {
 	const encKey = "test-encryption-key"
-	c, queries, _ := newAdminClientForSettings(t, encKey)
+	ops, queries := newTestOperations(t, encKey)
 
 	params, _ := json.Marshal(map[string]any{
 		"settings": []map[string]string{{"key": "logLevel", "value": "debug"}},
 	})
-	if _, err := c.opConfigUpdate(context.Background(), params); err != nil {
-		t.Fatalf("opConfigUpdate: %v", err)
+	if _, err := ops.ConfigUpdate(context.Background(), params, 1); err != nil {
+		t.Fatalf("ConfigUpdate: %v", err)
	}
 
 	stored, err := queries.GetSetting(context.Background(), "logLevel")
@@ -123,17 +111,16 @@ func TestAdminOps_SettingsUpsert_NonSensitiveNotEncrypted(t *testing.T) {
 	}
 }
 
-func TestAdminOps_SettingsUpsert_NoEncryptionKey_StoresPlaintext(t *testing.T) {
-	// Empty encryption key — sensitive keys are stored plaintext (with warning
-	// logged at runtime; we don't assert on logs here).
-	c, queries, _ := newAdminClientForSettings(t, "")
+func TestConfigUpdate_NoEncryptionKey_StoresPlaintext(t *testing.T) {
+	// Empty encryption key — sensitive keys are stored plaintext.
+	ops, queries := newTestOperations(t, "")
 
 	params, _ := json.Marshal(map[string]any{
 		"settings": []map[string]string{{"key": "vapidPrivateKey", "value": "plain-secret"}},
 	})
-	if _, err := c.opConfigUpdate(context.Background(), params); err != nil {
-		t.Fatalf("opConfigUpdate: %v", err)
+	if _, err := ops.ConfigUpdate(context.Background(), params, 1); err != nil {
+		t.Fatalf("ConfigUpdate: %v", err)
 	}
 
 	stored, err := queries.GetSetting(context.Background(), "vapidPrivateKey")
@@ -148,11 +135,10 @@ func TestAdminOps_SettingsUpsert_NoEncryptionKey_StoresPlaintext(t *testing.T) {
 	}
 }
 
-func TestAdminOps_SettingsList_DecryptsSensitiveKey(t *testing.T) {
+func TestConfigGet_DecryptsSensitiveKey(t *testing.T) {
 	const encKey = "list-test-key"
-	c, queries, _ := newAdminClientForSettings(t, encKey)
+	ops, queries := newTestOperations(t, encKey)
 
-	// Seed an already-encrypted sensitive setting and a plaintext normal one.
 	encrypted, err := auth.EncryptString("my-vapid-key", encKey)
 	if err != nil {
 		t.Fatalf("seed EncryptString: %v", err)
@@ -168,9 +154,9 @@ func TestAdminOps_SettingsList_DecryptsSensitiveKey(t *testing.T) {
 		t.Fatalf("seed UpsertSetting (logLevel): %v", err)
 	}
 
-	result, err := c.opConfigGet(context.Background(), nil)
+	result, err := ops.ConfigGet(context.Background(), nil, 1)
 	if err != nil {
-		t.Fatalf("opConfigGet: %v", err)
+		t.Fatalf("ConfigGet: %v", err)
 	}
 
 	m, ok := result.(map[string]any)
@@ -195,9 +181,7 @@ func TestAdminOps_SettingsList_DecryptsSensitiveKey(t *testing.T) {
 	}
 }
 
-// TestSensitiveSettingKeys_Documented is a schema-level sanity check: any key
-// added to the sensitive list without being wired through the encryption path
-// would be a latent bug.
+// TestSensitiveSettingKeys_Documented is a schema-level sanity check.
 func TestSensitiveSettingKeys_Documented(t *testing.T) {
 	want := []string{"vapidPrivateKey", "jwtSecret"}
 	for _, k := range want {
diff --git a/backend/internal/admin/shared_links.go b/backend/internal/admin/shared_links.go
new file mode 100644
index 0000000..2608810
--- /dev/null
+++ b/backend/internal/admin/shared_links.go

// ── file: backend/internal/admin/shared_links.go (new) ──

package admin

import (
	"context"
	"encoding/json"
	"fmt"
)

// SharedLinksList returns all shared links.
func (o *Operations) SharedLinksList(ctx context.Context, _ json.RawMessage, _ int64) (any, error) {
	rows, err := o.Queries.ListSharedLinks(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to list shared links: %w", err)
	}
	// Pre-sized but appended, so an empty result encodes as [] not null.
	items := make([]map[string]any, 0, len(rows))
	for _, r := range rows {
		items = append(items, mapSharedLink(r))
	}
	return items, nil
}

// SharedLinksDelete deletes a shared link.
+func (o *Operations) SharedLinksDelete(ctx context.Context, params json.RawMessage, _ int64) (any, error) { + var req struct { + ID int64 `json:"id"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + + if err := o.Queries.DeleteSharedLink(ctx, req.ID); err != nil { + return nil, fmt.Errorf("failed to delete shared link: %w", err) + } + o.broadcastAdminEvent("shared-links.updated", nil) + return map[string]bool{"deleted": true}, nil +} diff --git a/backend/internal/admin/systems.go b/backend/internal/admin/systems.go new file mode 100644 index 0000000..86d6d1d --- /dev/null +++ b/backend/internal/admin/systems.go @@ -0,0 +1,131 @@ +package admin + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + + "github.com/openscanner/openscanner/internal/db" +) + +// SystemsList returns all systems. +func (o *Operations) SystemsList(ctx context.Context, _ json.RawMessage, _ int64) (any, error) { + systems, err := o.Queries.ListSystems(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list systems: %w", err) + } + return mapSystems(systems), nil +} + +// SystemsCreate creates a new system. 
+func (o *Operations) SystemsCreate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + SystemID int64 `json:"systemId"` + Label string `json:"label"` + AutoPopulateTalkgroups int64 `json:"autoPopulateTalkgroups"` + BlacklistsJson *string `json:"blacklistsJson"` + Led *string `json:"led"` + Order int64 `json:"order"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + + id, err := o.Queries.CreateSystem(ctx, db.CreateSystemParams{ + SystemID: req.SystemID, + Label: req.Label, + AutoPopulateTalkgroups: req.AutoPopulateTalkgroups, + BlacklistsJson: ptrToNullStr(req.BlacklistsJson), + Led: ptrToNullStr(req.Led), + Order: req.Order, + }) + if isUniqueViolation(err) { + return nil, UserError("system_id already exists") + } + if err != nil { + return nil, fmt.Errorf("failed to create system: %w", err) + } + + system, err := o.Queries.GetSystem(ctx, id) + if err != nil { + return nil, fmt.Errorf("failed to fetch created system: %w", err) + } + slog.Info("admin: system created", "id", system.ID, "system_id", system.SystemID, "label", system.Label, "by", callerID) + o.broadcastAdminEvent("systems.updated", nil) + o.broadcastCFG(ctx) + return mapSystem(system), nil +} + +// SystemsUpdate updates an existing system. 
+func (o *Operations) SystemsUpdate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + SystemID int64 `json:"systemId"` + Label string `json:"label"` + AutoPopulateTalkgroups int64 `json:"autoPopulateTalkgroups"` + BlacklistsJson *string `json:"blacklistsJson"` + Led *string `json:"led"` + Order int64 `json:"order"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + + if _, err := o.Queries.GetSystem(ctx, req.ID); err != nil { + return nil, UserError("system not found") + } + + err := o.Queries.UpdateSystem(ctx, db.UpdateSystemParams{ + ID: req.ID, + SystemID: req.SystemID, + Label: req.Label, + AutoPopulateTalkgroups: req.AutoPopulateTalkgroups, + BlacklistsJson: ptrToNullStr(req.BlacklistsJson), + Led: ptrToNullStr(req.Led), + Order: req.Order, + }) + if isUniqueViolation(err) { + return nil, UserError("system_id already exists") + } + if err != nil { + return nil, fmt.Errorf("failed to update system: %w", err) + } + + system, err := o.Queries.GetSystem(ctx, req.ID) + if err != nil { + return nil, fmt.Errorf("failed to fetch updated system: %w", err) + } + slog.Info("admin: system updated", "id", system.ID, "system_id", system.SystemID, "by", callerID) + o.broadcastAdminEvent("systems.updated", nil) + o.broadcastCFG(ctx) + return mapSystem(system), nil +} + +// SystemsDelete deletes a system. 
+func (o *Operations) SystemsDelete(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + + if _, err := o.Queries.GetSystem(ctx, req.ID); err != nil { + return nil, UserError("system not found") + } + + if err := o.Queries.DeleteSystem(ctx, req.ID); err != nil { + return nil, fmt.Errorf("failed to delete system: %w", err) + } + slog.Info("admin: system deleted", "id", req.ID, "by", callerID) + o.broadcastAdminEvent("systems.updated", nil) + o.broadcastCFG(ctx) + return map[string]bool{"ok": true}, nil +} diff --git a/backend/internal/admin/tags.go b/backend/internal/admin/tags.go new file mode 100644 index 0000000..1ba382a --- /dev/null +++ b/backend/internal/admin/tags.go @@ -0,0 +1,112 @@ +package admin + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + + "github.com/openscanner/openscanner/internal/db" +) + +// TagsList returns all tags. +func (o *Operations) TagsList(ctx context.Context, _ json.RawMessage, _ int64) (any, error) { + tags, err := o.Queries.ListTags(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list tags: %w", err) + } + return tags, nil +} + +// TagsCreate creates a new tag. 
+func (o *Operations) TagsCreate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + Label string `json:"label"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.Label == "" { + return nil, UserError("label is required") + } + + id, err := o.Queries.CreateTag(ctx, req.Label) + if isUniqueViolation(err) { + return nil, UserError("tag label already exists") + } + if err != nil { + return nil, fmt.Errorf("failed to create tag: %w", err) + } + + tag, err := o.Queries.GetTag(ctx, id) + if err != nil { + return nil, fmt.Errorf("failed to fetch created tag: %w", err) + } + slog.Info("admin: tag created", "id", tag.ID, "label", tag.Label, "by", callerID) + o.broadcastAdminEvent("tags.updated", nil) + o.broadcastCFG(ctx) + return tag, nil +} + +// TagsUpdate updates an existing tag. +func (o *Operations) TagsUpdate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + Label string `json:"label"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + if req.Label == "" { + return nil, UserError("label is required") + } + + if _, err := o.Queries.GetTag(ctx, req.ID); err != nil { + return nil, UserError("tag not found") + } + + err := o.Queries.UpdateTag(ctx, db.UpdateTagParams{ID: req.ID, Label: req.Label}) + if isUniqueViolation(err) { + return nil, UserError("tag label already exists") + } + if err != nil { + return nil, fmt.Errorf("failed to update tag: %w", err) + } + + tag, err := o.Queries.GetTag(ctx, req.ID) + if err != nil { + return nil, fmt.Errorf("failed to fetch updated tag: %w", err) + } + slog.Info("admin: tag updated", "id", tag.ID, "label", tag.Label, "by", callerID) + o.broadcastAdminEvent("tags.updated", nil) + o.broadcastCFG(ctx) + return tag, nil +} + +// 
TagsDelete deletes a tag. +func (o *Operations) TagsDelete(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + + if _, err := o.Queries.GetTag(ctx, req.ID); err != nil { + return nil, UserError("tag not found") + } + + if err := o.Queries.DeleteTag(ctx, req.ID); err != nil { + return nil, fmt.Errorf("failed to delete tag: %w", err) + } + slog.Info("admin: tag deleted", "id", req.ID, "by", callerID) + o.broadcastAdminEvent("tags.updated", nil) + o.broadcastCFG(ctx) + return map[string]bool{"ok": true}, nil +} diff --git a/backend/internal/admin/talkgroups.go b/backend/internal/admin/talkgroups.go new file mode 100644 index 0000000..ce621c7 --- /dev/null +++ b/backend/internal/admin/talkgroups.go @@ -0,0 +1,141 @@ +package admin + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + + "github.com/openscanner/openscanner/internal/db" +) + +// TalkgroupsList returns all talkgroups. +func (o *Operations) TalkgroupsList(ctx context.Context, _ json.RawMessage, _ int64) (any, error) { + tgs, err := o.Queries.ListAllTalkgroups(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list talkgroups: %w", err) + } + return mapTalkgroups(tgs), nil +} + +// TalkgroupsCreate creates a new talkgroup. 
+func (o *Operations) TalkgroupsCreate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + SystemID int64 `json:"systemId"` + TalkgroupID int64 `json:"talkgroupId"` + Label *string `json:"label"` + Name *string `json:"name"` + Frequency *int64 `json:"frequency"` + Led *string `json:"led"` + GroupID *int64 `json:"groupId"` + TagID *int64 `json:"tagId"` + Order int64 `json:"order"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + + id, err := o.Queries.CreateTalkgroup(ctx, db.CreateTalkgroupParams{ + SystemID: req.SystemID, + TalkgroupID: req.TalkgroupID, + Label: ptrToNullStr(req.Label), + Name: ptrToNullStr(req.Name), + Frequency: ptrToNullInt(req.Frequency), + Led: ptrToNullStr(req.Led), + GroupID: ptrToNullInt(req.GroupID), + TagID: ptrToNullInt(req.TagID), + Order: req.Order, + }) + if isUniqueViolation(err) { + return nil, UserError("talkgroup already exists") + } + if err != nil { + return nil, fmt.Errorf("failed to create talkgroup: %w", err) + } + + tg, err := o.Queries.GetTalkgroup(ctx, id) + if err != nil { + return nil, fmt.Errorf("failed to fetch created talkgroup: %w", err) + } + slog.Info("admin: talkgroup created", "id", tg.ID, "talkgroup_id", tg.TalkgroupID, "by", callerID) + o.broadcastAdminEvent("talkgroups.updated", nil) + o.broadcastCFG(ctx) + return mapTalkgroup(tg), nil +} + +// TalkgroupsUpdate updates an existing talkgroup. 
+func (o *Operations) TalkgroupsUpdate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + TalkgroupID int64 `json:"talkgroupId"` + Label *string `json:"label"` + Name *string `json:"name"` + Frequency *int64 `json:"frequency"` + Led *string `json:"led"` + GroupID *int64 `json:"groupId"` + TagID *int64 `json:"tagId"` + Order int64 `json:"order"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + + if _, err := o.Queries.GetTalkgroup(ctx, req.ID); err != nil { + return nil, UserError("talkgroup not found") + } + + err := o.Queries.UpdateTalkgroup(ctx, db.UpdateTalkgroupParams{ + ID: req.ID, + TalkgroupID: req.TalkgroupID, + Label: ptrToNullStr(req.Label), + Name: ptrToNullStr(req.Name), + Frequency: ptrToNullInt(req.Frequency), + Led: ptrToNullStr(req.Led), + GroupID: ptrToNullInt(req.GroupID), + TagID: ptrToNullInt(req.TagID), + Order: req.Order, + }) + if isUniqueViolation(err) { + return nil, UserError("talkgroup already exists") + } + if err != nil { + return nil, fmt.Errorf("failed to update talkgroup: %w", err) + } + + tg, err := o.Queries.GetTalkgroup(ctx, req.ID) + if err != nil { + return nil, fmt.Errorf("failed to fetch updated talkgroup: %w", err) + } + slog.Info("admin: talkgroup updated", "id", tg.ID, "talkgroup_id", tg.TalkgroupID, "by", callerID) + o.broadcastAdminEvent("talkgroups.updated", nil) + o.broadcastCFG(ctx) + return mapTalkgroup(tg), nil +} + +// TalkgroupsDelete deletes a talkgroup. 
+func (o *Operations) TalkgroupsDelete(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + + if _, err := o.Queries.GetTalkgroup(ctx, req.ID); err != nil { + return nil, UserError("talkgroup not found") + } + + if err := o.Queries.DeleteTalkgroup(ctx, req.ID); err != nil { + return nil, fmt.Errorf("failed to delete talkgroup: %w", err) + } + slog.Info("admin: talkgroup deleted", "id", req.ID, "by", callerID) + o.broadcastAdminEvent("talkgroups.updated", nil) + o.broadcastCFG(ctx) + return map[string]bool{"ok": true}, nil +} diff --git a/backend/internal/admin/transcription.go b/backend/internal/admin/transcription.go new file mode 100644 index 0000000..6e1feb3 --- /dev/null +++ b/backend/internal/admin/transcription.go @@ -0,0 +1,289 @@ +package admin + +import ( + "context" + "encoding/json" + "fmt" + "io" + "log/slog" + "net/http" + "strings" + "time" +) + +// transcriptionBaseURL reads the transcriptionUrl setting from DB. +func (o *Operations) transcriptionBaseURL(ctx context.Context) (string, error) { + s, err := o.Queries.GetSetting(ctx, "transcriptionUrl") + if err == nil && s.Value != "" && validHTTPURL(s.Value) { + return strings.TrimRight(s.Value, "/"), nil + } + // Fall back to the live manager's URL (e.g. when DB setting was just saved + // but the query above fails due to timing). + if tr := o.Deps.TranscriberReload; tr != nil { + if u := tr.BaseURL(); u != "" { + return strings.TrimRight(u, "/"), nil + } + } + return "", UserError("transcriptionUrl setting is not configured") +} + +// TranscriptionStatus returns whether transcription is enabled, the +// configured model/language, and live connectivity to go-whisper. 
+func (o *Operations) TranscriptionStatus(ctx context.Context, _ json.RawMessage, _ int64) (any, error) { + // Read settings from DB. + getVal := func(key string) string { + s, err := o.Queries.GetSetting(ctx, key) + if err != nil { + return "" + } + return s.Value + } + + enabled := getVal("transcriptionEnabled") == "true" + baseURL := getVal("transcriptionUrl") + model := getVal("transcriptionModel") + language := getVal("transcriptionLanguage") + diarize := getVal("transcriptionDiarize") == "true" + liveDisplay := getVal("liveTranscriptDisplay") == "true" + + // Check live connection to go-whisper. + connected := false + if baseURL != "" && validHTTPURL(baseURL) { + trimmed := strings.TrimRight(baseURL, "/") + reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + req, err := http.NewRequestWithContext(reqCtx, http.MethodGet, trimmed+"/api/whisper/model", nil) + if err == nil { + resp, err := http.DefaultClient.Do(req) + if err == nil { + resp.Body.Close() + connected = resp.StatusCode >= 200 && resp.StatusCode < 400 + } + } + } + + return map[string]any{ + "enabled": enabled, + "url": baseURL, + "model": model, + "language": language, + "diarize": diarize, + "liveDisplay": liveDisplay, + "connected": connected, + }, nil +} + +// TranscriptionModels proxies the model list from go-whisper. 
+func (o *Operations) TranscriptionModels(ctx context.Context, _ json.RawMessage, _ int64) (any, error) { + baseURL, err := o.transcriptionBaseURL(ctx) + if err != nil { + return nil, err + } + + reqCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(reqCtx, http.MethodGet, baseURL+"/api/whisper/model", nil) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("go-whisper unreachable: %w", err) + } + defer resp.Body.Close() + + body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("failed to read response: %w", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("go-whisper returned status %d", resp.StatusCode) + } + + var result json.RawMessage + if err := json.Unmarshal(body, &result); err != nil { + return nil, fmt.Errorf("invalid JSON from go-whisper: %w", err) + } + return result, nil +} + +// TranscriptionDownload triggers a model download on go-whisper. +func (o *Operations) TranscriptionDownload(ctx context.Context, params json.RawMessage, _ int64) (any, error) { + var req struct { + Model string `json:"model"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.Model == "" { + return nil, UserError("model name is required") + } + + // go-whisper expects model names with .bin extension + model := req.Model + if !strings.HasSuffix(model, ".bin") { + model += ".bin" + } + + // tdrz (tinydiarize) models live in a different HuggingFace repo. + // go-whisper's store accepts a full URL as the model path for non-default repos. 
+ if strings.Contains(model, "tdrz") { + model = "https://huggingface.co/akashmjn/tinydiarize-whisper.cpp/resolve/main/ggml-" + strings.TrimPrefix(model, "ggml-") + } + + baseURL, err := o.transcriptionBaseURL(ctx) + if err != nil { + return nil, err + } + + reqBody, _ := json.Marshal(map[string]string{"model": model}) + + // Model downloads can take a long time (500MB+). + reqCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + + httpReq, err := http.NewRequestWithContext(reqCtx, http.MethodPost, baseURL+"/api/whisper/model", strings.NewReader(string(reqBody))) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + httpReq.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(httpReq) + if err != nil { + return nil, fmt.Errorf("go-whisper unreachable: %w", err) + } + defer resp.Body.Close() + + body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("failed to read response: %w", err) + } + + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { + slog.Warn("go-whisper model download failed", "status", resp.StatusCode, "body", string(body)) + return nil, fmt.Errorf("go-whisper returned status %d", resp.StatusCode) + } + + var result json.RawMessage + if err := json.Unmarshal(body, &result); err != nil { + return nil, fmt.Errorf("invalid JSON from go-whisper: %w", err) + } + return result, nil +} + +// TranscriptionDelete deletes a model on go-whisper. +func (o *Operations) TranscriptionDelete(ctx context.Context, params json.RawMessage, _ int64) (any, error) { + var req struct { + ID string `json:"id"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID == "" { + return nil, UserError("model id is required") + } + + // Sanitise: model ID should be alphanumeric + hyphens/dots/underscores only. 
+ for _, ch := range req.ID { + if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') || ch == '-' || ch == '.' || ch == '_') { + return nil, UserError("invalid model id") + } + } + + baseURL, err := o.transcriptionBaseURL(ctx) + if err != nil { + return nil, err + } + + reqCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + httpReq, err := http.NewRequestWithContext(reqCtx, http.MethodDelete, baseURL+"/api/whisper/model/"+req.ID, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + resp, err := http.DefaultClient.Do(httpReq) + if err != nil { + return nil, fmt.Errorf("go-whisper unreachable: %w", err) + } + defer resp.Body.Close() + io.Copy(io.Discard, resp.Body) //nolint:errcheck + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("go-whisper returned status %d", resp.StatusCode) + } + + return map[string]any{"deleted": true}, nil +} + +// TranscriptionStats aggregates transcription DB stats and live pool status. +func (o *Operations) TranscriptionStats(ctx context.Context, _ json.RawMessage, _ int64) (any, error) { + // DB aggregate stats — "recent" = last 24 hours. + since := time.Now().Add(-24 * time.Hour).Unix() + stats, err := o.Queries.TranscriptionStats(ctx, since) + if err != nil { + return nil, fmt.Errorf("query transcription stats: %w", err) + } + + byLang, err := o.Queries.TranscriptionsByLanguage(ctx) + if err != nil { + return nil, fmt.Errorf("query transcriptions by language: %w", err) + } + + byModel, err := o.Queries.TranscriptionsByModel(ctx) + if err != nil { + return nil, fmt.Errorf("query transcriptions by model: %w", err) + } + + // Pool stats (live). + queueDepth := 0 + poolEnabled := false + if tr := o.Deps.TranscriberReload; tr != nil { + poolEnabled = tr.Enabled() + queueDepth = tr.QueueDepth() + } + + // Convert interface{} values from COALESCE/AVG to int64. 
+ toInt64 := func(v interface{}) int64 { + switch n := v.(type) { + case int64: + return n + case float64: + return int64(n) + default: + return 0 + } + } + + langBreakdown := make([]map[string]any, 0, len(byLang)) + for _, l := range byLang { + langBreakdown = append(langBreakdown, map[string]any{ + "language": l.Lang, + "count": l.Cnt, + }) + } + + modelBreakdown := make([]map[string]any, 0, len(byModel)) + for _, m := range byModel { + modelBreakdown = append(modelBreakdown, map[string]any{ + "model": m.ModelName, + "count": m.Cnt, + }) + } + + return map[string]any{ + "total": stats.Total, + "recent24h": stats.RecentCount, + "avgDurationMs": toInt64(stats.AvgDurationMs), + "minDurationMs": toInt64(stats.MinDurationMs), + "maxDurationMs": toInt64(stats.MaxDurationMs), + "queueDepth": queueDepth, + "poolEnabled": poolEnabled, + "byLanguage": langBreakdown, + "byModel": modelBreakdown, + }, nil +} diff --git a/backend/internal/admin/units.go b/backend/internal/admin/units.go new file mode 100644 index 0000000..9dedf47 --- /dev/null +++ b/backend/internal/admin/units.go @@ -0,0 +1,146 @@ +package admin + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + "strconv" + "strings" + + "github.com/openscanner/openscanner/internal/db" +) + +// UnitsList returns units filtered by optional systemId + unitIdPattern. +func (o *Operations) UnitsList(ctx context.Context, params json.RawMessage, _ int64) (any, error) { + var req struct { + SystemID *int64 `json:"systemId"` + UnitIDPattern *string `json:"unitIdPattern"` + } + if params != nil { + _ = json.Unmarshal(params, &req) // ignore parse errors — treat as no filter + } + + var units []db.Unit + var err error + if req.SystemID != nil { + units, err = o.Queries.ListUnitsBySystem(ctx, *req.SystemID) + } else { + units, err = o.Queries.ListAllUnits(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to list units: %w", err) + } + + // Apply unit_id pattern filter if provided (prefix matching). 
+ if req.UnitIDPattern != nil && *req.UnitIDPattern != "" { + filtered := make([]db.Unit, 0, len(units)) + for _, u := range units { + if strings.HasPrefix(strconv.FormatInt(u.UnitID, 10), *req.UnitIDPattern) { + filtered = append(filtered, u) + } + } + units = filtered + } + + return mapUnits(units), nil +} + +// UnitsCreate creates a new unit. +func (o *Operations) UnitsCreate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + SystemID int64 `json:"systemId"` + UnitID int64 `json:"unitId"` + Label *string `json:"label"` + Order int64 `json:"order"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + + id, err := o.Queries.CreateUnit(ctx, db.CreateUnitParams{ + SystemID: req.SystemID, + UnitID: req.UnitID, + Label: ptrToNullStr(req.Label), + Order: req.Order, + }) + if isUniqueViolation(err) { + return nil, UserError("unit already exists") + } + if err != nil { + return nil, fmt.Errorf("failed to create unit: %w", err) + } + + unit, err := o.Queries.GetUnit(ctx, id) + if err != nil { + return nil, fmt.Errorf("failed to fetch created unit: %w", err) + } + slog.Info("admin: unit created", "id", unit.ID, "unit_id", unit.UnitID, "by", callerID) + o.broadcastAdminEvent("units.updated", nil) + return mapUnit(unit), nil +} + +// UnitsUpdate updates an existing unit. 
+func (o *Operations) UnitsUpdate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + UnitID int64 `json:"unitId"` + Label *string `json:"label"` + Order int64 `json:"order"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + + if _, err := o.Queries.GetUnit(ctx, req.ID); err != nil { + return nil, UserError("unit not found") + } + + err := o.Queries.UpdateUnit(ctx, db.UpdateUnitParams{ + ID: req.ID, + UnitID: req.UnitID, + Label: ptrToNullStr(req.Label), + Order: req.Order, + }) + if isUniqueViolation(err) { + return nil, UserError("unit already exists") + } + if err != nil { + return nil, fmt.Errorf("failed to update unit: %w", err) + } + + unit, err := o.Queries.GetUnit(ctx, req.ID) + if err != nil { + return nil, fmt.Errorf("failed to fetch updated unit: %w", err) + } + slog.Info("admin: unit updated", "id", unit.ID, "unit_id", unit.UnitID, "by", callerID) + o.broadcastAdminEvent("units.updated", nil) + return mapUnit(unit), nil +} + +// UnitsDelete deletes a unit. 
+func (o *Operations) UnitsDelete(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + + if _, err := o.Queries.GetUnit(ctx, req.ID); err != nil { + return nil, UserError("unit not found") + } + + if err := o.Queries.DeleteUnit(ctx, req.ID); err != nil { + return nil, fmt.Errorf("failed to delete unit: %w", err) + } + slog.Info("admin: unit deleted", "id", req.ID, "by", callerID) + o.broadcastAdminEvent("units.updated", nil) + return map[string]bool{"ok": true}, nil +} diff --git a/backend/internal/admin/users.go b/backend/internal/admin/users.go new file mode 100644 index 0000000..ece74ba --- /dev/null +++ b/backend/internal/admin/users.go @@ -0,0 +1,203 @@ +package admin + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + "time" + + "github.com/openscanner/openscanner/internal/auth" + "github.com/openscanner/openscanner/internal/db" +) + +// UsersList returns all users. +func (o *Operations) UsersList(ctx context.Context, _ json.RawMessage, _ int64) (any, error) { + users, err := o.Queries.ListUsers(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list users: %w", err) + } + return mapUsers(users), nil +} + +// UsersCreate creates a new user. 
+func (o *Operations) UsersCreate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + Username string `json:"username"` + Password string `json:"password"` + Role string `json:"role"` + Disabled int64 `json:"disabled"` + SystemsJson *string `json:"systemsJson"` + Expiration *int64 `json:"expiration"` + Limit *int64 `json:"limit"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.Username == "" { + return nil, UserError("username is required") + } + if len(req.Username) > 64 { + return nil, UserError("username must be at most 64 characters") + } + if len(req.Password) < 8 { + return nil, UserError("password must be at least 8 characters") + } + if len(req.Password) > 128 { + return nil, UserError("password must be at most 128 characters") + } + if req.Role == "" { + req.Role = "listener" + } + if !validRoles[req.Role] { + return nil, UserError("role must be 'admin' or 'listener'") + } + + hash, err := auth.HashPassword(req.Password) + if err != nil { + return nil, fmt.Errorf("failed to hash password: %w", err) + } + + now := time.Now().Unix() + id, err := o.Queries.CreateUser(ctx, db.CreateUserParams{ + Username: req.Username, + PasswordHash: hash, + Role: req.Role, + Disabled: req.Disabled, + SystemsJson: ptrToNullStr(req.SystemsJson), + Expiration: ptrToNullInt(req.Expiration), + Limit: ptrToNullInt(req.Limit), + PasswordNeedChange: 1, + CreatedAt: now, + UpdatedAt: now, + }) + if isUniqueViolation(err) { + return nil, UserError("username already exists") + } + if err != nil { + return nil, fmt.Errorf("failed to create user: %w", err) + } + + user, err := o.Queries.GetUser(ctx, id) + if err != nil { + return nil, fmt.Errorf("failed to fetch created user: %w", err) + } + slog.Info("admin: user created", "id", user.ID, "username", user.Username, "role", user.Role, "by", callerID) + o.broadcastAdminEvent("users.updated", nil) + return mapUser(user), nil 
+} + +// UsersUpdate updates an existing user. +func (o *Operations) UsersUpdate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + Username string `json:"username"` + Role string `json:"role"` + Disabled int64 `json:"disabled"` + SystemsJson *string `json:"systemsJson"` + Expiration *int64 `json:"expiration"` + Limit *int64 `json:"limit"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + if req.Username == "" { + return nil, UserError("username is required") + } + if len(req.Username) > 64 { + return nil, UserError("username must be at most 64 characters") + } + if req.Role == "" { + return nil, UserError("role is required") + } + if !validRoles[req.Role] { + return nil, UserError("role must be 'admin' or 'listener'") + } + + if _, err := o.Queries.GetUser(ctx, req.ID); err != nil { + return nil, UserError("user not found") + } + + // Prevent disabling the bootstrap admin (id=1). + if req.ID == 1 && req.Disabled != 0 { + return nil, UserError("cannot disable the primary admin account") + } + // Protect bootstrap admin role/expiration/limit. + if req.ID == 1 { + req.Role = "admin" + req.Expiration = nil + req.Limit = nil + } + + err := o.Queries.UpdateUser(ctx, db.UpdateUserParams{ + ID: req.ID, + Username: req.Username, + Role: req.Role, + Disabled: req.Disabled, + SystemsJson: ptrToNullStr(req.SystemsJson), + Expiration: ptrToNullInt(req.Expiration), + Limit: ptrToNullInt(req.Limit), + UpdatedAt: time.Now().Unix(), + }) + if isUniqueViolation(err) { + return nil, UserError("username already exists") + } + if err != nil { + return nil, fmt.Errorf("failed to update user: %w", err) + } + + // Revoke all tokens so stale claims are not trusted after update. + auth.Tokens.RevokeAllForUser(req.ID) + + // Immediately disconnect all active WS sessions for the updated user. 
+ o.disconnectByUser(req.ID) + + user, err := o.Queries.GetUser(ctx, req.ID) + if err != nil { + return nil, fmt.Errorf("failed to fetch updated user: %w", err) + } + slog.Info("admin: user updated", "id", user.ID, "username", user.Username, "role", user.Role, "disabled", user.Disabled, "by", callerID) + o.broadcastAdminEvent("users.updated", nil) + return mapUser(user), nil +} + +// UsersDelete deletes a user. +func (o *Operations) UsersDelete(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + + // Cannot delete your own account. + if callerID == req.ID { + return nil, UserError("cannot delete your own account") + } + // Cannot delete bootstrap admin. + if req.ID == 1 { + return nil, UserError("cannot delete the primary admin account") + } + + if _, err := o.Queries.GetUser(ctx, req.ID); err != nil { + return nil, UserError("user not found") + } + + if err := o.Queries.DeleteUser(ctx, req.ID); err != nil { + return nil, fmt.Errorf("failed to delete user: %w", err) + } + + // Revoke tokens and disconnect active WS sessions for the deleted user. + auth.Tokens.RevokeAllForUser(req.ID) + o.disconnectByUser(req.ID) + + slog.Info("admin: user deleted", "id", req.ID, "by", callerID) + o.broadcastAdminEvent("users.updated", nil) + return map[string]bool{"ok": true}, nil +} diff --git a/backend/internal/admin/webhooks.go b/backend/internal/admin/webhooks.go new file mode 100644 index 0000000..6345b79 --- /dev/null +++ b/backend/internal/admin/webhooks.go @@ -0,0 +1,130 @@ +package admin + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + + "github.com/openscanner/openscanner/internal/db" +) + +// WebhooksList returns all webhooks. 
+func (o *Operations) WebhooksList(ctx context.Context, _ json.RawMessage, _ int64) (any, error) { + whs, err := o.Queries.ListWebhooks(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list webhooks: %w", err) + } + return mapWebhooks(whs), nil +} + +// WebhooksCreate creates a new webhook. +func (o *Operations) WebhooksCreate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + Url string `json:"url"` + Type string `json:"type"` + Secret *string `json:"secret"` + SystemsJson *string `json:"systemsJson"` + Disabled int64 `json:"disabled"` + Order int64 `json:"order"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.Url == "" { + return nil, UserError("url is required") + } + if !validHTTPURL(req.Url) { + return nil, UserError("url must use http or https scheme") + } + + id, err := o.Queries.CreateWebhook(ctx, db.CreateWebhookParams{ + Url: req.Url, + Type: req.Type, + Secret: ptrToNullStr(req.Secret), + SystemsJson: ptrToNullStr(req.SystemsJson), + Disabled: req.Disabled, + Order: req.Order, + }) + if err != nil { + return nil, fmt.Errorf("failed to create webhook: %w", err) + } + + wh, err := o.Queries.GetWebhook(ctx, id) + if err != nil { + return nil, fmt.Errorf("failed to fetch created webhook: %w", err) + } + slog.Info("admin: webhook created", "id", wh.ID, "url", wh.Url, "by", callerID) + o.broadcastAdminEvent("webhooks.updated", nil) + return mapWebhook(wh), nil +} + +// WebhooksUpdate updates an existing webhook. 
+func (o *Operations) WebhooksUpdate(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + Url string `json:"url"` + Type string `json:"type"` + Secret *string `json:"secret"` + SystemsJson *string `json:"systemsJson"` + Disabled int64 `json:"disabled"` + Order int64 `json:"order"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + if req.Url != "" && !validHTTPURL(req.Url) { + return nil, UserError("url must use http or https scheme") + } + + if _, err := o.Queries.GetWebhook(ctx, req.ID); err != nil { + return nil, UserError("webhook not found") + } + + if err := o.Queries.UpdateWebhook(ctx, db.UpdateWebhookParams{ + ID: req.ID, + Url: req.Url, + Type: req.Type, + Secret: ptrToNullStr(req.Secret), + SystemsJson: ptrToNullStr(req.SystemsJson), + Disabled: req.Disabled, + Order: req.Order, + }); err != nil { + return nil, fmt.Errorf("failed to update webhook: %w", err) + } + + wh, err := o.Queries.GetWebhook(ctx, req.ID) + if err != nil { + return nil, fmt.Errorf("failed to fetch updated webhook: %w", err) + } + slog.Info("admin: webhook updated", "id", wh.ID, "url", wh.Url, "by", callerID) + o.broadcastAdminEvent("webhooks.updated", nil) + return mapWebhook(wh), nil +} + +// WebhooksDelete deletes a webhook. 
+func (o *Operations) WebhooksDelete(ctx context.Context, params json.RawMessage, callerID int64) (any, error) { + var req struct { + ID int64 `json:"id"` + } + if err := json.Unmarshal(params, &req); err != nil { + return nil, UserError("invalid request body") + } + if req.ID <= 0 { + return nil, UserError("id is required") + } + + if _, err := o.Queries.GetWebhook(ctx, req.ID); err != nil { + return nil, UserError("webhook not found") + } + + if err := o.Queries.DeleteWebhook(ctx, req.ID); err != nil { + return nil, fmt.Errorf("failed to delete webhook: %w", err) + } + slog.Info("admin: webhook deleted", "id", req.ID, "by", callerID) + o.broadcastAdminEvent("webhooks.updated", nil) + return map[string]bool{"ok": true}, nil +} diff --git a/backend/internal/ws/admin_ops.go b/backend/internal/ws/admin_ops.go deleted file mode 100644 index 0bd53bd..0000000 --- a/backend/internal/ws/admin_ops.go +++ /dev/null @@ -1,3201 +0,0 @@ -// Package ws — admin CRUD operation handlers for the WebSocket protocol. 
-package ws - -import ( - "context" - "database/sql" - "encoding/csv" - "encoding/json" - "errors" - "fmt" - "io" - "log/slog" - "net/http" - "net/url" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "time" - - "github.com/google/uuid" - "github.com/openscanner/openscanner/internal/audio" - "github.com/openscanner/openscanner/internal/auth" - "github.com/openscanner/openscanner/internal/db" - "github.com/openscanner/openscanner/internal/logging" -) - -// ── Helpers ── - -func wsPtrToNullStr(p *string) sql.NullString { - if p == nil { - return sql.NullString{} - } - return sql.NullString{String: *p, Valid: true} -} - -func wsPtrToNullInt(p *int64) sql.NullInt64 { - if p == nil { - return sql.NullInt64{} - } - return sql.NullInt64{Int64: *p, Valid: true} -} - -func wsNullStr(n sql.NullString) *string { - if !n.Valid { - return nil - } - return &n.String -} - -func wsNullInt(n sql.NullInt64) *int64 { - if !n.Valid { - return nil - } - return &n.Int64 -} - -func wsIsUniqueViolation(err error) bool { - return err != nil && strings.Contains(err.Error(), "UNIQUE") -} - -// remapSystemsJSON rewrites the system PKs embedded in a systems_json column -// (used by api_keys, downstreams, webhooks, and users) so that grants -// referring to a system by its old PK end up referring to the freshly -// inserted row's PK after import. Accepts and returns a *string mirroring -// the export shape (nil = "all systems"). Any system PK that doesn't appear -// in the remap is dropped from the grant rather than silently broken. -// -// Shape: `[{"id": , "talkgroups": [...]}]` per -// auth.SystemGrant. -func remapSystemsJSON(in *string, systemRemap map[int64]int64) *string { - if in == nil || strings.TrimSpace(*in) == "" { - return in - } - var grants []auth.SystemGrant - if err := json.Unmarshal([]byte(*in), &grants); err != nil { - // Fall through with nil grants — try the legacy flat-id form. 
- var ids []int64 - if jerr := json.Unmarshal([]byte(*in), &ids); jerr != nil { - slog.Warn("import config: systems_json not recognised; preserving as-is", - "error", err) - return in - } - mapped := make([]int64, 0, len(ids)) - for _, id := range ids { - if newID, ok := systemRemap[id]; ok { - mapped = append(mapped, newID) - } else { - slog.Warn("import config: dropping unknown system grant", "system_pk", id) - } - } - out, _ := json.Marshal(mapped) - s := string(out) - return &s - } - mapped := make([]auth.SystemGrant, 0, len(grants)) - for _, g := range grants { - newID, ok := systemRemap[g.ID] - if !ok { - slog.Warn("import config: dropping unknown system grant", "system_pk", g.ID) - continue - } - mapped = append(mapped, auth.SystemGrant{ID: newID, Talkgroups: g.Talkgroups}) - } - out, _ := json.Marshal(mapped) - s := string(out) - return &s -} - -func wsValidHTTPURL(raw string) bool { - u, err := url.Parse(raw) - if err != nil { - return false - } - return u.Scheme == "http" || u.Scheme == "https" -} - -// validRoles is the set of allowed user roles. -var wsValidRoles = map[string]bool{ - auth.RoleAdmin: true, - auth.RoleListener: true, -} - -// SensitiveSettingKeys are settings whose values are encrypted at rest. -var SensitiveSettingKeys = map[string]bool{ - "vapidPrivateKey": true, - "jwtSecret": true, -} - -// wsAllowedSettingKeys mirrors the allowed setting keys from config.go. 
-var wsAllowedSettingKeys = map[string]bool{ - "activityDashboard": true, - "afsSystems": true, - "apiKeyCallRate": true, - "audioConversion": true, - "audioEncodingPreset": true, - "autoPopulateSystems": true, - "branding": true, - "disableDuplicateDetection": true, - "duplicateDetectionTimeFrame": true, - "email": true, - "keypadBeeps": true, - "logLevel": true, - "maxClients": true, - "playbackGoesLive": true, - "pruneDays": true, - "publicAccess": true, - "pushNotifications": true, - "searchPatchedTalkgroups": true, - "shareableLinks": true, - "sharedLinkExpiry": true, - "showListenersCount": true, - "sortTalkgroups": true, - "tagsToggle": true, - "time12hFormat": true, - "transcriptionDiarize": true, - "transcriptionEnabled": true, - "transcriptionLanguage": true, - "liveTranscriptDisplay": true, - "transcriptionModel": true, - "transcriptionUrl": true, - "vapidPrivateKey": true, - "vapidPublicKey": true, - "webhooksEnabled": true, -} - -// hiddenTopLevelDirs for FS browsing. -var wsHiddenTopLevelDirs = map[string]bool{ - "bin": true, "boot": true, "dev": true, "lib": true, - "lib32": true, "lib64": true, "libx32": true, - "proc": true, "run": true, "sbin": true, "sys": true, - "usr": true, "etc": true, "snap": true, "lost+found": true, -} - -// ── Response mappers ── - -func mapUser(u db.User) map[string]any { - return map[string]any{ - "id": u.ID, - "username": u.Username, - "role": u.Role, - "disabled": u.Disabled, - "systemsJson": wsNullStr(u.SystemsJson), - "expiration": wsNullInt(u.Expiration), - "limit": wsNullInt(u.Limit), - "createdAt": u.CreatedAt, - "updatedAt": u.UpdatedAt, - } -} - -func mapUsers(users []db.User) []map[string]any { - out := make([]map[string]any, len(users)) - for i, u := range users { - out[i] = mapUser(u) - } - return out -} - -func mapSystem(s db.System) map[string]any { - return map[string]any{ - "id": s.ID, - "systemId": s.SystemID, - "label": s.Label, - "autoPopulateTalkgroups": s.AutoPopulateTalkgroups, - "blacklistsJson": 
wsNullStr(s.BlacklistsJson), - "led": wsNullStr(s.Led), - "order": s.Order, - } -} - -func mapSystems(systems []db.System) []map[string]any { - out := make([]map[string]any, len(systems)) - for i, s := range systems { - out[i] = mapSystem(s) - } - return out -} - -func mapTalkgroup(t db.Talkgroup) map[string]any { - return map[string]any{ - "id": t.ID, - "systemId": t.SystemID, - "talkgroupId": t.TalkgroupID, - "label": wsNullStr(t.Label), - "name": wsNullStr(t.Name), - "frequency": wsNullInt(t.Frequency), - "led": wsNullStr(t.Led), - "groupId": wsNullInt(t.GroupID), - "tagId": wsNullInt(t.TagID), - "order": t.Order, - } -} - -func mapTalkgroups(tgs []db.Talkgroup) []map[string]any { - out := make([]map[string]any, len(tgs)) - for i, t := range tgs { - out[i] = mapTalkgroup(t) - } - return out -} - -func mapUnit(u db.Unit) map[string]any { - return map[string]any{ - "id": u.ID, - "systemId": u.SystemID, - "unitId": u.UnitID, - "label": wsNullStr(u.Label), - "order": u.Order, - } -} - -func mapUnits(units []db.Unit) []map[string]any { - out := make([]map[string]any, len(units)) - for i, u := range units { - out[i] = mapUnit(u) - } - return out -} - -func mapAPIKey(k db.ApiKey) map[string]any { - fingerprint := auth.HashAPIKey(k.Key) - if len(fingerprint) > 12 { - fingerprint = fingerprint[:12] - } - return map[string]any{ - "id": k.ID, - "fingerprint": fingerprint, - "ident": wsNullStr(k.Ident), - "disabled": k.Disabled, - "systemsJson": wsNullStr(k.SystemsJson), - "callRateLimit": wsNullInt(k.CallRateLimit), - "order": k.Order, - } -} - -func mapAPIKeys(keys []db.ApiKey) []map[string]any { - out := make([]map[string]any, len(keys)) - for i, k := range keys { - out[i] = mapAPIKey(k) - } - return out -} - -func mapDirMonitor(d db.Dirmonitor) map[string]any { - return map[string]any{ - "id": d.ID, - "directory": d.Directory, - "type": d.Type, - "mask": wsNullStr(d.Mask), - "extension": wsNullStr(d.Extension), - "frequency": wsNullInt(d.Frequency), - "delay": 
wsNullInt(d.Delay), - "deleteAfter": d.DeleteAfter, - "usePolling": d.UsePolling, - "disabled": d.Disabled, - "systemId": wsNullInt(d.SystemID), - "talkgroupId": wsNullInt(d.TalkgroupID), - "order": d.Order, - } -} - -func mapDirMonitors(dms []db.Dirmonitor) []map[string]any { - out := make([]map[string]any, len(dms)) - for i, d := range dms { - out[i] = mapDirMonitor(d) - } - return out -} - -func mapDownstream(d db.Downstream) map[string]any { - return map[string]any{ - "id": d.ID, - "url": d.Url, - "hasApiKey": d.ApiKey != "", - "systemsJson": wsNullStr(d.SystemsJson), - "disabled": d.Disabled, - "order": d.Order, - } -} - -func mapDownstreams(ds []db.Downstream) []map[string]any { - out := make([]map[string]any, len(ds)) - for i, d := range ds { - out[i] = mapDownstream(d) - } - return out -} - -func mapWebhook(w db.Webhook) map[string]any { - return map[string]any{ - "id": w.ID, - "url": w.Url, - "type": w.Type, - "secret": wsNullStr(w.Secret), - "systemsJson": wsNullStr(w.SystemsJson), - "disabled": w.Disabled, - "order": w.Order, - } -} - -func mapWebhooks(ws []db.Webhook) []map[string]any { - out := make([]map[string]any, len(ws)) - for i, w := range ws { - out[i] = mapWebhook(w) - } - return out -} - -func mapSharedLink(r db.ListSharedLinksRow) map[string]any { - m := map[string]any{ - "id": r.ID, - "callId": r.CallID, - "token": r.Token, - "createdAt": r.CreatedAt, - "sharedBy": r.SharedBy, - "dateTime": r.DateTime, - "duration": r.Duration.Int64, - "systemLabel": r.SystemLabel.String, - "talkgroupLabel": r.TalkgroupLabel.String, - "talkgroupName": r.TalkgroupName.String, - } - if r.ExpiresAt.Valid { - m["expiresAt"] = r.ExpiresAt.Int64 - } else { - m["expiresAt"] = nil - } - return m -} - -// ── Handler map ── - -// adminOpHandlers returns the complete map of supported admin WS operations. 
-func (c *Client) adminOpHandlers() map[string]adminOpHandler { - return map[string]adminOpHandler{ - // Activity & Logs (existing handlers in client.go) - "activity.stats": c.opActivityStats, - "activity.chart": c.opActivityChart, - "activity.top-talkgroups": c.opTopTalkgroups, - "logs.query": c.opLogsQuery, - "logs.level": c.opLogsLevel, - - // Users - "users.list": c.opUsersList, - "users.create": c.opUsersCreate, - "users.update": c.opUsersUpdate, - "users.delete": c.opUsersDelete, - - // Systems - "systems.list": c.opSystemsList, - "systems.create": c.opSystemsCreate, - "systems.update": c.opSystemsUpdate, - "systems.delete": c.opSystemsDelete, - - // Talkgroups - "talkgroups.list": c.opTalkgroupsList, - "talkgroups.create": c.opTalkgroupsCreate, - "talkgroups.update": c.opTalkgroupsUpdate, - "talkgroups.delete": c.opTalkgroupsDelete, - - // Units - "units.list": c.opUnitsList, - "units.create": c.opUnitsCreate, - "units.update": c.opUnitsUpdate, - "units.delete": c.opUnitsDelete, - - // Groups - "groups.list": c.opGroupsList, - "groups.create": c.opGroupsCreate, - "groups.update": c.opGroupsUpdate, - "groups.delete": c.opGroupsDelete, - - // Tags - "tags.list": c.opTagsList, - "tags.create": c.opTagsCreate, - "tags.update": c.opTagsUpdate, - "tags.delete": c.opTagsDelete, - - // API Keys - "apikeys.list": c.opAPIKeysList, - "apikeys.create": c.opAPIKeysCreate, - "apikeys.update": c.opAPIKeysUpdate, - "apikeys.delete": c.opAPIKeysDelete, - - // DirMonitors - "dirmonitors.list": c.opDirMonitorsList, - "dirmonitors.create": c.opDirMonitorsCreate, - "dirmonitors.update": c.opDirMonitorsUpdate, - "dirmonitors.delete": c.opDirMonitorsDelete, - - // Downstreams - "downstreams.list": c.opDownstreamsList, - "downstreams.create": c.opDownstreamsCreate, - "downstreams.update": c.opDownstreamsUpdate, - "downstreams.delete": c.opDownstreamsDelete, - - // Webhooks - "webhooks.list": c.opWebhooksList, - "webhooks.create": c.opWebhooksCreate, - "webhooks.update": 
c.opWebhooksUpdate, - "webhooks.delete": c.opWebhooksDelete, - - // Shared Links - "shared-links.list": c.opSharedLinksList, - "shared-links.delete": c.opSharedLinksDelete, - - // Config - "config.get": c.opConfigGet, - "config.update": c.opConfigUpdate, - - // Filesystem - "fs.directories": c.opFSDirectories, - - // Export - "export.config": c.opExportConfig, - "export.talkgroups": c.opExportTalkgroups, - "export.units": c.opExportUnits, - "export.groups": c.opExportGroups, - "export.tags": c.opExportTags, - - // Import - "import.config": c.opImportConfig, - - // RadioReference - "radioreference.apply": c.opRadioReferenceApply, - - // Transcription model management - "transcription.status": c.opTranscriptionStatus, - "transcription.models": c.opTranscriptionModels, - "transcription.download": c.opTranscriptionDownload, - "transcription.delete": c.opTranscriptionDelete, - "transcription.stats": c.opTranscriptionStats, - } -} - -// ── Logs level (moved from inline in client.go) ── - -func (c *Client) opLogsLevel(_ context.Context, _ json.RawMessage) (any, error) { - return map[string]string{"level": logging.GetLevel()}, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// USERS -// ══════════════════════════════════════════════════════════════════════════════ - -func (c *Client) opUsersList(ctx context.Context, _ json.RawMessage) (any, error) { - users, err := c.hub.queries.ListUsers(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list users: %w", err) - } - return mapUsers(users), nil -} - -func (c *Client) opUsersCreate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - Username string `json:"username"` - Password string `json:"password"` - Role string `json:"role"` - Disabled int64 `json:"disabled"` - SystemsJson *string `json:"systemsJson"` - Expiration *int64 `json:"expiration"` - Limit *int64 `json:"limit"` - } - if err := json.Unmarshal(params, &req); err != nil { - return 
nil, userError("invalid request body") - } - if req.Username == "" { - return nil, userError("username is required") - } - if len(req.Username) > 64 { - return nil, userError("username must be at most 64 characters") - } - if len(req.Password) < 8 { - return nil, userError("password must be at least 8 characters") - } - if len(req.Password) > 128 { - return nil, userError("password must be at most 128 characters") - } - if req.Role == "" { - req.Role = "listener" - } - if !wsValidRoles[req.Role] { - return nil, userError("role must be 'admin' or 'listener'") - } - - hash, err := auth.HashPassword(req.Password) - if err != nil { - return nil, fmt.Errorf("failed to hash password: %w", err) - } - - now := time.Now().Unix() - id, err := c.hub.queries.CreateUser(ctx, db.CreateUserParams{ - Username: req.Username, - PasswordHash: hash, - Role: req.Role, - Disabled: req.Disabled, - SystemsJson: wsPtrToNullStr(req.SystemsJson), - Expiration: wsPtrToNullInt(req.Expiration), - Limit: wsPtrToNullInt(req.Limit), - PasswordNeedChange: 1, - CreatedAt: now, - UpdatedAt: now, - }) - if wsIsUniqueViolation(err) { - return nil, userError("username already exists") - } - if err != nil { - return nil, fmt.Errorf("failed to create user: %w", err) - } - - user, err := c.hub.queries.GetUser(ctx, id) - if err != nil { - return nil, fmt.Errorf("failed to fetch created user: %w", err) - } - slog.Info("admin: user created", "id", user.ID, "username", user.Username, "role", user.Role, "by", c.userID) - c.hub.BroadcastAdminEvent("users.updated", nil) - return mapUser(user), nil -} - -func (c *Client) opUsersUpdate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - Username string `json:"username"` - Role string `json:"role"` - Disabled int64 `json:"disabled"` - SystemsJson *string `json:"systemsJson"` - Expiration *int64 `json:"expiration"` - Limit *int64 `json:"limit"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, 
userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - if req.Username == "" { - return nil, userError("username is required") - } - if len(req.Username) > 64 { - return nil, userError("username must be at most 64 characters") - } - if req.Role == "" { - return nil, userError("role is required") - } - if !wsValidRoles[req.Role] { - return nil, userError("role must be 'admin' or 'listener'") - } - - if _, err := c.hub.queries.GetUser(ctx, req.ID); err != nil { - return nil, userError("user not found") - } - - // Prevent disabling the bootstrap admin (id=1). - if req.ID == 1 && req.Disabled != 0 { - return nil, userError("cannot disable the primary admin account") - } - // Protect bootstrap admin role/expiration/limit. - if req.ID == 1 { - req.Role = "admin" - req.Expiration = nil - req.Limit = nil - } - - err := c.hub.queries.UpdateUser(ctx, db.UpdateUserParams{ - ID: req.ID, - Username: req.Username, - Role: req.Role, - Disabled: req.Disabled, - SystemsJson: wsPtrToNullStr(req.SystemsJson), - Expiration: wsPtrToNullInt(req.Expiration), - Limit: wsPtrToNullInt(req.Limit), - UpdatedAt: time.Now().Unix(), - }) - if wsIsUniqueViolation(err) { - return nil, userError("username already exists") - } - if err != nil { - return nil, fmt.Errorf("failed to update user: %w", err) - } - - // Revoke all tokens so stale claims are not trusted after update. - auth.Tokens.RevokeAllForUser(req.ID) - - // Immediately disconnect all active WS sessions for the updated user. 
- c.hub.DisconnectByUser(req.ID) - - user, err := c.hub.queries.GetUser(ctx, req.ID) - if err != nil { - return nil, fmt.Errorf("failed to fetch updated user: %w", err) - } - slog.Info("admin: user updated", "id", user.ID, "username", user.Username, "role", user.Role, "disabled", user.Disabled, "by", c.userID) - c.hub.BroadcastAdminEvent("users.updated", nil) - return mapUser(user), nil -} - -func (c *Client) opUsersDelete(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - - // Cannot delete your own account. - if c.userID == req.ID { - return nil, userError("cannot delete your own account") - } - // Cannot delete bootstrap admin. - if req.ID == 1 { - return nil, userError("cannot delete the primary admin account") - } - - if _, err := c.hub.queries.GetUser(ctx, req.ID); err != nil { - return nil, userError("user not found") - } - - if err := c.hub.queries.DeleteUser(ctx, req.ID); err != nil { - return nil, fmt.Errorf("failed to delete user: %w", err) - } - - // Revoke tokens and disconnect active WS sessions for the deleted user. 
- auth.Tokens.RevokeAllForUser(req.ID) - c.hub.DisconnectByUser(req.ID) - - slog.Info("admin: user deleted", "id", req.ID, "by", c.userID) - c.hub.BroadcastAdminEvent("users.updated", nil) - return map[string]bool{"ok": true}, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// SYSTEMS -// ══════════════════════════════════════════════════════════════════════════════ - -func (c *Client) opSystemsList(ctx context.Context, _ json.RawMessage) (any, error) { - systems, err := c.hub.queries.ListSystems(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list systems: %w", err) - } - return mapSystems(systems), nil -} - -func (c *Client) opSystemsCreate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - SystemID int64 `json:"systemId"` - Label string `json:"label"` - AutoPopulateTalkgroups int64 `json:"autoPopulateTalkgroups"` - BlacklistsJson *string `json:"blacklistsJson"` - Led *string `json:"led"` - Order int64 `json:"order"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - - id, err := c.hub.queries.CreateSystem(ctx, db.CreateSystemParams{ - SystemID: req.SystemID, - Label: req.Label, - AutoPopulateTalkgroups: req.AutoPopulateTalkgroups, - BlacklistsJson: wsPtrToNullStr(req.BlacklistsJson), - Led: wsPtrToNullStr(req.Led), - Order: req.Order, - }) - if wsIsUniqueViolation(err) { - return nil, userError("system_id already exists") - } - if err != nil { - return nil, fmt.Errorf("failed to create system: %w", err) - } - - system, err := c.hub.queries.GetSystem(ctx, id) - if err != nil { - return nil, fmt.Errorf("failed to fetch created system: %w", err) - } - slog.Info("admin: system created", "id", system.ID, "system_id", system.SystemID, "label", system.Label, "by", c.userID) - c.hub.BroadcastAdminEvent("systems.updated", nil) - c.hub.BroadcastCFG(ctx) - return mapSystem(system), nil -} - -func (c *Client) opSystemsUpdate(ctx 
context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - SystemID int64 `json:"systemId"` - Label string `json:"label"` - AutoPopulateTalkgroups int64 `json:"autoPopulateTalkgroups"` - BlacklistsJson *string `json:"blacklistsJson"` - Led *string `json:"led"` - Order int64 `json:"order"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - - if _, err := c.hub.queries.GetSystem(ctx, req.ID); err != nil { - return nil, userError("system not found") - } - - err := c.hub.queries.UpdateSystem(ctx, db.UpdateSystemParams{ - ID: req.ID, - SystemID: req.SystemID, - Label: req.Label, - AutoPopulateTalkgroups: req.AutoPopulateTalkgroups, - BlacklistsJson: wsPtrToNullStr(req.BlacklistsJson), - Led: wsPtrToNullStr(req.Led), - Order: req.Order, - }) - if wsIsUniqueViolation(err) { - return nil, userError("system_id already exists") - } - if err != nil { - return nil, fmt.Errorf("failed to update system: %w", err) - } - - system, err := c.hub.queries.GetSystem(ctx, req.ID) - if err != nil { - return nil, fmt.Errorf("failed to fetch updated system: %w", err) - } - slog.Info("admin: system updated", "id", system.ID, "system_id", system.SystemID, "by", c.userID) - c.hub.BroadcastAdminEvent("systems.updated", nil) - c.hub.BroadcastCFG(ctx) - return mapSystem(system), nil -} - -func (c *Client) opSystemsDelete(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - - if _, err := c.hub.queries.GetSystem(ctx, req.ID); err != nil { - return nil, userError("system not found") - } - - if err := c.hub.queries.DeleteSystem(ctx, req.ID); err != nil { - return nil, fmt.Errorf("failed to delete system: %w", 
err) - } - slog.Info("admin: system deleted", "id", req.ID, "by", c.userID) - c.hub.BroadcastAdminEvent("systems.updated", nil) - c.hub.BroadcastCFG(ctx) - return map[string]bool{"ok": true}, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// TALKGROUPS -// ══════════════════════════════════════════════════════════════════════════════ - -func (c *Client) opTalkgroupsList(ctx context.Context, _ json.RawMessage) (any, error) { - tgs, err := c.hub.queries.ListAllTalkgroups(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list talkgroups: %w", err) - } - return mapTalkgroups(tgs), nil -} - -func (c *Client) opTalkgroupsCreate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - SystemID int64 `json:"systemId"` - TalkgroupID int64 `json:"talkgroupId"` - Label *string `json:"label"` - Name *string `json:"name"` - Frequency *int64 `json:"frequency"` - Led *string `json:"led"` - GroupID *int64 `json:"groupId"` - TagID *int64 `json:"tagId"` - Order int64 `json:"order"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - - id, err := c.hub.queries.CreateTalkgroup(ctx, db.CreateTalkgroupParams{ - SystemID: req.SystemID, - TalkgroupID: req.TalkgroupID, - Label: wsPtrToNullStr(req.Label), - Name: wsPtrToNullStr(req.Name), - Frequency: wsPtrToNullInt(req.Frequency), - Led: wsPtrToNullStr(req.Led), - GroupID: wsPtrToNullInt(req.GroupID), - TagID: wsPtrToNullInt(req.TagID), - Order: req.Order, - }) - if wsIsUniqueViolation(err) { - return nil, userError("talkgroup already exists") - } - if err != nil { - return nil, fmt.Errorf("failed to create talkgroup: %w", err) - } - - tg, err := c.hub.queries.GetTalkgroup(ctx, id) - if err != nil { - return nil, fmt.Errorf("failed to fetch created talkgroup: %w", err) - } - slog.Info("admin: talkgroup created", "id", tg.ID, "talkgroup_id", tg.TalkgroupID, "by", c.userID) - 
c.hub.BroadcastAdminEvent("talkgroups.updated", nil) - c.hub.BroadcastCFG(ctx) - return mapTalkgroup(tg), nil -} - -func (c *Client) opTalkgroupsUpdate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - TalkgroupID int64 `json:"talkgroupId"` - Label *string `json:"label"` - Name *string `json:"name"` - Frequency *int64 `json:"frequency"` - Led *string `json:"led"` - GroupID *int64 `json:"groupId"` - TagID *int64 `json:"tagId"` - Order int64 `json:"order"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - - if _, err := c.hub.queries.GetTalkgroup(ctx, req.ID); err != nil { - return nil, userError("talkgroup not found") - } - - err := c.hub.queries.UpdateTalkgroup(ctx, db.UpdateTalkgroupParams{ - ID: req.ID, - TalkgroupID: req.TalkgroupID, - Label: wsPtrToNullStr(req.Label), - Name: wsPtrToNullStr(req.Name), - Frequency: wsPtrToNullInt(req.Frequency), - Led: wsPtrToNullStr(req.Led), - GroupID: wsPtrToNullInt(req.GroupID), - TagID: wsPtrToNullInt(req.TagID), - Order: req.Order, - }) - if wsIsUniqueViolation(err) { - return nil, userError("talkgroup already exists") - } - if err != nil { - return nil, fmt.Errorf("failed to update talkgroup: %w", err) - } - - tg, err := c.hub.queries.GetTalkgroup(ctx, req.ID) - if err != nil { - return nil, fmt.Errorf("failed to fetch updated talkgroup: %w", err) - } - slog.Info("admin: talkgroup updated", "id", tg.ID, "talkgroup_id", tg.TalkgroupID, "by", c.userID) - c.hub.BroadcastAdminEvent("talkgroups.updated", nil) - c.hub.BroadcastCFG(ctx) - return mapTalkgroup(tg), nil -} - -func (c *Client) opTalkgroupsDelete(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, 
userError("id is required") - } - - if _, err := c.hub.queries.GetTalkgroup(ctx, req.ID); err != nil { - return nil, userError("talkgroup not found") - } - - if err := c.hub.queries.DeleteTalkgroup(ctx, req.ID); err != nil { - return nil, fmt.Errorf("failed to delete talkgroup: %w", err) - } - slog.Info("admin: talkgroup deleted", "id", req.ID, "by", c.userID) - c.hub.BroadcastAdminEvent("talkgroups.updated", nil) - c.hub.BroadcastCFG(ctx) - return map[string]bool{"ok": true}, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// UNITS -// ══════════════════════════════════════════════════════════════════════════════ - -func (c *Client) opUnitsList(ctx context.Context, params json.RawMessage) (any, error) { - // Optional filter by systemId and unitId pattern. - var req struct { - SystemID *int64 `json:"systemId"` - UnitIDPattern *string `json:"unitIdPattern"` - } - if params != nil { - _ = json.Unmarshal(params, &req) // ignore parse errors — treat as no filter - } - - var units []db.Unit - var err error - if req.SystemID != nil { - units, err = c.hub.queries.ListUnitsBySystem(ctx, *req.SystemID) - } else { - units, err = c.hub.queries.ListAllUnits(ctx) - } - if err != nil { - return nil, fmt.Errorf("failed to list units: %w", err) - } - - // Apply unit_id pattern filter if provided (prefix matching). 
- if req.UnitIDPattern != nil && *req.UnitIDPattern != "" { - filtered := make([]db.Unit, 0, len(units)) - for _, u := range units { - if strings.HasPrefix(strconv.FormatInt(u.UnitID, 10), *req.UnitIDPattern) { - filtered = append(filtered, u) - } - } - units = filtered - } - - return mapUnits(units), nil -} - -func (c *Client) opUnitsCreate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - SystemID int64 `json:"systemId"` - UnitID int64 `json:"unitId"` - Label *string `json:"label"` - Order int64 `json:"order"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - - id, err := c.hub.queries.CreateUnit(ctx, db.CreateUnitParams{ - SystemID: req.SystemID, - UnitID: req.UnitID, - Label: wsPtrToNullStr(req.Label), - Order: req.Order, - }) - if wsIsUniqueViolation(err) { - return nil, userError("unit already exists") - } - if err != nil { - return nil, fmt.Errorf("failed to create unit: %w", err) - } - - unit, err := c.hub.queries.GetUnit(ctx, id) - if err != nil { - return nil, fmt.Errorf("failed to fetch created unit: %w", err) - } - slog.Info("admin: unit created", "id", unit.ID, "unit_id", unit.UnitID, "by", c.userID) - c.hub.BroadcastAdminEvent("units.updated", nil) - return mapUnit(unit), nil -} - -func (c *Client) opUnitsUpdate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - UnitID int64 `json:"unitId"` - Label *string `json:"label"` - Order int64 `json:"order"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - - if _, err := c.hub.queries.GetUnit(ctx, req.ID); err != nil { - return nil, userError("unit not found") - } - - err := c.hub.queries.UpdateUnit(ctx, db.UpdateUnitParams{ - ID: req.ID, - UnitID: req.UnitID, - Label: wsPtrToNullStr(req.Label), - Order: req.Order, - }) - if 
wsIsUniqueViolation(err) { - return nil, userError("unit already exists") - } - if err != nil { - return nil, fmt.Errorf("failed to update unit: %w", err) - } - - unit, err := c.hub.queries.GetUnit(ctx, req.ID) - if err != nil { - return nil, fmt.Errorf("failed to fetch updated unit: %w", err) - } - slog.Info("admin: unit updated", "id", unit.ID, "unit_id", unit.UnitID, "by", c.userID) - c.hub.BroadcastAdminEvent("units.updated", nil) - return mapUnit(unit), nil -} - -func (c *Client) opUnitsDelete(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - - if _, err := c.hub.queries.GetUnit(ctx, req.ID); err != nil { - return nil, userError("unit not found") - } - - if err := c.hub.queries.DeleteUnit(ctx, req.ID); err != nil { - return nil, fmt.Errorf("failed to delete unit: %w", err) - } - slog.Info("admin: unit deleted", "id", req.ID, "by", c.userID) - c.hub.BroadcastAdminEvent("units.updated", nil) - return map[string]bool{"ok": true}, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// GROUPS -// ══════════════════════════════════════════════════════════════════════════════ - -func (c *Client) opGroupsList(ctx context.Context, _ json.RawMessage) (any, error) { - groups, err := c.hub.queries.ListGroups(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list groups: %w", err) - } - return groups, nil -} - -func (c *Client) opGroupsCreate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - Label string `json:"label"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.Label == "" { - return nil, userError("label is required") - } - - id, err := c.hub.queries.CreateGroup(ctx, req.Label) - if 
wsIsUniqueViolation(err) { - return nil, userError("group label already exists") - } - if err != nil { - return nil, fmt.Errorf("failed to create group: %w", err) - } - - group, err := c.hub.queries.GetGroup(ctx, id) - if err != nil { - return nil, fmt.Errorf("failed to fetch created group: %w", err) - } - slog.Info("admin: group created", "id", group.ID, "label", group.Label, "by", c.userID) - c.hub.BroadcastAdminEvent("groups.updated", nil) - c.hub.BroadcastCFG(ctx) - return group, nil -} - -func (c *Client) opGroupsUpdate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - Label string `json:"label"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - if req.Label == "" { - return nil, userError("label is required") - } - - if _, err := c.hub.queries.GetGroup(ctx, req.ID); err != nil { - return nil, userError("group not found") - } - - err := c.hub.queries.UpdateGroup(ctx, db.UpdateGroupParams{ID: req.ID, Label: req.Label}) - if wsIsUniqueViolation(err) { - return nil, userError("group label already exists") - } - if err != nil { - return nil, fmt.Errorf("failed to update group: %w", err) - } - - group, err := c.hub.queries.GetGroup(ctx, req.ID) - if err != nil { - return nil, fmt.Errorf("failed to fetch updated group: %w", err) - } - slog.Info("admin: group updated", "id", group.ID, "label", group.Label, "by", c.userID) - c.hub.BroadcastAdminEvent("groups.updated", nil) - c.hub.BroadcastCFG(ctx) - return group, nil -} - -func (c *Client) opGroupsDelete(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - - if _, err := c.hub.queries.GetGroup(ctx, req.ID); err != 
nil { - return nil, userError("group not found") - } - - if err := c.hub.queries.DeleteGroup(ctx, req.ID); err != nil { - return nil, fmt.Errorf("failed to delete group: %w", err) - } - slog.Info("admin: group deleted", "id", req.ID, "by", c.userID) - c.hub.BroadcastAdminEvent("groups.updated", nil) - c.hub.BroadcastCFG(ctx) - return map[string]bool{"ok": true}, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// TAGS -// ══════════════════════════════════════════════════════════════════════════════ - -func (c *Client) opTagsList(ctx context.Context, _ json.RawMessage) (any, error) { - tags, err := c.hub.queries.ListTags(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list tags: %w", err) - } - return tags, nil -} - -func (c *Client) opTagsCreate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - Label string `json:"label"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.Label == "" { - return nil, userError("label is required") - } - - id, err := c.hub.queries.CreateTag(ctx, req.Label) - if wsIsUniqueViolation(err) { - return nil, userError("tag label already exists") - } - if err != nil { - return nil, fmt.Errorf("failed to create tag: %w", err) - } - - tag, err := c.hub.queries.GetTag(ctx, id) - if err != nil { - return nil, fmt.Errorf("failed to fetch created tag: %w", err) - } - slog.Info("admin: tag created", "id", tag.ID, "label", tag.Label, "by", c.userID) - c.hub.BroadcastAdminEvent("tags.updated", nil) - c.hub.BroadcastCFG(ctx) - return tag, nil -} - -func (c *Client) opTagsUpdate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - Label string `json:"label"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - if req.Label == 
"" { - return nil, userError("label is required") - } - - if _, err := c.hub.queries.GetTag(ctx, req.ID); err != nil { - return nil, userError("tag not found") - } - - err := c.hub.queries.UpdateTag(ctx, db.UpdateTagParams{ID: req.ID, Label: req.Label}) - if wsIsUniqueViolation(err) { - return nil, userError("tag label already exists") - } - if err != nil { - return nil, fmt.Errorf("failed to update tag: %w", err) - } - - tag, err := c.hub.queries.GetTag(ctx, req.ID) - if err != nil { - return nil, fmt.Errorf("failed to fetch updated tag: %w", err) - } - slog.Info("admin: tag updated", "id", tag.ID, "label", tag.Label, "by", c.userID) - c.hub.BroadcastAdminEvent("tags.updated", nil) - c.hub.BroadcastCFG(ctx) - return tag, nil -} - -func (c *Client) opTagsDelete(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - - if _, err := c.hub.queries.GetTag(ctx, req.ID); err != nil { - return nil, userError("tag not found") - } - - if err := c.hub.queries.DeleteTag(ctx, req.ID); err != nil { - return nil, fmt.Errorf("failed to delete tag: %w", err) - } - slog.Info("admin: tag deleted", "id", req.ID, "by", c.userID) - c.hub.BroadcastAdminEvent("tags.updated", nil) - c.hub.BroadcastCFG(ctx) - return map[string]bool{"ok": true}, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// API KEYS -// ══════════════════════════════════════════════════════════════════════════════ - -func (c *Client) opAPIKeysList(ctx context.Context, _ json.RawMessage) (any, error) { - keys, err := c.hub.queries.ListAPIKeys(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list API keys: %w", err) - } - return mapAPIKeys(keys), nil -} - -func (c *Client) opAPIKeysCreate(ctx context.Context, params json.RawMessage) (any, error) { 
- var req struct { - Key *string `json:"key"` - Ident *string `json:"ident"` - Disabled int64 `json:"disabled"` - SystemsJson *string `json:"systemsJson"` - CallRateLimit *int64 `json:"callRateLimit"` - Order int64 `json:"order"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - - plainKey := uuid.New().String() - if req.Key != nil && *req.Key != "" { - plainKey = *req.Key - } - hashedKey := auth.HashAPIKey(plainKey) - - id, err := c.hub.queries.CreateAPIKey(ctx, db.CreateAPIKeyParams{ - Key: hashedKey, - Ident: wsPtrToNullStr(req.Ident), - Disabled: req.Disabled, - SystemsJson: wsPtrToNullStr(req.SystemsJson), - CallRateLimit: wsPtrToNullInt(req.CallRateLimit), - Order: req.Order, - }) - if wsIsUniqueViolation(err) { - return nil, userError("API key already exists") - } - if err != nil { - return nil, fmt.Errorf("failed to create API key: %w", err) - } - - key, err := c.hub.queries.GetAPIKey(ctx, id) - if err != nil { - return nil, fmt.Errorf("failed to fetch created API key: %w", err) - } - slog.Info("admin: api key created", "id", key.ID, "ident", key.Ident.String, "by", c.userID) - c.hub.BroadcastAdminEvent("apikeys.updated", nil) - - resp := mapAPIKey(key) - resp["createdKey"] = plainKey // Return plain key once on creation. 
- return resp, nil -} - -func (c *Client) opAPIKeysUpdate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - Key *string `json:"key"` - Ident *string `json:"ident"` - Disabled int64 `json:"disabled"` - SystemsJson *string `json:"systemsJson"` - CallRateLimit *int64 `json:"callRateLimit"` - Order int64 `json:"order"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - - current, err := c.hub.queries.GetAPIKey(ctx, req.ID) - if err != nil { - return nil, userError("API key not found") - } - - keyHash := current.Key - if req.Key != nil && *req.Key != "" { - keyHash = auth.HashAPIKey(*req.Key) - } - - err = c.hub.queries.UpdateAPIKey(ctx, db.UpdateAPIKeyParams{ - ID: req.ID, - Key: keyHash, - Ident: wsPtrToNullStr(req.Ident), - Disabled: req.Disabled, - SystemsJson: wsPtrToNullStr(req.SystemsJson), - CallRateLimit: wsPtrToNullInt(req.CallRateLimit), - Order: req.Order, - }) - if wsIsUniqueViolation(err) { - return nil, userError("API key already exists") - } - if err != nil { - return nil, fmt.Errorf("failed to update API key: %w", err) - } - - key, err := c.hub.queries.GetAPIKey(ctx, req.ID) - if err != nil { - return nil, fmt.Errorf("failed to fetch updated API key: %w", err) - } - slog.Info("admin: api key updated", "id", key.ID, "ident", key.Ident.String, "by", c.userID) - c.hub.BroadcastAdminEvent("apikeys.updated", nil) - return mapAPIKey(key), nil -} - -func (c *Client) opAPIKeysDelete(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - - if _, err := c.hub.queries.GetAPIKey(ctx, req.ID); err != nil { - return nil, userError("API key not found") - } - - if err := 
c.hub.queries.DeleteAPIKey(ctx, req.ID); err != nil { - return nil, fmt.Errorf("failed to delete API key: %w", err) - } - slog.Info("admin: api key deleted", "id", req.ID, "by", c.userID) - c.hub.BroadcastAdminEvent("apikeys.updated", nil) - return map[string]bool{"ok": true}, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// DIRMONITORS -// ══════════════════════════════════════════════════════════════════════════════ - -func (c *Client) opDirMonitorsList(ctx context.Context, _ json.RawMessage) (any, error) { - dms, err := c.hub.queries.ListDirMonitors(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list dirmonitors: %w", err) - } - return mapDirMonitors(dms), nil -} - -func (c *Client) opDirMonitorsCreate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - Directory string `json:"directory"` - Type string `json:"type"` - Mask *string `json:"mask"` - Extension *string `json:"extension"` - Frequency *int64 `json:"frequency"` - Delay *int64 `json:"delay"` - DeleteAfter int64 `json:"deleteAfter"` - UsePolling int64 `json:"usePolling"` - Disabled int64 `json:"disabled"` - SystemID *int64 `json:"systemId"` - TalkgroupID *int64 `json:"talkgroupId"` - Order int64 `json:"order"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.Directory == "" { - return nil, userError("directory is required") - } - if info, statErr := os.Stat(req.Directory); statErr != nil { - return nil, userError("directory does not exist or is not accessible: " + statErr.Error()) - } else if !info.IsDir() { - return nil, userError("path is not a directory: " + req.Directory) - } - - id, err := c.hub.queries.CreateDirMonitor(ctx, db.CreateDirMonitorParams{ - Directory: req.Directory, - Type: req.Type, - Mask: wsPtrToNullStr(req.Mask), - Extension: wsPtrToNullStr(req.Extension), - Frequency: wsPtrToNullInt(req.Frequency), - Delay: 
wsPtrToNullInt(req.Delay), - DeleteAfter: req.DeleteAfter, - UsePolling: req.UsePolling, - Disabled: req.Disabled, - SystemID: wsPtrToNullInt(req.SystemID), - TalkgroupID: wsPtrToNullInt(req.TalkgroupID), - Order: req.Order, - }) - if err != nil { - return nil, fmt.Errorf("failed to create dirmonitor: %w", err) - } - - dm, err := c.hub.queries.GetDirMonitor(ctx, id) - if err != nil { - return nil, fmt.Errorf("failed to fetch created dirmonitor: %w", err) - } - if c.hub.deps.DirMonitorReload != nil { - c.hub.deps.DirMonitorReload.Reload() - } - slog.Info("admin: dirmonitor created", "id", dm.ID, "dir", dm.Directory, "by", c.userID) - c.hub.BroadcastAdminEvent("dirmonitors.updated", nil) - return mapDirMonitor(dm), nil -} - -func (c *Client) opDirMonitorsUpdate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - Directory string `json:"directory"` - Type string `json:"type"` - Mask *string `json:"mask"` - Extension *string `json:"extension"` - Frequency *int64 `json:"frequency"` - Delay *int64 `json:"delay"` - DeleteAfter int64 `json:"deleteAfter"` - UsePolling int64 `json:"usePolling"` - Disabled int64 `json:"disabled"` - SystemID *int64 `json:"systemId"` - TalkgroupID *int64 `json:"talkgroupId"` - Order int64 `json:"order"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - if req.Directory == "" { - return nil, userError("directory is required") - } - if info, statErr := os.Stat(req.Directory); statErr != nil { - return nil, userError("directory does not exist or is not accessible: " + statErr.Error()) - } else if !info.IsDir() { - return nil, userError("path is not a directory: " + req.Directory) - } - - if _, err := c.hub.queries.GetDirMonitor(ctx, req.ID); err != nil { - return nil, userError("dirmonitor not found") - } - - if err := c.hub.queries.UpdateDirMonitor(ctx, 
db.UpdateDirMonitorParams{ - ID: req.ID, - Directory: req.Directory, - Type: req.Type, - Mask: wsPtrToNullStr(req.Mask), - Extension: wsPtrToNullStr(req.Extension), - Frequency: wsPtrToNullInt(req.Frequency), - Delay: wsPtrToNullInt(req.Delay), - DeleteAfter: req.DeleteAfter, - UsePolling: req.UsePolling, - Disabled: req.Disabled, - SystemID: wsPtrToNullInt(req.SystemID), - TalkgroupID: wsPtrToNullInt(req.TalkgroupID), - Order: req.Order, - }); err != nil { - return nil, fmt.Errorf("failed to update dirmonitor: %w", err) - } - - dm, err := c.hub.queries.GetDirMonitor(ctx, req.ID) - if err != nil { - return nil, fmt.Errorf("failed to fetch updated dirmonitor: %w", err) - } - if c.hub.deps.DirMonitorReload != nil { - c.hub.deps.DirMonitorReload.Reload() - } - slog.Info("admin: dirmonitor updated", "id", dm.ID, "dir", dm.Directory, "by", c.userID) - c.hub.BroadcastAdminEvent("dirmonitors.updated", nil) - return mapDirMonitor(dm), nil -} - -func (c *Client) opDirMonitorsDelete(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - - if _, err := c.hub.queries.GetDirMonitor(ctx, req.ID); err != nil { - return nil, userError("dirmonitor not found") - } - - if err := c.hub.queries.DeleteDirMonitor(ctx, req.ID); err != nil { - return nil, fmt.Errorf("failed to delete dirmonitor: %w", err) - } - if c.hub.deps.DirMonitorReload != nil { - c.hub.deps.DirMonitorReload.Reload() - } - slog.Info("admin: dirmonitor deleted", "id", req.ID, "by", c.userID) - c.hub.BroadcastAdminEvent("dirmonitors.updated", nil) - return map[string]bool{"ok": true}, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// DOWNSTREAMS -// ══════════════════════════════════════════════════════════════════════════════ - -func (c *Client) 
opDownstreamsList(ctx context.Context, _ json.RawMessage) (any, error) { - ds, err := c.hub.queries.ListDownstreams(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list downstreams: %w", err) - } - return mapDownstreams(ds), nil -} - -func (c *Client) opDownstreamsCreate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - Url string `json:"url"` - ApiKey string `json:"apiKey"` - SystemsJson *string `json:"systemsJson"` - Disabled int64 `json:"disabled"` - Order int64 `json:"order"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.Url == "" { - return nil, userError("url is required") - } - if !wsValidHTTPURL(req.Url) { - return nil, userError("url must use http or https scheme") - } - - apiKey := req.ApiKey - if c.hub.deps.EncryptionKey != "" && apiKey != "" { - enc, err := auth.EncryptString(apiKey, c.hub.deps.EncryptionKey) - if err != nil { - return nil, fmt.Errorf("encrypt downstream API key: %w", err) - } - apiKey = enc - } - - id, err := c.hub.queries.CreateDownstream(ctx, db.CreateDownstreamParams{ - Url: req.Url, - ApiKey: apiKey, - SystemsJson: wsPtrToNullStr(req.SystemsJson), - Disabled: req.Disabled, - Order: req.Order, - }) - if err != nil { - return nil, fmt.Errorf("failed to create downstream: %w", err) - } - - ds, err := c.hub.queries.GetDownstream(ctx, id) - if err != nil { - return nil, fmt.Errorf("failed to fetch created downstream: %w", err) - } - if c.hub.deps.DownstreamReload != nil { - c.hub.deps.DownstreamReload.Reload() - } - slog.Info("admin: downstream created", "id", ds.ID, "url", ds.Url, "by", c.userID) - c.hub.BroadcastAdminEvent("downstreams.updated", nil) - return mapDownstream(ds), nil -} - -func (c *Client) opDownstreamsUpdate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - Url string `json:"url"` - ApiKey string `json:"apiKey"` - SystemsJson *string 
`json:"systemsJson"` - Disabled int64 `json:"disabled"` - Order int64 `json:"order"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - if req.Url != "" && !wsValidHTTPURL(req.Url) { - return nil, userError("url must use http or https scheme") - } - - existing, err := c.hub.queries.GetDownstream(ctx, req.ID) - if err != nil { - return nil, userError("downstream not found") - } - - // Preserve existing API key if none provided (key is never sent to clients). - apiKey := existing.ApiKey - if req.ApiKey != "" { - if c.hub.deps.EncryptionKey != "" { - enc, err := auth.EncryptString(req.ApiKey, c.hub.deps.EncryptionKey) - if err != nil { - return nil, fmt.Errorf("encrypt downstream API key: %w", err) - } - apiKey = enc - } else { - apiKey = req.ApiKey - } - } - - if err := c.hub.queries.UpdateDownstream(ctx, db.UpdateDownstreamParams{ - ID: req.ID, - Url: req.Url, - ApiKey: apiKey, - SystemsJson: wsPtrToNullStr(req.SystemsJson), - Disabled: req.Disabled, - Order: req.Order, - }); err != nil { - return nil, fmt.Errorf("failed to update downstream: %w", err) - } - - ds, err := c.hub.queries.GetDownstream(ctx, req.ID) - if err != nil { - return nil, fmt.Errorf("failed to fetch updated downstream: %w", err) - } - if c.hub.deps.DownstreamReload != nil { - c.hub.deps.DownstreamReload.Reload() - } - slog.Info("admin: downstream updated", "id", ds.ID, "url", ds.Url, "by", c.userID) - c.hub.BroadcastAdminEvent("downstreams.updated", nil) - return mapDownstream(ds), nil -} - -func (c *Client) opDownstreamsDelete(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - - if _, err := c.hub.queries.GetDownstream(ctx, req.ID); err != nil { 
- return nil, userError("downstream not found") - } - - if err := c.hub.queries.DeleteDownstream(ctx, req.ID); err != nil { - return nil, fmt.Errorf("failed to delete downstream: %w", err) - } - if c.hub.deps.DownstreamReload != nil { - c.hub.deps.DownstreamReload.Reload() - } - slog.Info("admin: downstream deleted", "id", req.ID, "by", c.userID) - c.hub.BroadcastAdminEvent("downstreams.updated", nil) - return map[string]bool{"ok": true}, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// WEBHOOKS -// ══════════════════════════════════════════════════════════════════════════════ - -func (c *Client) opWebhooksList(ctx context.Context, _ json.RawMessage) (any, error) { - whs, err := c.hub.queries.ListWebhooks(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list webhooks: %w", err) - } - return mapWebhooks(whs), nil -} - -func (c *Client) opWebhooksCreate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - Url string `json:"url"` - Type string `json:"type"` - Secret *string `json:"secret"` - SystemsJson *string `json:"systemsJson"` - Disabled int64 `json:"disabled"` - Order int64 `json:"order"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.Url == "" { - return nil, userError("url is required") - } - if !wsValidHTTPURL(req.Url) { - return nil, userError("url must use http or https scheme") - } - - id, err := c.hub.queries.CreateWebhook(ctx, db.CreateWebhookParams{ - Url: req.Url, - Type: req.Type, - Secret: wsPtrToNullStr(req.Secret), - SystemsJson: wsPtrToNullStr(req.SystemsJson), - Disabled: req.Disabled, - Order: req.Order, - }) - if err != nil { - return nil, fmt.Errorf("failed to create webhook: %w", err) - } - - wh, err := c.hub.queries.GetWebhook(ctx, id) - if err != nil { - return nil, fmt.Errorf("failed to fetch created webhook: %w", err) - } - slog.Info("admin: webhook created", "id", wh.ID, "url", 
wh.Url, "by", c.userID) - c.hub.BroadcastAdminEvent("webhooks.updated", nil) - return mapWebhook(wh), nil -} - -func (c *Client) opWebhooksUpdate(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - Url string `json:"url"` - Type string `json:"type"` - Secret *string `json:"secret"` - SystemsJson *string `json:"systemsJson"` - Disabled int64 `json:"disabled"` - Order int64 `json:"order"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - if req.Url != "" && !wsValidHTTPURL(req.Url) { - return nil, userError("url must use http or https scheme") - } - - if _, err := c.hub.queries.GetWebhook(ctx, req.ID); err != nil { - return nil, userError("webhook not found") - } - - if err := c.hub.queries.UpdateWebhook(ctx, db.UpdateWebhookParams{ - ID: req.ID, - Url: req.Url, - Type: req.Type, - Secret: wsPtrToNullStr(req.Secret), - SystemsJson: wsPtrToNullStr(req.SystemsJson), - Disabled: req.Disabled, - Order: req.Order, - }); err != nil { - return nil, fmt.Errorf("failed to update webhook: %w", err) - } - - wh, err := c.hub.queries.GetWebhook(ctx, req.ID) - if err != nil { - return nil, fmt.Errorf("failed to fetch updated webhook: %w", err) - } - slog.Info("admin: webhook updated", "id", wh.ID, "url", wh.Url, "by", c.userID) - c.hub.BroadcastAdminEvent("webhooks.updated", nil) - return mapWebhook(wh), nil -} - -func (c *Client) opWebhooksDelete(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - - if _, err := c.hub.queries.GetWebhook(ctx, req.ID); err != nil { - return nil, userError("webhook not found") - } - - if err := c.hub.queries.DeleteWebhook(ctx, req.ID); err != nil { 
- return nil, fmt.Errorf("failed to delete webhook: %w", err) - } - slog.Info("admin: webhook deleted", "id", req.ID, "by", c.userID) - c.hub.BroadcastAdminEvent("webhooks.updated", nil) - return map[string]bool{"ok": true}, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// SHARED LINKS -// ══════════════════════════════════════════════════════════════════════════════ - -func (c *Client) opSharedLinksList(ctx context.Context, _ json.RawMessage) (any, error) { - rows, err := c.hub.queries.ListSharedLinks(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list shared links: %w", err) - } - items := make([]map[string]any, 0, len(rows)) - for _, r := range rows { - items = append(items, mapSharedLink(r)) - } - return items, nil -} - -func (c *Client) opSharedLinksDelete(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID int64 `json:"id"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID <= 0 { - return nil, userError("id is required") - } - - if err := c.hub.queries.DeleteSharedLink(ctx, req.ID); err != nil { - return nil, fmt.Errorf("failed to delete shared link: %w", err) - } - c.hub.BroadcastAdminEvent("shared-links.updated", nil) - return map[string]bool{"deleted": true}, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// CONFIG -// ══════════════════════════════════════════════════════════════════════════════ - -func (c *Client) opConfigGet(ctx context.Context, _ json.RawMessage) (any, error) { - settings, err := c.hub.queries.ListSettings(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list settings: %w", err) - } - - settingsList := make([]map[string]string, len(settings)) - for i, s := range settings { - val := s.Value - if SensitiveSettingKeys[s.Key] && c.hub.deps.EncryptionKey != "" { - if plain, err := auth.DecryptString(val, 
c.hub.deps.EncryptionKey); err == nil { - val = plain - } - } - settingsList[i] = map[string]string{"key": s.Key, "value": val} - } - - return map[string]any{ - "settings": settingsList, - "capabilities": map[string]bool{ - "ffmpeg": c.hub.deps.FFmpegAvailable, - "fdkAac": c.hub.deps.FDKAACAvailable, - "whisper": c.hub.deps.WhisperAvailable, - }, - }, nil -} - -func (c *Client) opConfigUpdate(ctx context.Context, params json.RawMessage) (any, error) { - var body struct { - Settings []struct { - Key string `json:"key"` - Value string `json:"value"` - } `json:"settings"` - } - if err := json.Unmarshal(params, &body); err != nil { - return nil, userError("invalid request body") - } - settings := body.Settings - - // Validate all keys first. - for _, s := range settings { - if !wsAllowedSettingKeys[s.Key] { - return nil, userError("unknown setting key: " + s.Key) - } - if s.Key == "logLevel" { - if _, ok := logging.ParseLevel(s.Value); !ok { - return nil, userError("invalid logLevel; expected debug, info, warn, or error") - } - } - if s.Key == "audioEncodingPreset" { - if !audio.IsValidEncodingPreset(s.Value) { - return nil, userError("invalid audioEncodingPreset value") - } - if audio.IsHEEncodingPreset(s.Value) && !c.hub.deps.FDKAACAvailable { - return nil, userError("selected HE-AAC preset requires libfdk_aac support in ffmpeg") - } - } - if s.Key == "audioConversion" { - if v, err := strconv.Atoi(s.Value); err == nil && v != 0 && !c.hub.deps.FFmpegAvailable { - return nil, userError("ffmpeg is not installed — install it and restart the service to enable audio conversion") - } - } - } - - sqlDB := c.hub.deps.SQLDB - if sqlDB == nil { - return nil, fmt.Errorf("transaction support not available") - } - - tx, err := sqlDB.BeginTx(ctx, nil) - if err != nil { - return nil, fmt.Errorf("failed to begin transaction: %w", err) - } - defer tx.Rollback() //nolint:errcheck - - qtx := c.hub.queries.WithTx(tx) - for _, s := range settings { - val := s.Value - if 
SensitiveSettingKeys[s.Key] && c.hub.deps.EncryptionKey != "" && val != "" { - enc, err := auth.EncryptString(val, c.hub.deps.EncryptionKey) - if err != nil { - return nil, fmt.Errorf("encrypt setting %q: %w", s.Key, err) - } - val = enc - } - if err := qtx.UpsertSetting(ctx, db.UpsertSettingParams{Key: s.Key, Value: val}); err != nil { - return nil, fmt.Errorf("failed to save config: %w", err) - } - } - - if err := tx.Commit(); err != nil { - return nil, fmt.Errorf("failed to commit config: %w", err) - } - - // Log each changed setting, redacting sensitive keys. - for _, s := range settings { - v := s.Value - if s.Key == "vapidPrivateKey" { - v = "[REDACTED]" - } - slog.Info("admin: config updated", "key", s.Key, "value", v, "by", c.userID) - } - - // Apply log level change at runtime. - for _, s := range settings { - if s.Key == "logLevel" { - if err := logging.SetLevel(s.Value); err != nil { - slog.Warn("invalid logLevel setting, keeping previous runtime level", "value", s.Value, "error", err) - } - break - } - } - - // Hot-reload transcription if any transcription setting changed. - if c.hub.deps.TranscriberReload != nil { - transcriptionKeys := map[string]bool{ - "transcriptionEnabled": true, - "transcriptionUrl": true, - "transcriptionModel": true, - "transcriptionLanguage": true, - "transcriptionDiarize": true, - } - needsReload := false - for _, s := range settings { - if transcriptionKeys[s.Key] { - needsReload = true - break - } - } - if needsReload { - // Read current settings from DB (just committed). 
- tEnabled, _ := c.hub.queries.GetSetting(ctx, "transcriptionEnabled") - tURL, _ := c.hub.queries.GetSetting(ctx, "transcriptionUrl") - tModel, _ := c.hub.queries.GetSetting(ctx, "transcriptionModel") - tLang, _ := c.hub.queries.GetSetting(ctx, "transcriptionLanguage") - tDiarize, _ := c.hub.queries.GetSetting(ctx, "transcriptionDiarize") - - ok := c.hub.deps.TranscriberReload.Reload( - tEnabled.Value == "true", - tURL.Value, - tModel.Value, - tLang.Value, - tDiarize.Value == "true", - ) - c.hub.deps.WhisperAvailable = ok && tEnabled.Value == "true" - } - } - - // Broadcast updated config to all WS clients using the safe, - // curated CFG builder (excludes secrets like VAPID keys). - c.hub.BroadcastCFG(ctx) - - c.hub.BroadcastAdminEvent("config.updated", nil) - return map[string]bool{"ok": true}, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// FILESYSTEM -// ══════════════════════════════════════════════════════════════════════════════ - -func (c *Client) opFSDirectories(_ context.Context, params json.RawMessage) (any, error) { - var req struct { - Path string `json:"path"` - } - if params != nil { - _ = json.Unmarshal(params, &req) - } - if req.Path == "" { - req.Path = "/" - } - - clean := filepath.Clean(req.Path) - if !filepath.IsAbs(clean) { - return nil, userError("path must be absolute") - } - - info, err := os.Stat(clean) - if err != nil { - return nil, userError("directory does not exist or is not accessible: " + err.Error()) - } - if !info.IsDir() { - return nil, userError("path is not a directory: " + clean) - } - - entries, err := os.ReadDir(clean) - if err != nil { - return nil, userError("failed to read directory: " + err.Error()) - } - - type dirEntry struct { - Name string `json:"name"` - Path string `json:"path"` - } - - dirs := make([]dirEntry, 0, len(entries)) - for _, e := range entries { - if !e.IsDir() { - continue - } - name := e.Name() - if clean == "/" && wsHiddenTopLevelDirs[name] { - continue 
- } - if strings.HasPrefix(name, ".") { - continue - } - dirs = append(dirs, dirEntry{Name: name, Path: filepath.Join(clean, name)}) - } - sort.Slice(dirs, func(i, j int) bool { - return strings.ToLower(dirs[i].Name) < strings.ToLower(dirs[j].Name) - }) - - var parent *string - if clean != "/" { - p := filepath.Dir(clean) - parent = &p - } - - return map[string]any{ - "path": clean, - "parent": parent, - "directories": dirs, - }, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// EXPORT -// ══════════════════════════════════════════════════════════════════════════════ - -func (c *Client) opExportConfig(ctx context.Context, _ json.RawMessage) (any, error) { - settings, err := c.hub.queries.ListSettings(ctx) - if err != nil { - return nil, fmt.Errorf("failed to export settings: %w", err) - } - users, err := c.hub.queries.ListUsers(ctx) - if err != nil { - return nil, fmt.Errorf("failed to export users: %w", err) - } - systems, err := c.hub.queries.ListSystems(ctx) - if err != nil { - return nil, fmt.Errorf("failed to export systems: %w", err) - } - talkgroups, err := c.hub.queries.ListAllTalkgroups(ctx) - if err != nil { - return nil, fmt.Errorf("failed to export talkgroups: %w", err) - } - units, err := c.hub.queries.ListAllUnits(ctx) - if err != nil { - return nil, fmt.Errorf("failed to export units: %w", err) - } - groups, err := c.hub.queries.ListGroups(ctx) - if err != nil { - return nil, fmt.Errorf("failed to export groups: %w", err) - } - tags, err := c.hub.queries.ListTags(ctx) - if err != nil { - return nil, fmt.Errorf("failed to export tags: %w", err) - } - apiKeys, err := c.hub.queries.ListAPIKeys(ctx) - if err != nil { - return nil, fmt.Errorf("failed to export api keys: %w", err) - } - dirmonitors, err := c.hub.queries.ListDirMonitors(ctx) - if err != nil { - return nil, fmt.Errorf("failed to export dirmonitors: %w", err) - } - downstreams, err := c.hub.queries.ListDownstreams(ctx) - if err != nil { - return 
nil, fmt.Errorf("failed to export downstreams: %w", err) - } - webhooks, err := c.hub.queries.ListWebhooks(ctx) - if err != nil { - return nil, fmt.Errorf("failed to export webhooks: %w", err) - } - - // Export all fields — use snake_case keys to match db struct JSON tags. - // API keys include the hashed key so import can restore authentication. - // Downstream API keys and webhook secrets are included for full backup. - // The exported JSON file should be treated as sensitive. - exportAPIKeys := make([]map[string]any, len(apiKeys)) - for i, k := range apiKeys { - exportAPIKeys[i] = map[string]any{ - "id": k.ID, - "key": k.Key, - "ident": wsNullStr(k.Ident), - "disabled": k.Disabled, - "systems_json": wsNullStr(k.SystemsJson), - "call_rate_limit": wsNullInt(k.CallRateLimit), - "order": k.Order, - } - } - exportDownstreams := make([]map[string]any, len(downstreams)) - for i, d := range downstreams { - exportDownstreams[i] = map[string]any{ - "id": d.ID, - "url": d.Url, - "api_key": d.ApiKey, - "systems_json": wsNullStr(d.SystemsJson), - "disabled": d.Disabled, - "order": d.Order, - } - } - exportWebhooks := make([]map[string]any, len(webhooks)) - for i, w := range webhooks { - exportWebhooks[i] = map[string]any{ - "id": w.ID, - "url": w.Url, - "type": w.Type, - "secret": wsNullStr(w.Secret), - "systems_json": wsNullStr(w.SystemsJson), - "disabled": w.Disabled, - "order": w.Order, - } - } - - return map[string]any{ - "settings": settings, - "users": users, - "systems": systems, - "talkgroups": talkgroups, - "units": units, - "groups": groups, - "tags": tags, - "apiKeys": exportAPIKeys, - "dirmonitors": dirmonitors, - "downstreams": exportDownstreams, - "webhooks": exportWebhooks, - }, nil -} - -func (c *Client) opExportTalkgroups(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - SystemID *int64 `json:"systemId"` - } - if params != nil { - _ = json.Unmarshal(params, &req) - } - if req.SystemID == nil { - return nil, fmt.Errorf("systemId 
is required") - } - - talkgroups, err := c.hub.queries.ListTalkgroupsBySystem(ctx, *req.SystemID) - if err != nil { - return nil, fmt.Errorf("failed to list talkgroups: %w", err) - } - - // Build ID→label maps so we can emit portable text names instead of - // PK integers (PKs are not stable across instances). - groupMap := make(map[int64]string) - if gs, err := c.hub.queries.ListGroups(ctx); err == nil { - for _, g := range gs { - groupMap[g.ID] = g.Label - } - } - tagMap := make(map[int64]string) - if ts, err := c.hub.queries.ListTags(ctx); err == nil { - for _, t := range ts { - tagMap[t.ID] = t.Label - } - } - - var buf strings.Builder - w := csv.NewWriter(&buf) - _ = w.Write([]string{"talkgroup_id", "label", "name", "tag", "group", "frequency", "led", "order"}) - for _, tg := range talkgroups { - freq := "" - if tg.Frequency.Valid { - freq = strconv.FormatInt(tg.Frequency.Int64, 10) - } - groupLabel := "" - if tg.GroupID.Valid { - groupLabel = groupMap[tg.GroupID.Int64] - } - tagLabel := "" - if tg.TagID.Valid { - tagLabel = tagMap[tg.TagID.Int64] - } - _ = w.Write([]string{ - strconv.FormatInt(tg.TalkgroupID, 10), - tg.Label.String, - tg.Name.String, - tagLabel, - groupLabel, - freq, - tg.Led.String, - strconv.FormatInt(tg.Order, 10), - }) - } - w.Flush() - - return buf.String(), nil -} - -func (c *Client) opExportUnits(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - SystemID *int64 `json:"systemId"` - } - if params != nil { - _ = json.Unmarshal(params, &req) - } - if req.SystemID == nil { - return nil, fmt.Errorf("systemId is required") - } - - units, err := c.hub.queries.ListUnitsBySystem(ctx, *req.SystemID) - if err != nil { - return nil, fmt.Errorf("failed to list units: %w", err) - } - - var buf strings.Builder - w := csv.NewWriter(&buf) - _ = w.Write([]string{"unit_id", "label", "order"}) - for _, u := range units { - _ = w.Write([]string{ - strconv.FormatInt(u.UnitID, 10), - u.Label.String, - strconv.FormatInt(u.Order, 
10), - }) - } - w.Flush() - - return buf.String(), nil -} - -func (c *Client) opExportGroups(ctx context.Context, _ json.RawMessage) (any, error) { - groups, err := c.hub.queries.ListGroups(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list groups: %w", err) - } - var buf strings.Builder - w := csv.NewWriter(&buf) - _ = w.Write([]string{"label"}) - for _, g := range groups { - _ = w.Write([]string{g.Label}) - } - w.Flush() - return buf.String(), nil -} - -func (c *Client) opExportTags(ctx context.Context, _ json.RawMessage) (any, error) { - tags, err := c.hub.queries.ListTags(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list tags: %w", err) - } - var buf strings.Builder - w := csv.NewWriter(&buf) - _ = w.Write([]string{"label"}) - for _, t := range tags { - _ = w.Write([]string{t.Label}) - } - w.Flush() - return buf.String(), nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// IMPORT -// ══════════════════════════════════════════════════════════════════════════════ - -// importAPIKey, importDownstream, and importWebhook mirror the flat shape -// emitted by opExportConfig (plain string/null instead of {String,Valid} -// blobs). Unmarshalling directly into the db.* structs would fail for any -// non-null nullable field because sql.NullString has no JSON unmarshaler. 
-type importAPIKey struct { - Key string `json:"key"` - Ident *string `json:"ident"` - Disabled int64 `json:"disabled"` - SystemsJson *string `json:"systems_json"` - CallRateLimit *int64 `json:"call_rate_limit"` - Order int64 `json:"order"` -} - -type importDownstream struct { - Url string `json:"url"` - ApiKey string `json:"api_key"` - SystemsJson *string `json:"systems_json"` - Disabled int64 `json:"disabled"` - Order int64 `json:"order"` -} - -type importWebhook struct { - Url string `json:"url"` - Type string `json:"type"` - Secret *string `json:"secret"` - SystemsJson *string `json:"systems_json"` - Disabled int64 `json:"disabled"` - Order int64 `json:"order"` -} - -func (c *Client) opImportConfig(ctx context.Context, params json.RawMessage) (any, error) { - var data struct { - Settings []db.Setting `json:"settings"` - Groups []db.Group `json:"groups"` - Tags []db.Tag `json:"tags"` - Systems []db.System `json:"systems"` - Talkgroups []db.Talkgroup `json:"talkgroups"` - Units []db.Unit `json:"units"` - APIKeys []importAPIKey `json:"apiKeys"` - DirMonitors []db.Dirmonitor `json:"dirmonitors"` - Downstreams []importDownstream `json:"downstreams"` - Webhooks []importWebhook `json:"webhooks"` - } - if err := json.Unmarshal(params, &data); err != nil { - slog.Warn("import config: failed to parse payload", "error", err) - return nil, userError("invalid backup file: " + err.Error()) - } - - // Validate encrypted values: reject if no key configured, or if the wrong key is configured. 
- encKey := c.hub.deps.EncryptionKey - for _, s := range data.Settings { - if SensitiveSettingKeys[s.Key] && auth.IsEncrypted(s.Value) { - if encKey == "" { - return nil, userError("backup contains encrypted settings but no encryption key is configured — set --encryption-key before importing") - } - if _, err := auth.DecryptString(s.Value, encKey); err != nil { - return nil, userError("backup contains encrypted settings that cannot be decrypted with the current encryption key — check that --encryption-key matches the key used when the backup was created") - } - } - } - for _, d := range data.Downstreams { - if auth.IsEncrypted(d.ApiKey) { - if encKey == "" { - return nil, userError("backup contains encrypted downstream API keys but no encryption key is configured — set --encryption-key before importing") - } - if _, err := auth.DecryptString(d.ApiKey, encKey); err != nil { - return nil, userError("backup contains encrypted downstream API keys that cannot be decrypted with the current encryption key — check that --encryption-key matches the key used when the backup was created") - } - } - } - - sqlDB := c.hub.deps.SQLDB - if sqlDB == nil { - return nil, fmt.Errorf("transaction support not available") - } - - tx, err := sqlDB.BeginTx(ctx, nil) - if err != nil { - return nil, fmt.Errorf("database error: %w", err) - } - defer tx.Rollback() //nolint:errcheck - - qtx := c.hub.queries.WithTx(tx) - - // Settings - for _, s := range data.Settings { - if !wsAllowedSettingKeys[s.Key] { - slog.Warn("import config: skipping unknown setting key", "key", s.Key) - continue - } - if err := qtx.UpsertSetting(ctx, db.UpsertSettingParams(s)); err != nil { - return nil, fmt.Errorf("failed to import settings: %w", err) - } - } - - // Groups — capture old→new id remap so talkgroups can rewrite their - // group_id FKs (the export carries the source DB's PKs, but on a fresh - // install those PKs don't exist yet). 
- groupRemap := make(map[int64]int64, len(data.Groups)) - for _, g := range data.Groups { - newID, err := qtx.CreateGroup(ctx, g.Label) - if err != nil { - if !wsIsUniqueViolation(err) { - return nil, fmt.Errorf("failed to import groups: %w", err) - } - existing, gerr := qtx.GetGroupByLabel(ctx, g.Label) - if gerr != nil { - return nil, fmt.Errorf("failed to look up existing group %q: %w", g.Label, gerr) - } - newID = existing.ID - } - groupRemap[g.ID] = newID - } - - // Tags — same remap pattern as groups. - tagRemap := make(map[int64]int64, len(data.Tags)) - for _, t := range data.Tags { - newID, err := qtx.CreateTag(ctx, t.Label) - if err != nil { - if !wsIsUniqueViolation(err) { - return nil, fmt.Errorf("failed to import tags: %w", err) - } - existing, gerr := qtx.GetTagByLabel(ctx, t.Label) - if gerr != nil { - return nil, fmt.Errorf("failed to look up existing tag %q: %w", t.Label, gerr) - } - newID = existing.ID - } - tagRemap[t.ID] = newID - } - - // Systems — remap by old PK → new PK. The natural key is SystemID - // (the radio-system ID, e.g. 1, 100), which sqlc enforces UNIQUE. - systemRemap := make(map[int64]int64, len(data.Systems)) - for _, s := range data.Systems { - newID, err := qtx.CreateSystem(ctx, db.CreateSystemParams{ - SystemID: s.SystemID, - Label: s.Label, - AutoPopulateTalkgroups: s.AutoPopulateTalkgroups, - BlacklistsJson: s.BlacklistsJson, - Led: s.Led, - Order: s.Order, - }) - if err != nil { - if !wsIsUniqueViolation(err) { - return nil, fmt.Errorf("failed to import systems: %w", err) - } - existing, gerr := qtx.GetSystemBySystemID(ctx, s.SystemID) - if gerr != nil { - return nil, fmt.Errorf("failed to look up existing system %d: %w", s.SystemID, gerr) - } - newID = existing.ID - } - systemRemap[s.ID] = newID - } - - // Talkgroups — translate FKs (system_id, group_id, tag_id) through the - // remaps built above, then upsert. Capture the new PK so dirmonitors - // can rewrite their talkgroup_id FKs. 
- tgRemap := make(map[int64]int64, len(data.Talkgroups)) - for _, tg := range data.Talkgroups { - newSystemID, ok := systemRemap[tg.SystemID] - if !ok { - slog.Warn("import config: skipping talkgroup with unknown system_id", - "talkgroup_id", tg.TalkgroupID, "system_id", tg.SystemID) - continue - } - groupID := tg.GroupID - if groupID.Valid { - if mapped, ok := groupRemap[groupID.Int64]; ok { - groupID.Int64 = mapped - } else { - // Group wasn't in the export — drop the FK rather than fail. - groupID = sql.NullInt64{} - } - } - tagID := tg.TagID - if tagID.Valid { - if mapped, ok := tagRemap[tagID.Int64]; ok { - tagID.Int64 = mapped - } else { - tagID = sql.NullInt64{} - } - } - if err := qtx.UpsertTalkgroup(ctx, db.UpsertTalkgroupParams{ - SystemID: newSystemID, - TalkgroupID: tg.TalkgroupID, - Label: tg.Label, - Name: tg.Name, - Frequency: tg.Frequency, - Led: tg.Led, - GroupID: groupID, - TagID: tagID, - Order: tg.Order, - }); err != nil { - return nil, fmt.Errorf("failed to import talkgroups: %w", err) - } - row, err := qtx.GetTalkgroupBySystemAndTGID(ctx, db.GetTalkgroupBySystemAndTGIDParams{ - SystemID: newSystemID, - TalkgroupID: tg.TalkgroupID, - }) - if err != nil { - return nil, fmt.Errorf("failed to look up imported talkgroup (system=%d tg=%d): %w", - newSystemID, tg.TalkgroupID, err) - } - tgRemap[tg.ID] = row.ID - } - - // Units — translate system_id. - for _, u := range data.Units { - newSystemID, ok := systemRemap[u.SystemID] - if !ok { - slog.Warn("import config: skipping unit with unknown system_id", - "unit_id", u.UnitID, "system_id", u.SystemID) - continue - } - if err := qtx.UpsertUnit(ctx, db.UpsertUnitParams{ - SystemID: newSystemID, - UnitID: u.UnitID, - Label: u.Label, - Order: u.Order, - }); err != nil { - return nil, fmt.Errorf("failed to import units: %w", err) - } - } - - // API Keys — remap any system PKs embedded in systems_json. 
- for _, k := range data.APIKeys { - if _, err := qtx.CreateAPIKey(ctx, db.CreateAPIKeyParams{ - Key: k.Key, - Ident: wsPtrToNullStr(k.Ident), - Disabled: k.Disabled, - SystemsJson: wsPtrToNullStr(remapSystemsJSON(k.SystemsJson, systemRemap)), - CallRateLimit: wsPtrToNullInt(k.CallRateLimit), - Order: k.Order, - }); err != nil && !wsIsUniqueViolation(err) { - return nil, fmt.Errorf("failed to import api keys: %w", err) - } - } - - // DirMonitors — translate system_id and talkgroup_id FKs. - for _, d := range data.DirMonitors { - sysID := d.SystemID - if sysID.Valid { - if mapped, ok := systemRemap[sysID.Int64]; ok { - sysID.Int64 = mapped - } else { - slog.Warn("import config: dirmonitor system_id not found in import; dropping FK", - "directory", d.Directory, "system_id", sysID.Int64) - sysID = sql.NullInt64{} - } - } - tgID := d.TalkgroupID - if tgID.Valid { - if mapped, ok := tgRemap[tgID.Int64]; ok { - tgID.Int64 = mapped - } else { - slog.Warn("import config: dirmonitor talkgroup_id not found in import; dropping FK", - "directory", d.Directory, "talkgroup_id", tgID.Int64) - tgID = sql.NullInt64{} - } - } - if _, err := qtx.CreateDirMonitor(ctx, db.CreateDirMonitorParams{ - Directory: d.Directory, - Type: d.Type, - Mask: d.Mask, - Extension: d.Extension, - Frequency: d.Frequency, - Delay: d.Delay, - DeleteAfter: d.DeleteAfter, - UsePolling: d.UsePolling, - Disabled: d.Disabled, - SystemID: sysID, - TalkgroupID: tgID, - Order: d.Order, - }); err != nil && !wsIsUniqueViolation(err) { - return nil, fmt.Errorf("failed to import dirmonitors: %w", err) - } - } - - // Downstreams — remap embedded system PKs. 
- for _, d := range data.Downstreams { - if !wsValidHTTPURL(d.Url) { - slog.Warn("import config: skipping downstream with invalid URL", "url", d.Url) - continue - } - if _, err := qtx.CreateDownstream(ctx, db.CreateDownstreamParams{ - Url: d.Url, - ApiKey: d.ApiKey, - SystemsJson: wsPtrToNullStr(remapSystemsJSON(d.SystemsJson, systemRemap)), - Disabled: d.Disabled, - Order: d.Order, - }); err != nil && !wsIsUniqueViolation(err) { - return nil, fmt.Errorf("failed to import downstreams: %w", err) - } - } - - // Webhooks — remap embedded system PKs. - for _, w := range data.Webhooks { - if !wsValidHTTPURL(w.Url) { - slog.Warn("import config: skipping webhook with invalid URL", "url", w.Url) - continue - } - if _, err := qtx.CreateWebhook(ctx, db.CreateWebhookParams{ - Url: w.Url, - Type: w.Type, - Secret: wsPtrToNullStr(w.Secret), - SystemsJson: wsPtrToNullStr(remapSystemsJSON(w.SystemsJson, systemRemap)), - Disabled: w.Disabled, - Order: w.Order, - }); err != nil && !wsIsUniqueViolation(err) { - return nil, fmt.Errorf("failed to import webhooks: %w", err) - } - } - - if err := tx.Commit(); err != nil { - return nil, fmt.Errorf("failed to commit import: %w", err) - } - - // Hot-reload subsystems whose live state derives from the rows or - // settings we just rewrote. Without these, the in-process worker - // pools, downstream forwarders, and dirmonitor watchers keep using - // their pre-import config — symptom: transcription stops, downstream - // forwarding goes silent, dirmonitors don't pick up new directories - // until the operator restarts the server. 
- if c.hub.deps.TranscriberReload != nil && len(data.Settings) > 0 { - tEnabled, _ := c.hub.queries.GetSetting(ctx, "transcriptionEnabled") - tURL, _ := c.hub.queries.GetSetting(ctx, "transcriptionUrl") - tModel, _ := c.hub.queries.GetSetting(ctx, "transcriptionModel") - tLang, _ := c.hub.queries.GetSetting(ctx, "transcriptionLanguage") - tDiarize, _ := c.hub.queries.GetSetting(ctx, "transcriptionDiarize") - - ok := c.hub.deps.TranscriberReload.Reload( - tEnabled.Value == "true", - tURL.Value, - tModel.Value, - tLang.Value, - tDiarize.Value == "true", - ) - c.hub.deps.WhisperAvailable = ok && tEnabled.Value == "true" - } - if c.hub.deps.DirMonitorReload != nil && len(data.DirMonitors) > 0 { - c.hub.deps.DirMonitorReload.Reload() - } - if c.hub.deps.DownstreamReload != nil && len(data.Downstreams) > 0 { - c.hub.deps.DownstreamReload.Reload() - } - - // Notify all admin/listener clients to refetch — without these the - // admin UI shows stale (empty) lists and the user thinks the import - // silently failed. Order doesn't matter; events are fire-and-forget. 
- for _, topic := range []string{ - "groups.updated", - "tags.updated", - "systems.updated", - "talkgroups.updated", - "units.updated", - "apikeys.updated", - "dirmonitors.updated", - "downstreams.updated", - "webhooks.updated", - } { - c.hub.BroadcastAdminEvent(topic, nil) - } - c.hub.BroadcastCFG(ctx) - - slog.Info("config imported successfully via WS", - "by", c.userID, - "settings", len(data.Settings), - "groups", len(data.Groups), - "tags", len(data.Tags), - "systems", len(data.Systems), - "talkgroups", len(data.Talkgroups), - "units", len(data.Units), - "apiKeys", len(data.APIKeys), - "dirmonitors", len(data.DirMonitors), - "downstreams", len(data.Downstreams), - "webhooks", len(data.Webhooks), - ) - return map[string]bool{"ok": true}, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// RADIOREFERENCE -// ══════════════════════════════════════════════════════════════════════════════ - -func (c *Client) opRadioReferenceApply(ctx context.Context, params json.RawMessage) (any, error) { - type rrCandidate struct { - Row int `json:"row"` - TalkgroupID int64 `json:"talkgroupId"` - Label *string `json:"label,omitempty"` - Name *string `json:"name,omitempty"` - Group *string `json:"group,omitempty"` - Tag *string `json:"tag,omitempty"` - Led *string `json:"led,omitempty"` - Order *int64 `json:"order,omitempty"` - } - - var req struct { - SystemID int64 `json:"systemId"` - Candidates []rrCandidate `json:"candidates"` - MergeMode string `json:"mergeMode"` - SelectedFields []string `json:"selectedFields"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.SystemID <= 0 { - return nil, userError("systemId is required") - } - if len(req.Candidates) == 0 { - return nil, userError("candidates are required") - } - if len(req.Candidates) > 100_000 { - return nil, userError("too many candidates") - } - if req.MergeMode == "" { - req.MergeMode = "fill_missing" - } - 
if req.MergeMode != "fill_missing" && req.MergeMode != "overwrite_selected" { - return nil, userError("mergeMode must be 'fill_missing' or 'overwrite_selected'") - } - if _, err := c.hub.queries.GetSystem(ctx, req.SystemID); err != nil { - return nil, userError("system not found") - } - - // Sanitize selected fields. - rrUpdatable := map[string]bool{"label": true, "name": true, "group": true, "tag": true, "led": true, "order": true} - selected := make([]string, 0, len(req.SelectedFields)) - for _, f := range req.SelectedFields { - v := strings.ToLower(strings.TrimSpace(f)) - if rrUpdatable[v] { - selected = append(selected, v) - } - } - - type rowErr struct { - Row int `json:"row"` - Reason string `json:"reason"` - } - resp := map[string]any{ - "processed": 0, - "matched": 0, - "updated": 0, - "skipped": 0, - "errors": 0, - "rowErrors": []rowErr{}, - } - processed, matched, updated, skippedCount, errCount := 0, 0, 0, 0, 0 - rowErrors := make([]rowErr, 0) - - for _, candidate := range req.Candidates { - processed++ - - tg, tgErr := c.hub.queries.GetTalkgroupBySystemAndTGID(ctx, db.GetTalkgroupBySystemAndTGIDParams{ - SystemID: req.SystemID, - TalkgroupID: candidate.TalkgroupID, - }) - if tgErr != nil { - if errors.Is(tgErr, sql.ErrNoRows) { - skippedCount++ - rowErrors = append(rowErrors, rowErr{Row: candidate.Row, Reason: "talkgroup not found in selected system"}) - continue - } - errCount++ - rowErrors = append(rowErrors, rowErr{Row: candidate.Row, Reason: "database error"}) - continue - } - matched++ - - p := db.UpdateTalkgroupParams{ - ID: tg.ID, - TalkgroupID: tg.TalkgroupID, - Label: tg.Label, - Name: tg.Name, - Frequency: tg.Frequency, - Led: tg.Led, - GroupID: tg.GroupID, - TagID: tg.TagID, - Order: tg.Order, - } - - // Determine which fields to apply. 
- allow := map[string]bool{} - if req.MergeMode == "overwrite_selected" { - for _, f := range selected { - allow[f] = true - } - } - - applyFields := make([]string, 0, 6) - check := func(field string, hasCand bool, targetEmpty bool) { - if !hasCand { - return - } - if req.MergeMode == "overwrite_selected" { - if allow[field] { - applyFields = append(applyFields, field) - } - return - } - if targetEmpty { - applyFields = append(applyFields, field) - } - } - check("label", candidate.Label != nil, !tg.Label.Valid || strings.TrimSpace(tg.Label.String) == "") - check("name", candidate.Name != nil, !tg.Name.Valid || strings.TrimSpace(tg.Name.String) == "") - check("group", candidate.Group != nil, !tg.GroupID.Valid) - check("tag", candidate.Tag != nil, !tg.TagID.Valid) - check("led", candidate.Led != nil, !tg.Led.Valid || strings.TrimSpace(tg.Led.String) == "") - check("order", candidate.Order != nil, tg.Order == 0) - - if len(applyFields) == 0 { - skippedCount++ - continue - } - - // Apply field updates. 
- applyErr := false - for _, field := range applyFields { - switch field { - case "label": - if candidate.Label != nil { - p.Label = sql.NullString{String: *candidate.Label, Valid: true} - } - case "name": - if candidate.Name != nil { - p.Name = sql.NullString{String: *candidate.Name, Valid: true} - } - case "group": - if candidate.Group != nil { - g, err := c.hub.queries.GetGroupByLabel(ctx, *candidate.Group) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - newID, createErr := c.hub.queries.CreateGroup(ctx, *candidate.Group) - if createErr != nil { - errCount++ - rowErrors = append(rowErrors, rowErr{Row: candidate.Row, Reason: "database error"}) - applyErr = true - break - } - p.GroupID = sql.NullInt64{Int64: newID, Valid: true} - } else { - errCount++ - rowErrors = append(rowErrors, rowErr{Row: candidate.Row, Reason: "database error"}) - applyErr = true - break - } - } else { - p.GroupID = sql.NullInt64{Int64: g.ID, Valid: true} - } - } - case "tag": - if candidate.Tag != nil { - t, err := c.hub.queries.GetTagByLabel(ctx, *candidate.Tag) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - newID, createErr := c.hub.queries.CreateTag(ctx, *candidate.Tag) - if createErr != nil { - errCount++ - rowErrors = append(rowErrors, rowErr{Row: candidate.Row, Reason: "database error"}) - applyErr = true - break - } - p.TagID = sql.NullInt64{Int64: newID, Valid: true} - } else { - errCount++ - rowErrors = append(rowErrors, rowErr{Row: candidate.Row, Reason: "database error"}) - applyErr = true - break - } - } else { - p.TagID = sql.NullInt64{Int64: t.ID, Valid: true} - } - } - case "led": - if candidate.Led != nil { - p.Led = sql.NullString{String: *candidate.Led, Valid: true} - } - case "order": - if candidate.Order != nil { - p.Order = *candidate.Order - } - } - } - if applyErr { - continue - } - - if err := c.hub.queries.UpdateTalkgroup(ctx, p); err != nil { - errCount++ - rowErrors = append(rowErrors, rowErr{Row: candidate.Row, Reason: "database error"}) - 
continue - } - updated++ - } - - resp["processed"] = processed - resp["matched"] = matched - resp["updated"] = updated - resp["skipped"] = skippedCount - resp["errors"] = errCount - resp["rowErrors"] = rowErrors - return resp, nil -} - -// ══════════════════════════════════════════════════════════════════════════════ -// TRANSCRIPTION MODEL MANAGEMENT -// ══════════════════════════════════════════════════════════════════════════════ - -// transcriptionBaseURL reads the transcriptionUrl setting from DB. -func (c *Client) transcriptionBaseURL(ctx context.Context) (string, error) { - s, err := c.hub.queries.GetSetting(ctx, "transcriptionUrl") - if err == nil && s.Value != "" && wsValidHTTPURL(s.Value) { - return strings.TrimRight(s.Value, "/"), nil - } - // Fall back to the live manager's URL (e.g. when DB setting was just saved - // but the query above fails due to timing). - if tr := c.hub.deps.TranscriberReload; tr != nil { - if u := tr.BaseURL(); u != "" { - return strings.TrimRight(u, "/"), nil - } - } - return "", userError("transcriptionUrl setting is not configured") -} - -func (c *Client) opTranscriptionStatus(ctx context.Context, _ json.RawMessage) (any, error) { - // Read settings from DB. - getVal := func(key string) string { - s, err := c.hub.queries.GetSetting(ctx, key) - if err != nil { - return "" - } - return s.Value - } - - enabled := getVal("transcriptionEnabled") == "true" - baseURL := getVal("transcriptionUrl") - model := getVal("transcriptionModel") - language := getVal("transcriptionLanguage") - diarize := getVal("transcriptionDiarize") == "true" - liveDisplay := getVal("liveTranscriptDisplay") == "true" - - // Check live connection to go-whisper. 
- connected := false - if baseURL != "" && wsValidHTTPURL(baseURL) { - trimmed := strings.TrimRight(baseURL, "/") - reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - req, err := http.NewRequestWithContext(reqCtx, http.MethodGet, trimmed+"/api/whisper/model", nil) - if err == nil { - resp, err := http.DefaultClient.Do(req) - if err == nil { - resp.Body.Close() - connected = resp.StatusCode >= 200 && resp.StatusCode < 400 - } - } - } - - return map[string]any{ - "enabled": enabled, - "url": baseURL, - "model": model, - "language": language, - "diarize": diarize, - "liveDisplay": liveDisplay, - "connected": connected, - }, nil -} - -func (c *Client) opTranscriptionModels(ctx context.Context, _ json.RawMessage) (any, error) { - baseURL, err := c.transcriptionBaseURL(ctx) - if err != nil { - return nil, err - } - - reqCtx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - req, err := http.NewRequestWithContext(reqCtx, http.MethodGet, baseURL+"/api/whisper/model", nil) - if err != nil { - return nil, fmt.Errorf("failed to create request: %w", err) - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, fmt.Errorf("go-whisper unreachable: %w", err) - } - defer resp.Body.Close() - - body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("failed to read response: %w", err) - } - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("go-whisper returned status %d", resp.StatusCode) - } - - var result json.RawMessage - if err := json.Unmarshal(body, &result); err != nil { - return nil, fmt.Errorf("invalid JSON from go-whisper: %w", err) - } - return result, nil -} - -func (c *Client) opTranscriptionDownload(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - Model string `json:"model"` - } - if err := json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.Model == "" { - 
return nil, userError("model name is required") - } - - // go-whisper expects model names with .bin extension - model := req.Model - if !strings.HasSuffix(model, ".bin") { - model += ".bin" - } - - // tdrz (tinydiarize) models live in a different HuggingFace repo. - // go-whisper's store accepts a full URL as the model path for non-default repos. - if strings.Contains(model, "tdrz") { - model = "https://huggingface.co/akashmjn/tinydiarize-whisper.cpp/resolve/main/ggml-" + strings.TrimPrefix(model, "ggml-") - } - - baseURL, err := c.transcriptionBaseURL(ctx) - if err != nil { - return nil, err - } - - reqBody, _ := json.Marshal(map[string]string{"model": model}) - - // Model downloads can take a long time (500MB+). - reqCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) - defer cancel() - - httpReq, err := http.NewRequestWithContext(reqCtx, http.MethodPost, baseURL+"/api/whisper/model", strings.NewReader(string(reqBody))) - if err != nil { - return nil, fmt.Errorf("failed to create request: %w", err) - } - httpReq.Header.Set("Content-Type", "application/json") - - resp, err := http.DefaultClient.Do(httpReq) - if err != nil { - return nil, fmt.Errorf("go-whisper unreachable: %w", err) - } - defer resp.Body.Close() - - body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("failed to read response: %w", err) - } - - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { - slog.Warn("go-whisper model download failed", "status", resp.StatusCode, "body", string(body)) - return nil, fmt.Errorf("go-whisper returned status %d", resp.StatusCode) - } - - var result json.RawMessage - if err := json.Unmarshal(body, &result); err != nil { - return nil, fmt.Errorf("invalid JSON from go-whisper: %w", err) - } - return result, nil -} - -func (c *Client) opTranscriptionDelete(ctx context.Context, params json.RawMessage) (any, error) { - var req struct { - ID string `json:"id"` - } - if err := 
json.Unmarshal(params, &req); err != nil { - return nil, userError("invalid request body") - } - if req.ID == "" { - return nil, userError("model id is required") - } - - // Sanitise: model ID should be alphanumeric + hyphens/dots/underscores only. - for _, ch := range req.ID { - if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') || ch == '-' || ch == '.' || ch == '_') { - return nil, userError("invalid model id") - } - } - - baseURL, err := c.transcriptionBaseURL(ctx) - if err != nil { - return nil, err - } - - reqCtx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - httpReq, err := http.NewRequestWithContext(reqCtx, http.MethodDelete, baseURL+"/api/whisper/model/"+req.ID, nil) - if err != nil { - return nil, fmt.Errorf("failed to create request: %w", err) - } - - resp, err := http.DefaultClient.Do(httpReq) - if err != nil { - return nil, fmt.Errorf("go-whisper unreachable: %w", err) - } - defer resp.Body.Close() - io.Copy(io.Discard, resp.Body) //nolint:errcheck - - if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("go-whisper returned status %d", resp.StatusCode) - } - - return map[string]any{"deleted": true}, nil -} - -func (c *Client) opTranscriptionStats(ctx context.Context, _ json.RawMessage) (any, error) { - // DB aggregate stats — "recent" = last 24 hours. - since := time.Now().Add(-24 * time.Hour).Unix() - stats, err := c.hub.queries.TranscriptionStats(ctx, since) - if err != nil { - return nil, fmt.Errorf("query transcription stats: %w", err) - } - - byLang, err := c.hub.queries.TranscriptionsByLanguage(ctx) - if err != nil { - return nil, fmt.Errorf("query transcriptions by language: %w", err) - } - - byModel, err := c.hub.queries.TranscriptionsByModel(ctx) - if err != nil { - return nil, fmt.Errorf("query transcriptions by model: %w", err) - } - - // Pool stats (live). 
- queueDepth := 0 - poolEnabled := false - if tr := c.hub.deps.TranscriberReload; tr != nil { - poolEnabled = tr.Enabled() - queueDepth = tr.QueueDepth() - } - - // Convert interface{} values from COALESCE/AVG to int64. - toInt64 := func(v interface{}) int64 { - switch n := v.(type) { - case int64: - return n - case float64: - return int64(n) - default: - return 0 - } - } - - langBreakdown := make([]map[string]any, 0, len(byLang)) - for _, l := range byLang { - langBreakdown = append(langBreakdown, map[string]any{ - "language": l.Lang, - "count": l.Cnt, - }) - } - - modelBreakdown := make([]map[string]any, 0, len(byModel)) - for _, m := range byModel { - modelBreakdown = append(modelBreakdown, map[string]any{ - "model": m.ModelName, - "count": m.Cnt, - }) - } - - return map[string]any{ - "total": stats.Total, - "recent24h": stats.RecentCount, - "avgDurationMs": toInt64(stats.AvgDurationMs), - "minDurationMs": toInt64(stats.MinDurationMs), - "maxDurationMs": toInt64(stats.MaxDurationMs), - "queueDepth": queueDepth, - "poolEnabled": poolEnabled, - "byLanguage": langBreakdown, - "byModel": modelBreakdown, - }, nil -} diff --git a/backend/internal/ws/admin_router.go b/backend/internal/ws/admin_router.go new file mode 100644 index 0000000..5d4f0a4 --- /dev/null +++ b/backend/internal/ws/admin_router.go @@ -0,0 +1,162 @@ +// Package ws — admin WS request router. +// +// This file is the thin transport adapter between the WebSocket admin +// protocol (ADM_REQ / ADM_RES frames) and the transport-agnostic business +// logic in internal/admin. It preserves the wire protocol byte-for-byte: +// no op renames, no payload reshaping, no new error envelopes. +// +// Live-state ops that read from the hub's in-memory state (activity stats, +// log ring buffer) still live on *Client — they need hub/logging access +// the admin package explicitly does not have. 
+package ws + +import ( + "context" + "encoding/json" + "errors" + + "github.com/openscanner/openscanner/internal/admin" + "github.com/openscanner/openscanner/internal/logging" +) + +// adminOp is the generic admin.Operations method signature. Every handler +// in adminOpHandlers returns one of these (or a thin local wrapper) so the +// router can call them uniformly. +type adminOp func(ctx context.Context, params json.RawMessage, callerID int64) (any, error) + +// adminOpHandlers returns the complete map of supported admin WS operations. +// The keys are the wire-protocol op names (e.g. "users.list"); changing them +// breaks the frontend — don't. +func (c *Client) adminOpHandlers() map[string]adminOp { + o := c.hub.admin + return map[string]adminOp{ + // Activity & Logs — live hub state, stay on *Client. + "activity.stats": c.adaptClientOp(c.opActivityStats), + "activity.chart": c.adaptClientOp(c.opActivityChart), + "activity.top-talkgroups": c.adaptClientOp(c.opTopTalkgroups), + "logs.query": c.adaptClientOp(c.opLogsQuery), + "logs.level": c.adaptClientOp(c.opLogsLevel), + + // Users + "users.list": o.UsersList, + "users.create": o.UsersCreate, + "users.update": o.UsersUpdate, + "users.delete": o.UsersDelete, + + // Systems + "systems.list": o.SystemsList, + "systems.create": o.SystemsCreate, + "systems.update": o.SystemsUpdate, + "systems.delete": o.SystemsDelete, + + // Talkgroups + "talkgroups.list": o.TalkgroupsList, + "talkgroups.create": o.TalkgroupsCreate, + "talkgroups.update": o.TalkgroupsUpdate, + "talkgroups.delete": o.TalkgroupsDelete, + + // Units + "units.list": o.UnitsList, + "units.create": o.UnitsCreate, + "units.update": o.UnitsUpdate, + "units.delete": o.UnitsDelete, + + // Groups + "groups.list": o.GroupsList, + "groups.create": o.GroupsCreate, + "groups.update": o.GroupsUpdate, + "groups.delete": o.GroupsDelete, + + // Tags + "tags.list": o.TagsList, + "tags.create": o.TagsCreate, + "tags.update": o.TagsUpdate, + "tags.delete": o.TagsDelete, 
+ + // API Keys + "apikeys.list": o.APIKeysList, + "apikeys.create": o.APIKeysCreate, + "apikeys.update": o.APIKeysUpdate, + "apikeys.delete": o.APIKeysDelete, + + // DirMonitors + "dirmonitors.list": o.DirMonitorsList, + "dirmonitors.create": o.DirMonitorsCreate, + "dirmonitors.update": o.DirMonitorsUpdate, + "dirmonitors.delete": o.DirMonitorsDelete, + + // Downstreams + "downstreams.list": o.DownstreamsList, + "downstreams.create": o.DownstreamsCreate, + "downstreams.update": o.DownstreamsUpdate, + "downstreams.delete": o.DownstreamsDelete, + + // Webhooks + "webhooks.list": o.WebhooksList, + "webhooks.create": o.WebhooksCreate, + "webhooks.update": o.WebhooksUpdate, + "webhooks.delete": o.WebhooksDelete, + + // Shared Links + "shared-links.list": o.SharedLinksList, + "shared-links.delete": o.SharedLinksDelete, + + // Config + "config.get": o.ConfigGet, + "config.update": o.ConfigUpdate, + + // Filesystem + "fs.directories": o.FSDirectories, + + // Export + "export.config": o.ExportConfig, + "export.talkgroups": o.ExportTalkgroups, + "export.units": o.ExportUnits, + "export.groups": o.ExportGroups, + "export.tags": o.ExportTags, + + // Import + "import.config": o.ImportConfig, + + // RadioReference + "radioreference.apply": o.RadioReferenceApply, + + // Transcription model management + "transcription.status": o.TranscriptionStatus, + "transcription.models": o.TranscriptionModels, + "transcription.download": o.TranscriptionDownload, + "transcription.delete": o.TranscriptionDelete, + "transcription.stats": o.TranscriptionStats, + } +} + +// clientOp is the legacy signature used by the live-state handlers in +// client.go (they don't need callerID). +type clientOp func(ctx context.Context, params json.RawMessage) (any, error) + +// adaptClientOp wraps a client-scoped op into the adminOp signature by +// dropping the callerID argument. The hub's live state functions don't +// need it — they're read-only. 
+func (c *Client) adaptClientOp(fn clientOp) adminOp { + return func(ctx context.Context, params json.RawMessage, _ int64) (any, error) { + return fn(ctx, params) + } +} + +// opLogsLevel returns the current runtime log level. Kept here (vs +// client.go) because it's a tiny admin-only query with no other natural +// home. +func (c *Client) opLogsLevel(_ context.Context, _ json.RawMessage) (any, error) { + return map[string]string{"level": logging.GetLevel()}, nil +} + +// errorString unwraps admin.UserError into the byte-identical envelope the +// old dispatcher sent: the raw message string for validation errors, and +// "internal error" for anything else. +func errorString(err error) (msg string, isUser bool) { + var uerr admin.UserError + if errors.As(err, &uerr) { + return err.Error(), true + } + return "internal error", false +} diff --git a/backend/internal/ws/admin_router_test.go b/backend/internal/ws/admin_router_test.go new file mode 100644 index 0000000..0071a0a --- /dev/null +++ b/backend/internal/ws/admin_router_test.go @@ -0,0 +1,131 @@ +// Tests for the WS admin request router — specifically the framing / +// dispatch layer (unknown op → error envelope, known op → delegated to +// admin.Operations). Business logic for each op is covered in +// internal/admin's own test files. +package ws + +import ( + "context" + "encoding/json" + "errors" + "testing" + + "github.com/openscanner/openscanner/internal/admin" + "github.com/openscanner/openscanner/internal/db" + _ "modernc.org/sqlite" +) + +func TestAdminOpHandlers_CoversEveryWireOp(t *testing.T) { + // If a new admin op is added to admin.Operations but not wired into + // adminOpHandlers, the WS layer silently drops it. This sanity check + // catches that before it hits production. 
+ sqlDB, err := db.Open(":memory:")
+ if err != nil {
+ t.Fatalf("open DB: %v", err)
+ }
+ t.Cleanup(func() { _ = sqlDB.Close() })
+ queries := db.New(sqlDB)
+
+ hub := NewHub(queries, "test")
+ c := &Client{hub: hub, userID: 1, isAdmin: true}
+
+ handlers := c.adminOpHandlers()
+
+ // The 62 ops expected on the wire. Keep this list sorted so diffs are
+ // readable when an op is intentionally added.
+ want := []string{
+ "activity.chart", "activity.stats", "activity.top-talkgroups",
+ "apikeys.create", "apikeys.delete", "apikeys.list", "apikeys.update",
+ "config.get", "config.update",
+ "dirmonitors.create", "dirmonitors.delete", "dirmonitors.list", "dirmonitors.update",
+ "downstreams.create", "downstreams.delete", "downstreams.list", "downstreams.update",
+ "export.config", "export.groups", "export.tags", "export.talkgroups", "export.units",
+ "fs.directories",
+ "groups.create", "groups.delete", "groups.list", "groups.update",
+ "import.config",
+ "logs.level", "logs.query",
+ "radioreference.apply",
+ "shared-links.delete", "shared-links.list",
+ "systems.create", "systems.delete", "systems.list", "systems.update",
+ "tags.create", "tags.delete", "tags.list", "tags.update",
+ "talkgroups.create", "talkgroups.delete", "talkgroups.list", "talkgroups.update",
+ "transcription.delete", "transcription.download", "transcription.models",
+ "transcription.stats", "transcription.status",
+ "units.create", "units.delete", "units.list", "units.update",
+ "users.create", "users.delete", "users.list", "users.update",
+ "webhooks.create", "webhooks.delete", "webhooks.list", "webhooks.update",
+ }
+ for _, op := range want {
+ if _, ok := handlers[op]; !ok {
+ t.Errorf("adminOpHandlers missing wire op %q", op)
+ }
+ }
+ if got := len(handlers); got != len(want) {
+ t.Errorf("adminOpHandlers has %d entries, want %d", got, len(want))
+ }
+}
+
+func TestHandleAdminRequest_UnknownOp_ReturnsErrorEnvelope(t *testing.T) {
+ sqlDB, err := db.Open(":memory:")
+ if err != nil {
+ 
t.Fatalf("open DB: %v", err) + } + t.Cleanup(func() { _ = sqlDB.Close() }) + queries := db.New(sqlDB) + hub := NewHub(queries, "test") + + // Capture anything the router tries to send. + sendCh := make(chan []byte, 1) + c := &Client{ + hub: hub, + userID: 1, + isAdmin: true, + send: sendCh, + } + + c.handleAdminRequest(context.Background(), adminRequest{ReqID: "r1", Op: "does.not.exist"}) + + select { + case msg := <-sendCh: + // Must be a valid ADM_RES error envelope referencing reqId "r1". + var frame []json.RawMessage + if err := json.Unmarshal(msg, &frame); err != nil { + t.Fatalf("response is not JSON array: %v", err) + } + var cmd string + if err := json.Unmarshal(frame[0], &cmd); err != nil || cmd != "ADM_RES" { + t.Fatalf("cmd = %q (err %v), want ADM_RES", cmd, err) + } + if !containsSub(string(msg), `"ok":false`) { + t.Errorf("expected error envelope ok:false; got %s", msg) + } + if !containsSub(string(msg), "unknown op") { + t.Errorf("expected 'unknown op' in error; got %s", msg) + } + default: + t.Fatal("no ADM_RES frame was sent") + } +} + +func TestErrorString_DistinguishesUserAndInternal(t *testing.T) { + uerr := admin.UserError("bad input") + msg, isUser := errorString(uerr) + if !isUser || msg != "bad input" { + t.Errorf("UserError path: got (%q, %v), want (\"bad input\", true)", msg, isUser) + } + + other := errors.New("boom") + msg, isUser = errorString(other) + if isUser || msg != "internal error" { + t.Errorf("internal path: got (%q, %v), want (\"internal error\", false)", msg, isUser) + } +} + +func containsSub(s, sub string) bool { + for i := 0; i+len(sub) <= len(s); i++ { + if s[i:i+len(sub)] == sub { + return true + } + } + return false +} diff --git a/backend/internal/ws/client.go b/backend/internal/ws/client.go index 8340933..a067d45 100644 --- a/backend/internal/ws/client.go +++ b/backend/internal/ws/client.go @@ -105,15 +105,6 @@ type adminRequest struct { Params json.RawMessage `json:"params,omitempty"` } -// adminOpHandler is the 
function signature for all admin WS operation handlers. -type adminOpHandler func(ctx context.Context, params json.RawMessage) (any, error) - -// userError is returned by op handlers for validation errors that should be -// shown verbatim to the client. Other errors are treated as internal. -type userError string - -func (e userError) Error() string { return string(e) } - // CanReceive reports whether this client is authorized to receive a call for // the given system and talkgroup. If grants is nil/empty, everything is allowed. func (c *Client) CanReceive(systemID, talkgroupID int64) bool { @@ -484,15 +475,14 @@ func (c *Client) handleAdminRequest(ctx context.Context, req adminRequest) { return } - data, err := handler(ctx, req.Params) + data, err := handler(ctx, req.Params, c.userID) var msg []byte if err != nil { - var uerr userError - if errors.As(err, &uerr) { - msg, _ = NewADMRESErrorMessage(req.ReqID, err.Error()) + if errMsg, isUser := errorString(err); isUser { + msg, _ = NewADMRESErrorMessage(req.ReqID, errMsg) } else { slog.Error("ws: admin op failed", "op", req.Op, "reqId", req.ReqID, "error", err) - msg, _ = NewADMRESErrorMessage(req.ReqID, "internal error") + msg, _ = NewADMRESErrorMessage(req.ReqID, errMsg) } } else { msg, _ = NewADMRESMessage(req.ReqID, data) diff --git a/backend/internal/ws/hub.go b/backend/internal/ws/hub.go index e47cbbd..95b8f7d 100644 --- a/backend/internal/ws/hub.go +++ b/backend/internal/ws/hub.go @@ -3,39 +3,26 @@ package ws import ( "context" - "database/sql" "log/slog" "sync" "time" + "github.com/openscanner/openscanner/internal/admin" "github.com/openscanner/openscanner/internal/db" ) // Reloader triggers a service config reload (e.g. dirmonitor, downstream). -type Reloader interface { - Reload() -} +// Kept as a ws-local alias to admin.Reloader so external callers that +// reference ws.Reloader continue to compile. +type Reloader = admin.Reloader // TranscriberReloader can hot-reload the transcription subsystem. 
-type TranscriberReloader interface { - Reload(enabled bool, baseURL, model, language string, diarize bool) bool - Enabled() bool - BaseURL() string - QueueDepth() int -} +type TranscriberReloader = admin.TranscriberReloader -// HubDeps holds optional dependencies injected into the Hub for admin WS operations. -type HubDeps struct { - SQLDB *sql.DB - DirMonitorReload Reloader - DownstreamReload Reloader - TranscriberReload TranscriberReloader - FFmpegAvailable bool - FDKAACAvailable bool - WhisperAvailable bool - RecordingsDir string - EncryptionKey string -} +// HubDeps holds optional dependencies injected into the Hub for admin WS +// operations. It is an alias for admin.Deps so callers can keep using +// ws.HubDeps{...} while the underlying fields live in the admin package. +type HubDeps = admin.Deps // StartTime is the process start time, used for uptime calculations. var StartTime = time.Now() @@ -44,7 +31,7 @@ var StartTime = time.Now() type Hub struct { queries *db.Queries version string - deps HubDeps + admin *admin.Operations // transport-agnostic admin ops mu sync.RWMutex clients map[*Client]struct{} @@ -73,16 +60,17 @@ func NewHub(queries *db.Queries, version string, deps ...HubDeps) *Hub { if len(deps) > 0 { d = deps[0] } - return &Hub{ + h := &Hub{ queries: queries, version: version, - deps: d, clients: make(map[*Client]struct{}), register: make(chan *Client), unregister: make(chan *Client), broadcast: make(chan broadcastMsg, 256), done: make(chan struct{}), } + h.admin = admin.New(queries, d, h) + return h } // Run starts the hub's event loop. It blocks until ctx is cancelled. @@ -274,7 +262,9 @@ func (h *Hub) DisconnectByJTI(jti string) { // This handles the circular dependency where dwService needs hub but hub // needs dwService's Reloader. 
func (h *Hub) SetDirMonitorReloader(r Reloader) { - h.deps.DirMonitorReload = r + if h.admin != nil { + h.admin.Deps.DirMonitorReload = r + } } // debounceLSC schedules an LSC broadcast, resetting the timer if one is already From 5cc5d040edade7903acfe67169ccf3063fde387b Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Fri, 24 Apr 2026 15:27:34 -0400 Subject: [PATCH 03/27] ci: add custom CodeQL workflow for backend/ Go module (#17) Default CodeQL setup runs autobuild from the repo root, which cannot locate the Go module (go.mod lives in backend/), causing 'package could not be found: github.com/openscanner/openscanner/docs' warnings. This workflow mirrors ci.yml: installs swag, regenerates backend/docs/docs.go, then runs 'go build ./...' from backend/ under CodeQL's manual build mode. Also analyzes JS/TS with build-mode: none. --- .github/workflows/codeql.yml | 65 ++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 .github/workflows/codeql.yml diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000..b958233 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,65 @@ +name: CodeQL + +on: + push: + branches: [main, dev] + pull_request: + branches: [main, dev] + schedule: + - cron: "23 5 * * 1" + +jobs: + analyze: + name: Analyze (${{ matrix.language }}) + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + include: + - language: go + build-mode: manual + - language: javascript-typescript + build-mode: none + + steps: + - name: Checkout + uses: actions/checkout@v5 + + - name: Set up Go + if: matrix.language == 'go' + uses: actions/setup-go@v6 + with: + go-version: "1.25" + + - name: Install swag + if: matrix.language == 'go' + run: go install github.com/swaggo/swag/cmd/swag@latest + + - name: Generate Swagger docs + if: matrix.language == 'go' + working-directory: backend + run: swag 
init -d cmd/server,internal/api -g main.go --parseDependency --parseInternal -o docs + + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + build-mode: ${{ matrix.build-mode }} + + # The Go module lives in backend/ and its swagger docs package is + # generated above. Autobuild cannot locate the module from the repo + # root, so build explicitly from backend/ here. + - name: Build Go + if: matrix.language == 'go' + working-directory: backend + run: go build ./... + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{ matrix.language }}" From 8ff74f7890ac225dff36919146922a1c3f5b6595 Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Fri, 24 Apr 2026 19:42:48 +0000 Subject: [PATCH 04/27] ci: allow manual CodeQL runs via workflow_dispatch --- .github/workflows/codeql.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index b958233..58ada8a 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -7,6 +7,7 @@ on: branches: [main, dev] schedule: - cron: "23 5 * * 1" + workflow_dispatch: jobs: analyze: From 129b3bb34bb0a9c71952b0070b0c8243108f7b03 Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Fri, 24 Apr 2026 19:48:08 +0000 Subject: [PATCH 05/27] ci: bump codeql-action v3 -> v4 (Node 24) --- .github/workflows/codeql.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 58ada8a..fa585ea 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: run: swag init -d cmd/server,internal/api -g main.go --parseDependency --parseInternal -o docs - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@v4 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -61,6 +61,6 @@ jobs: run: go 
build ./... - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@v4 with: category: "/language:${{ matrix.language }}" From 2eacfa909c6c5c1ae702208ff35e82e1bfc29f7e Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Fri, 24 Apr 2026 19:52:40 +0000 Subject: [PATCH 06/27] ci: point setup-go cache at backend/go.sum Module lives in backend/, so setup-go's default cache lookup at the repo root fails with 'Dependencies file is not found'. Set cache-dependency-path on all three workflows (codeql, ci, release). --- .github/workflows/ci.yml | 1 + .github/workflows/codeql.yml | 1 + .github/workflows/release.yml | 1 + 3 files changed, 3 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6e18a0f..15e5a8b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -35,6 +35,7 @@ jobs: - uses: actions/setup-go@v6 with: go-version: "1.25" + cache-dependency-path: backend/go.sum - name: Install swag run: go install github.com/swaggo/swag/cmd/swag@latest - name: Generate Swagger docs diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index fa585ea..978f692 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -36,6 +36,7 @@ jobs: uses: actions/setup-go@v6 with: go-version: "1.25" + cache-dependency-path: backend/go.sum - name: Install swag if: matrix.language == 'go' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b9a2f30..87a21b0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -39,6 +39,7 @@ jobs: - uses: actions/setup-go@v6 with: go-version: "1.25" + cache-dependency-path: backend/go.sum - name: Install swag run: go install github.com/swaggo/swag/cmd/swag@latest From 3f8cb6fcc05cf71273f03cc8f0093b3ea161ae8e Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Fri, 24 Apr 2026 19:58:20 +0000 Subject: [PATCH 07/27] ci: pin GITHUB_TOKEN to read-only in CI workflow 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CodeQL (actions/missing-workflow-permissions) flagged ci.yml jobs as not restricting the default GITHUB_TOKEN scopes. Add a workflow-level 'permissions: contents: read' block — none of the CI jobs write to the repo. --- .github/workflows/ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 15e5a8b..5ed14b7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,6 +6,9 @@ on: pull_request: branches: [main, dev] +permissions: + contents: read + jobs: changelog: name: CHANGELOG updated From 9dff28f4d5e059447885c245596db27fc1e49650 Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Fri, 24 Apr 2026 20:36:06 -0400 Subject: [PATCH 08/27] =?UTF-8?q?refactor(backend):=20Phase=203=20?= =?UTF-8?q?=E2=80=94=20decompose=20internal/api=20into=20feature-scoped=20?= =?UTF-8?q?handler=20packages=20(#18)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit refactor(backend): decompose internal/api into feature-scoped handler packages - handler/{auth,calls,bookmarks,share,setup,health}/... for listener + auth routes - handler/admin/{imports,radioreference,transcriptions}/... 
for the admin REST surface (11 WS-only admin features remain in internal/admin + internal/ws, already extracted in Phase 2) - handler/shared/ for swagger DTOs and common helpers - handler/routes/ owns route registration, including /ws and /api/ws - internal/api/ removed - Swag invocation in Makefile + 3 workflows updated to scan internal/handler - Swagger regenerated - No route, method, body, header, or middleware changes --- .github/workflows/ci.yml | 2 +- .github/workflows/codeql.yml | 2 +- .github/workflows/release.yml | 2 +- CHANGELOG.md | 7 + backend/.golangci.yml | 2 +- backend/Makefile | 2 +- backend/cmd/server/main.go | 4 +- backend/internal/api/health.go | 25 -- .../admin/imports/imports.go} | 147 +++---- .../admin/radioreference}/radioreference.go | 23 +- .../admin/transcriptions/transcriptions.go} | 50 +-- .../{api/admin.go => handler/auth/auth.go} | 61 ++- .../{api => handler/bookmarks}/bookmarks.go | 24 +- .../internal/{api => handler/calls}/calls.go | 390 +++++------------- .../calls/limiter_internal_test.go} | 24 +- backend/internal/handler/health/health.go | 33 ++ .../{api => handler/routes}/admin_test.go | 6 +- .../{api => handler/routes}/auth_test.go | 2 +- .../{api => handler/routes}/bookmarks_test.go | 2 +- .../{api => handler/routes}/calls_test.go | 6 +- .../routes}/listener_ws_alias_test.go | 2 +- .../routes}/radioreference_test.go | 2 +- .../{api => handler/routes}/refresh_test.go | 2 +- .../{api => handler/routes}/routes.go | 105 +++-- .../{api => handler/routes}/setup_test.go | 2 +- .../{api => handler/routes}/share_test.go | 6 +- .../routes}/testhelpers_test.go | 6 +- .../internal/{api => handler/setup}/setup.go | 18 +- .../handler/share/limiter_internal_test.go | 28 ++ .../internal/{api => handler/share}/share.go | 103 ++++- .../internal/handler/shared/call_search.go | 34 ++ .../shared}/content_disposition.go | 6 +- .../shared}/content_disposition_test.go | 4 +- .../shared/dto.go} | 4 +- backend/internal/handler/shared/grants.go | 69 ++++ 
backend/internal/handler/shared/resolve.go | 52 +++ backend/internal/handler/shared/settings.go | 18 + 37 files changed, 695 insertions(+), 580 deletions(-) delete mode 100644 backend/internal/api/health.go rename backend/internal/{api/import.go => handler/admin/imports/imports.go} (79%) rename backend/internal/{api => handler/admin/radioreference}/radioreference.go (93%) rename backend/internal/{api/crud.go => handler/admin/transcriptions/transcriptions.go} (59%) rename backend/internal/{api/admin.go => handler/auth/auth.go} (91%) rename backend/internal/{api => handler/bookmarks}/bookmarks.go (92%) rename backend/internal/{api => handler/calls}/calls.go (84%) rename backend/internal/{api/calls_limiter_internal_test.go => handler/calls/limiter_internal_test.go} (76%) create mode 100644 backend/internal/handler/health/health.go rename backend/internal/{api => handler/routes}/admin_test.go (98%) rename backend/internal/{api => handler/routes}/auth_test.go (99%) rename backend/internal/{api => handler/routes}/bookmarks_test.go (99%) rename backend/internal/{api => handler/routes}/calls_test.go (99%) rename backend/internal/{api => handler/routes}/listener_ws_alias_test.go (97%) rename backend/internal/{api => handler/routes}/radioreference_test.go (99%) rename backend/internal/{api => handler/routes}/refresh_test.go (99%) rename backend/internal/{api => handler/routes}/routes.go (66%) rename backend/internal/{api => handler/routes}/setup_test.go (99%) rename backend/internal/{api => handler/routes}/share_test.go (97%) rename backend/internal/{api => handler/routes}/testhelpers_test.go (93%) rename backend/internal/{api => handler/setup}/setup.go (91%) create mode 100644 backend/internal/handler/share/limiter_internal_test.go rename backend/internal/{api => handler/share}/share.go (82%) create mode 100644 backend/internal/handler/shared/call_search.go rename backend/internal/{api => handler/shared}/content_disposition.go (87%) rename backend/internal/{api => 
handler/shared}/content_disposition_test.go (97%) rename backend/internal/{api/swagger_models.go => handler/shared/dto.go} (97%) create mode 100644 backend/internal/handler/shared/grants.go create mode 100644 backend/internal/handler/shared/resolve.go create mode 100644 backend/internal/handler/shared/settings.go diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5ed14b7..33d1da7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -43,7 +43,7 @@ jobs: run: go install github.com/swaggo/swag/cmd/swag@latest - name: Generate Swagger docs working-directory: backend - run: swag init -d cmd/server,internal/api -g main.go --parseDependency --parseInternal -o docs + run: swag init -d cmd/server,internal/handler -g main.go --parseDependency --parseInternal -o docs - name: Test run: cd backend && go test ./... diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 978f692..123b264 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -45,7 +45,7 @@ jobs: - name: Generate Swagger docs if: matrix.language == 'go' working-directory: backend - run: swag init -d cmd/server,internal/api -g main.go --parseDependency --parseInternal -o docs + run: swag init -d cmd/server,internal/handler -g main.go --parseDependency --parseInternal -o docs - name: Initialize CodeQL uses: github/codeql-action/init@v4 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 87a21b0..b8c0a46 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -57,7 +57,7 @@ jobs: - name: Generate Swagger docs working-directory: backend - run: swag init -d cmd/server,internal/api -g main.go --parseDependency --parseInternal -o docs + run: swag init -d cmd/server,internal/handler -g main.go --parseDependency --parseInternal -o docs - name: Build binary working-directory: backend diff --git a/CHANGELOG.md b/CHANGELOG.md index 1d9abdb..b819201 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -17,6 +17,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed +- HTTP handlers have been decomposed from the monolithic `internal/api` + package into feature-scoped subpackages under `internal/handler/` + (`auth`, `calls`, `bookmarks`, `share`, `setup`, `health`, + `admin/{imports,radioreference,transcriptions}`). Route registration + now lives in `internal/handler/routes`, and shared swagger DTOs and + helpers live in `internal/handler/shared`. No route paths, methods, + middleware ordering, response shapes, or status codes changed. - Admin CRUD business logic has been extracted from `internal/ws` into a new transport-agnostic `internal/admin` package. The WebSocket layer now only routes `ADM_REQ` frames to `admin.Operations` methods; the diff --git a/backend/.golangci.yml b/backend/.golangci.yml index c38c365..eac7edf 100644 --- a/backend/.golangci.yml +++ b/backend/.golangci.yml @@ -50,7 +50,7 @@ issues: - "internal/db/models\\.go$" - "internal/db/db\\.go$" - "internal/db/querier\\.go$" - - "internal/api/swagger_models\\.go$" # swagger stubs — field names must mirror wire shape for doc readability + - "internal/handler/shared/dto\\.go$" # swagger stubs — field names must mirror wire shape for doc readability exclude-rules: # Test files: relax a handful of rules that don't add value there. 
- path: _test\.go diff --git a/backend/Makefile b/backend/Makefile index 703d484..33adad4 100644 --- a/backend/Makefile +++ b/backend/Makefile @@ -9,7 +9,7 @@ VERSION?=$(shell git describe --tags --always --dirty 2>/dev/null || echo dev) LDFLAGS=-s -w -X github.com/openscanner/openscanner/internal/config.Version=$(VERSION) build: - swag init -d cmd/server,internal/api -g main.go --parseDependency --parseInternal -o docs + swag init -d cmd/server,internal/handler -g main.go --parseDependency --parseInternal -o docs go build -ldflags="$(LDFLAGS)" -o $(OUTPUT) $(CMD) dev: diff --git a/backend/cmd/server/main.go b/backend/cmd/server/main.go index bf7e7d7..bf7eeda 100644 --- a/backend/cmd/server/main.go +++ b/backend/cmd/server/main.go @@ -37,7 +37,7 @@ import ( "github.com/gin-gonic/gin" "github.com/kardianos/service" "github.com/openscanner/openscanner/internal/admin" - "github.com/openscanner/openscanner/internal/api" + "github.com/openscanner/openscanner/internal/handler/routes" "github.com/openscanner/openscanner/internal/audio" "github.com/openscanner/openscanner/internal/auth" "github.com/openscanner/openscanner/internal/cli" @@ -892,7 +892,7 @@ func (p *program) run() { // Start transcription result consumer (stores results in DB, broadcasts TRN). go consumeTranscriptionResults(ctx, queries, hub, transcriberMgr) - api.RegisterRoutes(router, api.Deps{ + routes.RegisterRoutes(router, routes.Deps{ Queries: queries, RateLimiter: rateLimiter, Processor: processor, diff --git a/backend/internal/api/health.go b/backend/internal/api/health.go deleted file mode 100644 index 3c96762..0000000 --- a/backend/internal/api/health.go +++ /dev/null @@ -1,25 +0,0 @@ -// Package api — health check endpoint for readiness probes and Docker HEALTHCHECK. -package api - -import ( - "net/http" - - "github.com/gin-gonic/gin" -) - -// RegisterHealth godoc -// -// @Summary Health check -// @Description Returns server status and version for readiness probes and Docker HEALTHCHECK. 
-// @Tags Health -// @Produce json -// @Success 200 {object} object{status=string,version=string} "Server is healthy" -// @Router /health [get] -func RegisterHealth(rg *gin.RouterGroup, version string) { - rg.GET("/health", func(c *gin.Context) { - c.JSON(http.StatusOK, gin.H{ - "status": "ok", - "version": version, - }) - }) -} diff --git a/backend/internal/api/import.go b/backend/internal/handler/admin/imports/imports.go similarity index 79% rename from backend/internal/api/import.go rename to backend/internal/handler/admin/imports/imports.go index 27ed409..e11304b 100644 --- a/backend/internal/api/import.go +++ b/backend/internal/handler/admin/imports/imports.go @@ -1,4 +1,6 @@ -package api +// Package imports provides the admin CSV import endpoints +// (talkgroups, units, groups, tags). +package imports import ( "context" @@ -14,8 +16,25 @@ import ( "github.com/gin-gonic/gin" "github.com/openscanner/openscanner/internal/db" + "github.com/openscanner/openscanner/internal/handler/shared" ) +// AdminBroadcaster is the subset of ws.Hub used to broadcast admin events. +type AdminBroadcaster interface { + BroadcastAdminEvent(event string, payload any) +} + +// Handler serves the admin CSV import endpoints. +type Handler struct { + queries *db.Queries + hub AdminBroadcaster +} + +// New constructs an imports Handler. +func New(queries *db.Queries, hub AdminBroadcaster) *Handler { + return &Handler{queries: queries, hub: hub} +} + // tgColumnMap maps logical field names to their CSV column index. // A value of -1 means the column is not present. 
type tgColumnMap struct { @@ -51,21 +70,16 @@ func detectTgColumns(header []string) *tgColumnMap { for i, raw := range header { col := strings.ToLower(strings.TrimSpace(raw)) switch col { - // OpenScanner + rdio-scanner: decimal talkgroup ID case "talkgroup_id", "dec", "decimal": m.talkgroupID = i - // OpenScanner label, rdio-scanner alpha_tag case "label", "alpha_tag", "alpha tag": m.label = i - // OpenScanner name, rdio-scanner description case "name", "description": m.name = i - // OpenScanner integer FK columns case "tag_id": m.tagID = i case "group_id": m.groupID = i - // rdio-scanner text name columns case "tag", "category": m.tagName = i case "group", "service_type": @@ -76,14 +90,11 @@ func detectTgColumns(header []string) *tgColumnMap { m.led = i case "order", "priority": m.order = i - // skip unknown columns (e.g. "hex") } } return m } -// defaultTgColumns returns the positional column map matching -// OpenScanner's native CSV format (no header present). func defaultTgColumns() *tgColumnMap { return &tgColumnMap{ talkgroupID: 0, label: 1, name: 2, @@ -92,7 +103,6 @@ func defaultTgColumns() *tgColumnMap { } } -// col returns the trimmed value at index i, or "" if out of range. func col(record []string, i int) string { if i < 0 || i >= len(record) { return "" @@ -101,23 +111,21 @@ func col(record []string, i int) string { } // ImportTalkgroups handles POST /api/admin/import/talkgroups. -// Accepts a multipart CSV file, a system_id form field, and an optional mode field. -// Supports both OpenScanner and rdio-scanner CSV formats via header detection. // -// @Summary Import talkgroups from CSV -// @Description Accepts a multipart CSV file with talkgroup data and a system_id form field. Supports OpenScanner format (talkgroup_id, label, name, tag_id, group_id, frequency, led, order) and rdio-scanner format (dec, hex, alpha_tag, description, tag, group, priority). Header rows are auto-detected; tag/group names are resolved to IDs automatically. 
Use mode=overwrite (default) to update existing talkgroups or mode=skip to leave existing talkgroups unchanged. -// @Tags Admin -// @Accept multipart/form-data -// @Produce json -// @Param system_id formData int true "System ID to import talkgroups into" -// @Param file formData file true "CSV file" -// @Param mode formData string false "Duplicate handling: overwrite (default) or skip" -// @Success 200 {object} object "inserted, updated, skipped counts" -// @Failure 400 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse -// @Security BearerAuth -// @Router /admin/import/talkgroups [post] -func (h *AdminHandler) ImportTalkgroups(c *gin.Context) { +// @Summary Import talkgroups from CSV +// @Description Accepts a multipart CSV file with talkgroup data and a system_id form field. Supports OpenScanner format (talkgroup_id, label, name, tag_id, group_id, frequency, led, order) and rdio-scanner format (dec, hex, alpha_tag, description, tag, group, priority). Header rows are auto-detected; tag/group names are resolved to IDs automatically. Use mode=overwrite (default) to update existing talkgroups or mode=skip to leave existing talkgroups unchanged. +// @Tags Admin +// @Accept multipart/form-data +// @Produce json +// @Param system_id formData int true "System ID to import talkgroups into" +// @Param file formData file true "CSV file" +// @Param mode formData string false "Duplicate handling: overwrite (default) or skip" +// @Success 200 {object} object "inserted, updated, skipped counts" +// @Failure 400 {object} shared.ErrorResponse +// @Failure 500 {object} shared.ErrorResponse +// @Security BearerAuth +// @Router /admin/import/talkgroups [post] +func (h *Handler) ImportTalkgroups(c *gin.Context) { ctx := c.Request.Context() systemIDStr := c.PostForm("system_id") @@ -131,7 +139,6 @@ func (h *AdminHandler) ImportTalkgroups(c *gin.Context) { return } - // Verify system exists. 
if _, err := h.queries.GetSystem(ctx, systemID); err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": "system not found"}) return @@ -151,10 +158,9 @@ func (h *AdminHandler) ImportTalkgroups(c *gin.Context) { defer file.Close() reader := csv.NewReader(file) - reader.FieldsPerRecord = -1 // allow variable number of fields + reader.FieldsPerRecord = -1 reader.TrimLeadingSpace = true - // Read the first non-blank row to detect column layout. var columns *tgColumnMap var firstDataRow []string for { @@ -176,7 +182,6 @@ func (h *AdminHandler) ImportTalkgroups(c *gin.Context) { } columns = detectTgColumns(record) if columns == nil { - // First row is data, not a header — use default positional layout. columns = defaultTgColumns() firstDataRow = record } @@ -200,10 +205,9 @@ func (h *AdminHandler) ImportTalkgroups(c *gin.Context) { tgID, err := strconv.ParseInt(tgIDStr, 10, 64) if err != nil { failed++ - return nil //nolint:nilerr // invalid talkgroup_id: count and skip + return nil //nolint:nilerr } - // Check if talkgroup already exists. _, existsErr := h.queries.GetTalkgroupBySystemAndTGID(ctx, db.GetTalkgroupBySystemAndTGIDParams{ SystemID: systemID, TalkgroupID: tgID, @@ -227,30 +231,28 @@ func (h *AdminHandler) ImportTalkgroups(c *gin.Context) { params.Name = sql.NullString{String: v, Valid: true} } - // Tag: prefer integer FK (but verify it exists), fall back to name. 
if v := col(record, columns.tagID); v != "" { if id, err := strconv.ParseInt(v, 10, 64); err == nil { if _, gerr := h.queries.GetTag(ctx, id); gerr == nil { params.TagID = sql.NullInt64{Int64: id, Valid: true} } else if name := col(record, columns.tagName); name != "" { - params.TagID = resolveTagID(ctx, h.queries, name) + params.TagID = shared.ResolveTagID(ctx, h.queries, name) } } } else if v := col(record, columns.tagName); v != "" { - params.TagID = resolveTagID(ctx, h.queries, v) + params.TagID = shared.ResolveTagID(ctx, h.queries, v) } - // Group: prefer integer FK (but verify it exists), fall back to name. if v := col(record, columns.groupID); v != "" { if id, err := strconv.ParseInt(v, 10, 64); err == nil { if _, gerr := h.queries.GetGroup(ctx, id); gerr == nil { params.GroupID = sql.NullInt64{Int64: id, Valid: true} } else if name := col(record, columns.groupName); name != "" { - params.GroupID = resolveGroupID(ctx, h.queries, name) + params.GroupID = shared.ResolveGroupID(ctx, h.queries, name) } } } else if v := col(record, columns.groupName); v != "" { - params.GroupID = resolveGroupID(ctx, h.queries, v) + params.GroupID = shared.ResolveGroupID(ctx, h.queries, v) } if v := col(record, columns.frequency); v != "" { @@ -278,7 +280,6 @@ func (h *AdminHandler) ImportTalkgroups(c *gin.Context) { return nil } - // Process the first data row if header detection consumed it. 
if firstDataRow != nil { if err := processRow(firstDataRow); err != nil { slog.Error("failed to upsert talkgroup", "error", err) @@ -288,8 +289,8 @@ func (h *AdminHandler) ImportTalkgroups(c *gin.Context) { } for { - if inserted+updated+skipped >= maxImportRows { - slog.Warn("CSV import row limit reached", "limit", maxImportRows) + if inserted+updated+skipped >= shared.MaxImportRows { + slog.Warn("CSV import row limit reached", "limit", shared.MaxImportRows) break } @@ -324,22 +325,21 @@ func (h *AdminHandler) ImportTalkgroups(c *gin.Context) { } // ImportUnits handles POST /api/admin/import/units. -// Accepts a multipart CSV file, a system_id form field, and an optional mode field. // -// @Summary Import units from CSV -// @Description Accepts a multipart CSV file with unit data and a system_id form field. Columns: unit_id, label, order. Header rows are auto-skipped. Use mode=overwrite (default) to update existing units or mode=skip to leave existing units unchanged. -// @Tags Admin -// @Accept multipart/form-data -// @Produce json -// @Param system_id formData int true "System ID to import units into" -// @Param file formData file true "CSV file" -// @Param mode formData string false "Duplicate handling: overwrite (default) or skip" -// @Success 200 {object} object "inserted, updated, skipped counts" -// @Failure 400 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse -// @Security BearerAuth -// @Router /admin/import/units [post] -func (h *AdminHandler) ImportUnits(c *gin.Context) { +// @Summary Import units from CSV +// @Description Accepts a multipart CSV file with unit data and a system_id form field. Columns: unit_id, label, order. Header rows are auto-skipped. Use mode=overwrite (default) to update existing units or mode=skip to leave existing units unchanged. 
+// @Tags Admin +// @Accept multipart/form-data +// @Produce json +// @Param system_id formData int true "System ID to import units into" +// @Param file formData file true "CSV file" +// @Param mode formData string false "Duplicate handling: overwrite (default) or skip" +// @Success 200 {object} object "inserted, updated, skipped counts" +// @Failure 400 {object} shared.ErrorResponse +// @Failure 500 {object} shared.ErrorResponse +// @Security BearerAuth +// @Router /admin/import/units [post] +func (h *Handler) ImportUnits(c *gin.Context) { ctx := c.Request.Context() systemIDStr := c.PostForm("system_id") @@ -353,7 +353,6 @@ func (h *AdminHandler) ImportUnits(c *gin.Context) { return } - // Verify system exists. if _, err := h.queries.GetSystem(ctx, systemID); err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": "system not found"}) return @@ -376,8 +375,6 @@ func (h *AdminHandler) ImportUnits(c *gin.Context) { reader.FieldsPerRecord = -1 reader.TrimLeadingSpace = true - // Column layout: positional default is [unit_id, label, order]. If the - // first non-blank row is a header, parse its column positions. unitIDCol, labelCol, orderCol := 0, 1, 2 var firstDataRow []string @@ -402,7 +399,6 @@ func (h *AdminHandler) ImportUnits(c *gin.Context) { col0 := strings.TrimSpace(record[0]) if len(col0) > 0 && !unicode.IsDigit(rune(col0[0])) { - // Header row — parse column positions. 
unitIDCol, labelCol, orderCol = -1, -1, -1 for i, raw := range record { switch strings.ToLower(strings.TrimSpace(raw)) { @@ -480,8 +476,8 @@ func (h *AdminHandler) ImportUnits(c *gin.Context) { } for { - if inserted+updated+skipped >= maxImportRows { - slog.Warn("CSV import row limit reached", "limit", maxImportRows) + if inserted+updated+skipped >= shared.MaxImportRows { + slog.Warn("CSV import row limit reached", "limit", shared.MaxImportRows) break } record, err := reader.Read() @@ -513,12 +509,8 @@ func (h *AdminHandler) ImportUnits(c *gin.Context) { }) } -// importLabelOnly is the shared core for groups and tags. Both are -// global label-only entities, so the import is just "insert if a -// row with this label doesn't already exist". `mode` is honoured but -// only `inserted` vs `skipped` apply (there are no other fields to -// overwrite). -func (h *AdminHandler) importLabelOnly(c *gin.Context, kind string, +// importLabelOnly is the shared core for groups and tags. +func (h *Handler) importLabelOnly(c *gin.Context, kind string, getByLabel func(ctx context.Context, label string) (int64, bool, error), create func(ctx context.Context, label string) error, ) { @@ -535,8 +527,6 @@ func (h *AdminHandler) importLabelOnly(c *gin.Context, kind string, reader.FieldsPerRecord = -1 reader.TrimLeadingSpace = true - // Optional header. If first non-blank cell looks like the word - // "label" / "name", treat as header; otherwise treat as data. 
labelCol := 0 headerSeen := false @@ -569,8 +559,8 @@ func (h *AdminHandler) importLabelOnly(c *gin.Context, kind string, } for { - if inserted+skipped >= maxImportRows { - slog.Warn("CSV import row limit reached", "limit", maxImportRows) + if inserted+skipped >= shared.MaxImportRows { + slog.Warn("CSV import row limit reached", "limit", shared.MaxImportRows) break } record, err := reader.Read() @@ -589,7 +579,6 @@ func (h *AdminHandler) importLabelOnly(c *gin.Context, kind string, headerSeen = true first := strings.ToLower(strings.TrimSpace(record[0])) if first == "label" || first == "name" || first == "tag" || first == "group" { - // Header row — find the label column and skip the row. for i, raw := range record { col := strings.ToLower(strings.TrimSpace(raw)) if col == "label" || col == "name" || col == "tag" || col == "group" { @@ -633,11 +622,11 @@ func (h *AdminHandler) importLabelOnly(c *gin.Context, kind string, // @Produce json // @Param file formData file true "CSV file" // @Success 200 {object} object "inserted, skipped, failed counts" -// @Failure 400 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse +// @Failure 400 {object} shared.ErrorResponse +// @Failure 500 {object} shared.ErrorResponse // @Security BearerAuth // @Router /admin/import/groups [post] -func (h *AdminHandler) ImportGroups(c *gin.Context) { +func (h *Handler) ImportGroups(c *gin.Context) { h.importLabelOnly(c, "groups", func(ctx context.Context, label string) (int64, bool, error) { g, err := h.queries.GetGroupByLabel(ctx, label) @@ -665,11 +654,11 @@ func (h *AdminHandler) ImportGroups(c *gin.Context) { // @Produce json // @Param file formData file true "CSV file" // @Success 200 {object} object "inserted, skipped, failed counts" -// @Failure 400 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse +// @Failure 400 {object} shared.ErrorResponse +// @Failure 500 {object} shared.ErrorResponse // @Security BearerAuth // @Router /admin/import/tags [post] -func (h 
*AdminHandler) ImportTags(c *gin.Context) { +func (h *Handler) ImportTags(c *gin.Context) { h.importLabelOnly(c, "tags", func(ctx context.Context, label string) (int64, bool, error) { t, err := h.queries.GetTagByLabel(ctx, label) diff --git a/backend/internal/api/radioreference.go b/backend/internal/handler/admin/radioreference/radioreference.go similarity index 93% rename from backend/internal/api/radioreference.go rename to backend/internal/handler/admin/radioreference/radioreference.go index 1b4e37c..3f5b1ae 100644 --- a/backend/internal/api/radioreference.go +++ b/backend/internal/handler/admin/radioreference/radioreference.go @@ -1,4 +1,5 @@ -package api +// Package radioreference provides the admin RadioReference CSV preview endpoint. +package radioreference import ( "database/sql" @@ -12,6 +13,7 @@ import ( "github.com/gin-gonic/gin" "github.com/openscanner/openscanner/internal/db" + "github.com/openscanner/openscanner/internal/handler/shared" ) const rrMergeModeFillMissing = "fill_missing" @@ -55,7 +57,17 @@ type RRPreviewResponse struct { Rows []RRPreviewRow `json:"rows"` } // @name RRPreviewResponse -// RadioReferencePreviewCSV handles POST /api/admin/radioreference/preview/csv. +// Handler serves the RadioReference CSV preview endpoint. +type Handler struct { + queries *db.Queries +} + +// New constructs a Handler. +func New(queries *db.Queries) *Handler { + return &Handler{queries: queries} +} + +// PreviewCSV handles POST /api/admin/radioreference/preview/csv. // // @Summary Preview RadioReference CSV enrichment // @Description Upload a RadioReference CSV export and preview which local talkgroups would be enriched. Frequency is never updated. Columns: talkgroup id (decimal/tgid), alpha tag, description, group/category, tag/service type, led, order. 
@@ -65,10 +77,10 @@ type RRPreviewResponse struct { // @Param system_id formData int true "Local system ID to match talkgroups against" // @Param file formData file true "RadioReference CSV file" // @Success 200 {object} RRPreviewResponse -// @Failure 400 {object} ErrorResponse +// @Failure 400 {object} shared.ErrorResponse // @Security BearerAuth // @Router /admin/radioreference/preview/csv [post] -func (h *AdminHandler) RadioReferencePreviewCSV(c *gin.Context) { +func (h *Handler) PreviewCSV(c *gin.Context) { ctx := c.Request.Context() systemID, ok := parseSystemIDForm(c) @@ -106,7 +118,6 @@ func (h *AdminHandler) RadioReferencePreviewCSV(c *gin.Context) { if tgErr != nil { if errors.Is(tgErr, sql.ErrNoRows) { resp.Skipped++ - // Do not add not-found rows to the preview; skipped count is enough. continue } resp.Errors++ @@ -178,7 +189,7 @@ func parseRadioReferenceCSV(r io.Reader) ([]RRTalkgroupCandidate, []RRRowError, if len(record) == 0 || (len(record) == 1 && strings.TrimSpace(record[0]) == "") { continue } - if len(candidates) >= maxImportRows { + if len(candidates) >= shared.MaxImportRows { break } diff --git a/backend/internal/api/crud.go b/backend/internal/handler/admin/transcriptions/transcriptions.go similarity index 59% rename from backend/internal/api/crud.go rename to backend/internal/handler/admin/transcriptions/transcriptions.go index 3fa31b2..47a3949 100644 --- a/backend/internal/api/crud.go +++ b/backend/internal/handler/admin/transcriptions/transcriptions.go @@ -1,32 +1,15 @@ -package api +// Package transcriptions provides the admin transcription status endpoint. +package transcriptions import ( - "database/sql" "log/slog" "net/http" - "strings" "github.com/gin-gonic/gin" "github.com/openscanner/openscanner/internal/db" - "github.com/openscanner/openscanner/internal/ws" ) -const maxImportRows = 100_000 // CSV import safety limit. - -// AdminHandler handles admin CRUD endpoints. 
-type AdminHandler struct { - queries *db.Queries - hub *ws.Hub - sqlDB *sql.DB - dwReload DirMonitorReloader - dsReload DownstreamReloader - recordingsDir string - ffmpegAvailable bool - fdkAACAvailable bool - whisperAvailable bool -} - -// transcriptionStatusResponse is the JSON shape returned by GetTranscriptionStatus. +// transcriptionStatusResponse is the JSON shape returned by GetStatus. type transcriptionStatusResponse struct { Enabled bool `json:"enabled"` URL string `json:"url"` @@ -36,8 +19,18 @@ type transcriptionStatusResponse struct { WhisperAvailable bool `json:"whisperAvailable"` } // @name TranscriptionStatusResponse -// GetTranscriptionStatus handles GET /api/admin/transcriptions/status. -// Returns the current transcription configuration and statistics. +// Handler serves the transcription status endpoint. +type Handler struct { + queries *db.Queries + whisperAvailable bool +} + +// New constructs a Handler. +func New(queries *db.Queries, whisperAvailable bool) *Handler { + return &Handler{queries: queries, whisperAvailable: whisperAvailable} +} + +// GetStatus handles GET /api/admin/transcriptions/status. // // @Summary Get transcription status // @Description Returns transcription settings, total count, and whisper availability. @@ -45,9 +38,9 @@ type transcriptionStatusResponse struct { // @Produce json // @Security BearerAuth // @Success 200 {object} transcriptionStatusResponse -// @Failure 500 {object} ErrorResponse +// @Failure 500 {object} shared.ErrorResponse // @Router /admin/transcriptions/status [get] -func (h *AdminHandler) GetTranscriptionStatus(c *gin.Context) { +func (h *Handler) GetStatus(c *gin.Context) { ctx := c.Request.Context() getSetting := func(key string) string { @@ -74,12 +67,3 @@ func (h *AdminHandler) GetTranscriptionStatus(c *gin.Context) { WhisperAvailable: h.whisperAvailable, }) } - -// NewAdminHandler constructs an AdminHandler. 
-func NewAdminHandler(queries *db.Queries, hub *ws.Hub, sqlDB *sql.DB, dwReload DirMonitorReloader, dsReload DownstreamReloader, recordingsDir ...string) *AdminHandler { - rd := "." - if len(recordingsDir) > 0 && strings.TrimSpace(recordingsDir[0]) != "" { - rd = recordingsDir[0] - } - return &AdminHandler{queries: queries, hub: hub, sqlDB: sqlDB, dwReload: dwReload, dsReload: dsReload, recordingsDir: rd} -} diff --git a/backend/internal/api/admin.go b/backend/internal/handler/auth/auth.go similarity index 91% rename from backend/internal/api/admin.go rename to backend/internal/handler/auth/auth.go index ef03ebd..e7cc0e9 100644 --- a/backend/internal/api/admin.go +++ b/backend/internal/handler/auth/auth.go @@ -1,5 +1,6 @@ -// Package api — admin handlers (auth, config, CRUD endpoints). -package api +// Package auth contains authentication handlers (login, refresh, logout, password change, /me, TG selection) +// and the Swagger docs session endpoint that mints a short-lived HTTP-only cookie. +package auth import ( "context" @@ -15,22 +16,22 @@ import ( "github.com/openscanner/openscanner/internal/db" ) -// AuthHandler handles authentication endpoints. -type AuthHandler struct { - queries *db.Queries - rateLimiter *auth.RateLimiter - hub WSDisconnecter -} - -// WSDisconnecter is the subset of ws.Hub used by AuthHandler for session eviction. +// WSDisconnecter is the subset of ws.Hub used by Handler for session eviction. type WSDisconnecter interface { DisconnectByUser(userID int64) DisconnectByJTI(jti string) } -// NewAuthHandler constructs an AuthHandler. -func NewAuthHandler(queries *db.Queries, rateLimiter *auth.RateLimiter, hub WSDisconnecter) *AuthHandler { - return &AuthHandler{ +// Handler handles authentication endpoints. +type Handler struct { + queries *db.Queries + rateLimiter *auth.RateLimiter + hub WSDisconnecter +} + +// New constructs an auth Handler. 
+func New(queries *db.Queries, rateLimiter *auth.RateLimiter, hub WSDisconnecter) *Handler { + return &Handler{ queries: queries, rateLimiter: rateLimiter, hub: hub, @@ -70,7 +71,7 @@ type loginResponse struct { // @Failure 429 {object} ErrorResponse // @Failure 500 {object} ErrorResponse // @Router /auth/login [post] -func (h *AuthHandler) PostLogin(c *gin.Context) { +func (h *Handler) PostLogin(c *gin.Context) { ip := c.ClientIP() var req loginRequest @@ -198,7 +199,7 @@ func (h *AuthHandler) PostLogin(c *gin.Context) { // logAuthEvent writes an authentication event to the logs table for auditing // (OWASP A09 — security logging & monitoring). -func (h *AuthHandler) logAuthEvent(ctx context.Context, level, message, ip string) { +func (h *Handler) logAuthEvent(ctx context.Context, level, message, ip string) { _ = h.queries.CreateLog(ctx, db.CreateLogParams{ DateTime: time.Now().Unix(), Level: level, @@ -218,7 +219,7 @@ func (h *AuthHandler) logAuthEvent(ctx context.Context, level, message, ip strin // @Failure 401 {object} ErrorResponse // @Failure 500 {object} ErrorResponse // @Router /auth/logout [post] -func (h *AuthHandler) PostLogout(c *gin.Context) { +func (h *Handler) PostLogout(c *gin.Context) { if jtiVal, ok := c.Get("jti"); ok { if jti, ok := jtiVal.(string); ok { auth.Tokens.Revoke(jti) @@ -257,7 +258,7 @@ type refreshResponse struct { // @Failure 401 {object} ErrorResponse // @Failure 500 {object} ErrorResponse // @Router /auth/refresh [post] -func (h *AuthHandler) PostRefresh(c *gin.Context) { +func (h *Handler) PostRefresh(c *gin.Context) { rawToken, err := c.Cookie(auth.RefreshCookieName) if err != nil || rawToken == "" { c.JSON(http.StatusUnauthorized, gin.H{"error": "no refresh token"}) @@ -378,7 +379,7 @@ type changePasswordRequest struct { // @Failure 401 {object} ErrorResponse // @Failure 500 {object} ErrorResponse // @Router /auth/password [put] -func (h *AuthHandler) PutPassword(c *gin.Context) { +func (h *Handler) PutPassword(c *gin.Context) 
{ userIDVal, _ := c.Get("userID") userID, _ := userIDVal.(int64) @@ -454,7 +455,7 @@ func (h *AuthHandler) PutPassword(c *gin.Context) { // @Failure 401 {object} ErrorResponse // @Failure 500 {object} ErrorResponse // @Router /auth/me [get] -func (h *AuthHandler) GetMe(c *gin.Context) { +func (h *Handler) GetMe(c *gin.Context) { userID, _ := c.Get("userID") username, _ := c.Get("username") role, _ := c.Get("role") @@ -497,7 +498,7 @@ type avoidTGEntry struct { // @Failure 401 {object} ErrorResponse // @Failure 500 {object} ErrorResponse // @Router /auth/tg-selection [get] -func (h *AuthHandler) GetTGSelection(c *gin.Context) { +func (h *Handler) GetTGSelection(c *gin.Context) { userIDVal, _ := c.Get("userID") userID, _ := userIDVal.(int64) @@ -549,7 +550,7 @@ func (h *AuthHandler) GetTGSelection(c *gin.Context) { // @Failure 401 {object} ErrorResponse // @Failure 500 {object} ErrorResponse // @Router /auth/tg-selection [put] -func (h *AuthHandler) PutTGSelection(c *gin.Context) { +func (h *Handler) PutTGSelection(c *gin.Context) { userIDVal, _ := c.Get("userID") userID, _ := userIDVal.(int64) @@ -583,3 +584,21 @@ func (h *AuthHandler) PutTGSelection(c *gin.Context) { c.JSON(http.StatusOK, gin.H{"ok": true}) } + +// PostDocsSession handles POST /api/admin/docs/session. +// It mints a short-lived HTTP-only cookie so Swagger UI can be opened in a new +// browser tab without exposing the JWT. Kept in the auth package because it is +// fundamentally a session-cookie-minting endpoint. +// +// @Summary Create Swagger docs session cookie +// @Description Issues a short-lived HTTP-only cookie used to access /api/admin/docs. 
+// @Tags Admin +// @Produce json +// @Success 200 {object} object{ok=bool} +// @Security BearerAuth +// @Router /admin/docs/session [post] +func PostDocsSession(c *gin.Context) { + secure := c.Request.TLS != nil || c.GetHeader("X-Forwarded-Proto") == "https" + auth.SetSwaggerCookie(c, secure) + c.JSON(http.StatusOK, gin.H{"ok": true}) +} diff --git a/backend/internal/api/bookmarks.go b/backend/internal/handler/bookmarks/bookmarks.go similarity index 92% rename from backend/internal/api/bookmarks.go rename to backend/internal/handler/bookmarks/bookmarks.go index 260034d..0f4056f 100644 --- a/backend/internal/api/bookmarks.go +++ b/backend/internal/handler/bookmarks/bookmarks.go @@ -1,4 +1,5 @@ -package api +// Package bookmarks provides per-user call bookmark endpoints. +package bookmarks import ( "database/sql" @@ -10,12 +11,19 @@ import ( "github.com/gin-gonic/gin" "github.com/openscanner/openscanner/internal/auth" "github.com/openscanner/openscanner/internal/db" + "github.com/openscanner/openscanner/internal/handler/shared" ) -type BookmarkHandler struct { +// Handler serves bookmark endpoints. +type Handler struct { queries *db.Queries } +// New constructs a Handler. +func New(queries *db.Queries) *Handler { + return &Handler{queries: queries} +} + // ToggleBookmarkRequest is the request body for POST /api/bookmarks. type ToggleBookmarkRequest struct { CallID int64 `json:"callId" example:"42"` @@ -34,7 +42,7 @@ type BookmarkIDsResponse struct { // BookmarkCallsResponse is returned by GET /api/bookmarks/calls. type BookmarkCallsResponse struct { - Calls []CallSearchResult `json:"calls"` + Calls []shared.CallSearchResult `json:"calls"` } // @name BookmarkCallsResponse // PostToggleBookmark handles POST /api/bookmarks — toggles a bookmark for the authenticated user. 
@@ -51,7 +59,7 @@ type BookmarkCallsResponse struct { // @Failure 401 {object} ErrorResponse "Authentication required" // @Failure 500 {object} ErrorResponse "Internal server error" // @Router /bookmarks [post] -func (h *BookmarkHandler) PostToggleBookmark(c *gin.Context) { +func (h *Handler) PostToggleBookmark(c *gin.Context) { var req struct { CallID int64 `json:"callId"` } @@ -144,7 +152,7 @@ func (h *BookmarkHandler) PostToggleBookmark(c *gin.Context) { // @Failure 401 {object} ErrorResponse "Authentication required" // @Failure 500 {object} ErrorResponse "Internal server error" // @Router /bookmarks [get] -func (h *BookmarkHandler) GetBookmarkIDs(c *gin.Context) { +func (h *Handler) GetBookmarkIDs(c *gin.Context) { uid, _ := c.Get("userID") userID := uid.(int64) @@ -170,7 +178,7 @@ func (h *BookmarkHandler) GetBookmarkIDs(c *gin.Context) { // @Failure 401 {object} ErrorResponse "Authentication required" // @Failure 500 {object} ErrorResponse "Internal server error" // @Router /bookmarks/calls [get] -func (h *BookmarkHandler) GetBookmarkCalls(c *gin.Context) { +func (h *Handler) GetBookmarkCalls(c *gin.Context) { uid, _ := c.Get("userID") userID := uid.(int64) @@ -180,9 +188,9 @@ func (h *BookmarkHandler) GetBookmarkCalls(c *gin.Context) { return } - results := make([]CallSearchResult, 0, len(rows)) + results := make([]shared.CallSearchResult, 0, len(rows)) for _, row := range rows { - r := CallSearchResult{ + r := shared.CallSearchResult{ ID: row.ID, AudioName: row.AudioName, AudioType: row.AudioType, diff --git a/backend/internal/api/calls.go b/backend/internal/handler/calls/calls.go similarity index 84% rename from backend/internal/api/calls.go rename to backend/internal/handler/calls/calls.go index 2ba0fe1..035a3e5 100644 --- a/backend/internal/api/calls.go +++ b/backend/internal/handler/calls/calls.go @@ -1,5 +1,6 @@ -// Package api — call upload (POST /api/call-upload, /api/trunk-recorder-call-upload). 
-package api +// Package calls — call upload (POST /api/call-upload, /api/trunk-recorder-call-upload) +// plus the authenticated call search, audio, and transcript endpoints. +package calls import ( "context" @@ -18,9 +19,9 @@ import ( "github.com/gin-gonic/gin" "github.com/openscanner/openscanner/internal/audio" - "github.com/openscanner/openscanner/internal/auth" "github.com/openscanner/openscanner/internal/db" "github.com/openscanner/openscanner/internal/downstream" + "github.com/openscanner/openscanner/internal/handler/shared" "github.com/openscanner/openscanner/internal/ws" ) @@ -28,9 +29,13 @@ const ( defaultCallRatePerMin = 60 maxCallRatePerMin = 600 rateWindowDuration = time.Minute - shareRatePerMin = 10 ) +// DownstreamNotifier sends call events to downstream pushers. +type DownstreamNotifier interface { + Notify(event downstream.CallEvent) +} + // apiKeyLimiter is a per-API-key sliding-window rate limiter. type apiKeyLimiter struct { mu sync.Mutex @@ -54,33 +59,31 @@ func (l *apiKeyLimiter) allow() bool { return true } -// CallHandler handles call upload endpoints. -type CallHandler struct { - queries *db.Queries - processor *audio.Processor - hub *ws.Hub - dsNotifier DownstreamNotifier - transcriber audio.Transcriber // nil when transcription is disabled - mu sync.Mutex - limiters map[int64]*apiKeyLimiter - shareMu sync.Mutex - shareLimiters map[int64]*apiKeyLimiter +// Handler handles call upload and archive endpoints. +type Handler struct { + queries *db.Queries + processor *audio.Processor + hub *ws.Hub + dsNotifier DownstreamNotifier + transcriber audio.Transcriber // nil when transcription is disabled + + mu sync.Mutex + limiters map[int64]*apiKeyLimiter } -// NewCallHandler creates a CallHandler. 
-func NewCallHandler(queries *db.Queries, processor *audio.Processor, hub *ws.Hub, dsNotifier DownstreamNotifier, transcriber audio.Transcriber) *CallHandler { - return &CallHandler{ - queries: queries, - processor: processor, - hub: hub, - dsNotifier: dsNotifier, - transcriber: transcriber, - limiters: make(map[int64]*apiKeyLimiter), - shareLimiters: make(map[int64]*apiKeyLimiter), +// New creates a call Handler. +func New(queries *db.Queries, processor *audio.Processor, hub *ws.Hub, dsNotifier DownstreamNotifier, transcriber audio.Transcriber) *Handler { + return &Handler{ + queries: queries, + processor: processor, + hub: hub, + dsNotifier: dsNotifier, + transcriber: transcriber, + limiters: make(map[int64]*apiKeyLimiter), } } -func (h *CallHandler) getLimiter(apiKeyID int64, rateLimit int) *apiKeyLimiter { +func (h *Handler) getLimiter(apiKeyID int64, rateLimit int) *apiKeyLimiter { h.mu.Lock() defer h.mu.Unlock() @@ -113,102 +116,6 @@ func (h *CallHandler) getLimiter(apiKeyID int64, rateLimit int) *apiKeyLimiter { return l } -// getShareLimiter returns a per-user rate limiter for share creation. -func (h *CallHandler) getShareLimiter(userID int64) *apiKeyLimiter { - h.shareMu.Lock() - defer h.shareMu.Unlock() - - if len(h.shareLimiters) > 100 { - now := time.Now() - for id, l := range h.shareLimiters { - l.mu.Lock() - stale := now.Sub(l.windowStart) >= 2*rateWindowDuration - l.mu.Unlock() - if stale { - delete(h.shareLimiters, id) - } - } - } - - l, ok := h.shareLimiters[userID] - if !ok { - l = &apiKeyLimiter{ - windowStart: time.Now(), - rateLimit: shareRatePerMin, - } - h.shareLimiters[userID] = l - } - return l -} - -// systemGrant mirrors ws.systemGrant for grant-based filtering in REST handlers. -type systemGrant struct { - ID int64 `json:"id"` - Talkgroups []int64 `json:"talkgroups,omitempty"` -} - -// loadUserGrants returns the parsed grants for the authenticated user. 
Returns -// nil (allow-all) for admins, unauthenticated users, or users with no grants. -func (h *CallHandler) loadUserGrants(c *gin.Context) []systemGrant { - role, _ := c.Get("role") - roleStr, _ := role.(string) - if roleStr == auth.RoleAdmin { - return nil - } - userIDVal, exists := c.Get("userID") - if !exists { - return nil - } - uid, _ := userIDVal.(int64) - user, err := h.queries.GetUser(c.Request.Context(), uid) - if err != nil { - return nil - } - if !user.SystemsJson.Valid || user.SystemsJson.String == "" { - return nil - } - var grants []systemGrant - if err := json.Unmarshal([]byte(user.SystemsJson.String), &grants); err != nil { - slog.Warn("failed to parse user grants", "user_id", uid, "error", err) - return nil - } - if len(grants) == 0 { - return nil - } - return grants -} - -// isGranted checks whether a call with the given system/talkgroup passes the -// grant filter. A nil grant list means everything is allowed. -func isGranted(grants []systemGrant, systemID, talkgroupID int64) bool { - if grants == nil { - return true - } - for _, g := range grants { - if g.ID != systemID { - continue - } - if len(g.Talkgroups) == 0 { - return true - } - for _, tg := range g.Talkgroups { - if tg == talkgroupID { - return true - } - } - } - return false -} - -// getSettingValue fetches a setting value from the DB, returning "" on error. -func (h *CallHandler) getSettingValue(c *gin.Context, key string) string { - s, err := h.queries.GetSetting(c.Request.Context(), key) - if err != nil { - return "" - } - return s.Value -} - // PostCallUpload handles POST /api/call-upload and /api/trunk-recorder-call-upload. 
// // @Summary Upload a call recording @@ -244,7 +151,7 @@ func (h *CallHandler) getSettingValue(c *gin.Context, key string) string { // @Failure 500 {object} ErrorResponse "Internal server error" // @Router /call-upload [post] // @Router /trunk-recorder-call-upload [post] -func (h *CallHandler) PostCallUpload(c *gin.Context) { +func (h *Handler) PostCallUpload(c *gin.Context) { slog.Debug("call-upload: request received", "ip", c.ClientIP()) // Retrieve API key ID injected by APIKeyAuth middleware. apiKeyIDVal, exists := c.Get("apiKeyID") @@ -267,7 +174,7 @@ func (h *CallHandler) PostCallUpload(c *gin.Context) { apiKeyRateOverride = true } } - if rStr := h.getSettingValue(c, "apiKeyCallRate"); rStr != "" { + if rStr := shared.GetSettingValue(c, h.queries, "apiKeyCallRate"); rStr != "" { if r, err := strconv.Atoi(rStr); err == nil && r > 0 && !apiKeyRateOverride { rateLimit = r } @@ -449,7 +356,7 @@ func (h *CallHandler) PostCallUpload(c *gin.Context) { } ctx := c.Request.Context() - autoPopulateSystems := h.getSettingValue(c, "autoPopulateSystems") == "true" + autoPopulateSystems := shared.GetSettingValue(c, h.queries, "autoPopulateSystems") == "true" slog.Debug("call-upload: resolving system and talkgroup", "system_id", systemIDRaw, "talkgroup_id", talkgroupIDRaw) @@ -519,12 +426,12 @@ func (h *CallHandler) PostCallUpload(c *gin.Context) { // Resolve group from talkgroupGroup (e.g. SDRTrunk sends this). var groupID sql.NullInt64 if talkgroupGroup != "" { - groupID = resolveGroupID(ctx, h.queries, talkgroupGroup) + groupID = shared.ResolveGroupID(ctx, h.queries, talkgroupGroup) } // Resolve tag from talkgroupTag (e.g. "Law Dispatch", "Fire-Tac"). 
var tagID sql.NullInt64 if talkgroupTag != "" { - tagID = resolveTagID(ctx, h.queries, talkgroupTag) + tagID = shared.ResolveTagID(ctx, h.queries, talkgroupTag) } newID, cerr := h.queries.CreateTalkgroup(ctx, db.CreateTalkgroupParams{ SystemID: system.ID, @@ -551,10 +458,10 @@ func (h *CallHandler) PostCallUpload(c *gin.Context) { talkgroup.Name = sql.NullString{String: talkgroupName, Valid: true} } if !talkgroup.GroupID.Valid && talkgroupGroup != "" { - talkgroup.GroupID = resolveGroupID(ctx, h.queries, talkgroupGroup) + talkgroup.GroupID = shared.ResolveGroupID(ctx, h.queries, talkgroupGroup) } if !talkgroup.TagID.Valid && talkgroupTag != "" { - talkgroup.TagID = resolveTagID(ctx, h.queries, talkgroupTag) + talkgroup.TagID = shared.ResolveTagID(ctx, h.queries, talkgroupTag) } if uerr := h.queries.UpdateTalkgroup(ctx, db.UpdateTalkgroupParams{ ID: talkgroup.ID, @@ -577,9 +484,9 @@ func (h *CallHandler) PostCallUpload(c *gin.Context) { } // Duplicate detection (system.ID and talkgroup.ID are the FK values in calls). - if h.getSettingValue(c, "disableDuplicateDetection") != "true" { + if shared.GetSettingValue(c, h.queries, "disableDuplicateDetection") != "true" { windowMs := int64(2000) - if v := h.getSettingValue(c, "duplicateDetectionTimeFrame"); v != "" { + if v := shared.GetSettingValue(c, h.queries, "duplicateDetectionTimeFrame"); v != "" { if wm, err := strconv.ParseInt(v, 10, 64); err == nil { windowMs = wm } @@ -604,14 +511,14 @@ func (h *CallHandler) PostCallUpload(c *gin.Context) { // Resolve audio conversion mode from settings. convMode := audio.ConversionDisabled - if mStr := h.getSettingValue(c, "audioConversion"); mStr != "" { + if mStr := shared.GetSettingValue(c, h.queries, "audioConversion"); mStr != "" { if m, err := strconv.Atoi(mStr); err == nil { convMode = audio.ConversionMode(m) } } // Resolve encoding preset from settings. 
- convPreset := audio.ParseEncodingPreset(h.getSettingValue(c, "audioEncodingPreset")) + convPreset := audio.ParseEncodingPreset(shared.GetSettingValue(c, h.queries, "audioEncodingPreset")) // Store audio file (conversion handled inside Processor.Store). relPath, err := h.processor.Store(ctx, fh, convMode, convPreset) @@ -857,33 +764,6 @@ func (h *CallHandler) PostCallUpload(c *gin.Context) { } } -// CallSearchResult is a single call in the search response. -type CallSearchResult struct { - ID int64 `json:"id"` - AudioName string `json:"audioName"` - AudioType string `json:"audioType"` - DateTime int64 `json:"dateTime"` - SystemID int64 `json:"systemId"` - SystemLabel string `json:"systemLabel"` - TalkgroupID int64 `json:"talkgroupId"` - TalkgroupLabel string `json:"talkgroupLabel"` - TalkgroupName string `json:"talkgroupName"` - TalkgroupGroup string `json:"talkgroupGroup,omitempty"` - TalkgroupTag string `json:"talkgroupTag,omitempty"` - TalkgroupLed string `json:"talkgroupLed,omitempty"` - Frequency *int64 `json:"frequency,omitempty"` - Duration *int64 `json:"duration,omitempty"` - Source *int64 `json:"source,omitempty"` - Site string `json:"site,omitempty"` - Channel string `json:"channel,omitempty"` - Decoder string `json:"decoder,omitempty"` - ErrorCount *int64 `json:"errorCount,omitempty"` - SpikeCount *int64 `json:"spikeCount,omitempty"` - TalkerAlias string `json:"talkerAlias,omitempty"` - Transcript string `json:"transcript,omitempty"` - Bookmarked bool `json:"bookmarked"` -} // @name CallSearchResult - // GetCallAudio handles GET /api/calls/:id/audio. 
// // @Summary Get call audio file @@ -898,7 +778,7 @@ type CallSearchResult struct { // @Failure 404 {object} ErrorResponse "Call or audio not found" // @Failure 500 {object} ErrorResponse "Internal server error" // @Router /calls/{id}/audio [get] -func (h *CallHandler) GetCallAudio(c *gin.Context) { +func (h *Handler) GetCallAudio(c *gin.Context) { ctx := c.Request.Context() id, err := strconv.ParseInt(c.Param("id"), 10, 64) if err != nil || id <= 0 { @@ -909,7 +789,7 @@ func (h *CallHandler) GetCallAudio(c *gin.Context) { // Require authentication or publicAccess for direct audio access. // Anonymous users must use /api/shared/:token/audio for shared calls. _, hasUser := c.Get("userID") - if !hasUser && h.getSettingValue(c, "publicAccess") != "true" { + if !hasUser && shared.GetSettingValue(c, h.queries, "publicAccess") != "true" { c.JSON(http.StatusUnauthorized, gin.H{"error": "authentication required"}) return } @@ -926,7 +806,7 @@ func (h *CallHandler) GetCallAudio(c *gin.Context) { } // Enforce per-user grants for non-admin listeners. - if grants := h.loadUserGrants(c); !isGranted(grants, call.SystemID, call.TalkgroupID.Int64) { + if grants := shared.LoadUserGrants(c, h.queries); !shared.IsGranted(grants, call.SystemID, call.TalkgroupID.Int64) { c.JSON(http.StatusNotFound, gin.H{"error": "call not found"}) return } @@ -977,17 +857,11 @@ func (h *CallHandler) GetCallAudio(c *gin.Context) { filename = "call" } - c.Header("Content-Disposition", contentDisposition("inline", filename)) + c.Header("Content-Disposition", shared.ContentDisposition("inline", filename)) c.Header("Content-Type", contentType) http.ServeContent(c.Writer, c.Request, filename, fi.ModTime(), f) } -// CallSearchResponse is the response for GET /api/calls. -type CallSearchResponse struct { - Calls []CallSearchResult `json:"calls"` - Total int64 `json:"total"` -} // @name CallSearchResponse - // GetCalls handles GET /api/calls — paginated call archive search. 
// // @Summary Search calls @@ -1014,7 +888,7 @@ type CallSearchResponse struct { // @Failure 400 {object} ErrorResponse "Invalid query parameter" // @Failure 500 {object} ErrorResponse "Internal server error" // @Router /calls [get] -func (h *CallHandler) GetCalls(c *gin.Context) { +func (h *Handler) GetCalls(c *gin.Context) { ctx := c.Request.Context() parseCSVInt64 := func(raw string) ([]int64, error) { @@ -1121,7 +995,7 @@ func (h *CallHandler) GetCalls(c *gin.Context) { } } if len(groupLabels) > 0 && len(groupIDs) == 0 { - c.JSON(http.StatusOK, CallSearchResponse{Calls: []CallSearchResult{}, Total: 0}) + c.JSON(http.StatusOK, shared.CallSearchResponse{Calls: []shared.CallSearchResult{}, Total: 0}) return } @@ -1133,7 +1007,7 @@ func (h *CallHandler) GetCalls(c *gin.Context) { } } if len(tagLabels) > 0 && len(tagIDs) == 0 { - c.JSON(http.StatusOK, CallSearchResponse{Calls: []CallSearchResult{}, Total: 0}) + c.JSON(http.StatusOK, shared.CallSearchResponse{Calls: []shared.CallSearchResult{}, Total: 0}) return } @@ -1255,11 +1129,11 @@ func (h *CallHandler) GetCalls(c *gin.Context) { // Enforce per-user grants — filter out calls the listener is not // authorised to see. Admins and unauthenticated public-access users // have nil grants (allow-all). - grants := h.loadUserGrants(c) + grants := shared.LoadUserGrants(c, h.queries) if grants != nil { allowed := calls[:0] for _, call := range calls { - if isGranted(grants, call.SystemID, call.TalkgroupID.Int64) { + if shared.IsGranted(grants, call.SystemID, call.TalkgroupID.Int64) { allowed = append(allowed, call) } } @@ -1293,9 +1167,9 @@ func (h *CallHandler) GetCalls(c *gin.Context) { tagCache := make(map[int64]string) // Build response with joined labels and transcripts. 
- results := make([]CallSearchResult, 0, len(calls)) + results := make([]shared.CallSearchResult, 0, len(calls)) for _, call := range calls { - r := CallSearchResult{ + r := shared.CallSearchResult{ ID: call.ID, AudioName: call.AudioName, AudioType: call.AudioType, @@ -1409,54 +1283,80 @@ func (h *CallHandler) GetCalls(c *gin.Context) { results = append(results, r) } - c.JSON(http.StatusOK, CallSearchResponse{ + c.JSON(http.StatusOK, shared.CallSearchResponse{ Calls: results, Total: total, }) } -// resolveGroupID looks up an existing group by label or creates one if it -// doesn't exist. Returns a valid sql.NullInt64 with the group's DB ID, or -// an invalid NullInt64 if the operation fails. -func resolveGroupID(ctx context.Context, q db.Querier, label string) sql.NullInt64 { - g, err := q.GetGroupByLabel(ctx, label) - if err == nil { - return sql.NullInt64{Int64: g.ID, Valid: true} - } - if !errors.Is(err, sql.ErrNoRows) { - slog.Warn("failed to look up group by label", "label", label, "error", err) - return sql.NullInt64{} +// transcriptResponse is the JSON shape returned by GetCallTranscript. +type transcriptResponse struct { + Text string `json:"text"` + Segments []audio.TranscriptionSegment `json:"segments"` + Language string `json:"language"` + Model string `json:"model"` +} // @name TranscriptResponse + +// GetCallTranscript handles GET /api/calls/:id/transcript. +// Returns the transcription for a call if one exists. +// +// @Summary Get call transcript +// @Description Returns the transcription text, segments, language and model for a call. Authentication is optional when the publicAccess setting is enabled; otherwise a valid JWT is required. 
+// @Tags Calls +// @Produce json +// @Security BearerAuth +// @Param id path int true "Call ID" +// @Success 200 {object} transcriptResponse +// @Failure 400 {object} ErrorResponse +// @Failure 404 {object} ErrorResponse +// @Failure 500 {object} ErrorResponse +// @Router /calls/{id}/transcript [get] +func (h *Handler) GetCallTranscript(c *gin.Context) { + ctx := c.Request.Context() + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || id <= 0 { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid call id"}) + return } - newID, cerr := q.CreateGroup(ctx, label) - if cerr != nil { - slog.Warn("failed to auto-create group", "label", label, "error", cerr) - return sql.NullInt64{} + + // Require authentication or publicAccess. + _, hasUser := c.Get("userID") + if !hasUser && shared.GetSettingValue(c, h.queries, "publicAccess") != "true" { + c.JSON(http.StatusUnauthorized, gin.H{"error": "authentication required"}) + return } - slog.Info("auto-populated group from upload", "label", label, "db_id", newID) - return sql.NullInt64{Int64: newID, Valid: true} -} -// resolveTagID looks up an existing tag by label or creates one if it -// doesn't exist. Returns a valid sql.NullInt64 with the tag's DB ID, or -// an invalid NullInt64 if the operation fails. 
-func resolveTagID(ctx context.Context, q db.Querier, label string) sql.NullInt64 { - t, err := q.GetTagByLabel(ctx, label) - if err == nil { - return sql.NullInt64{Int64: t.ID, Valid: true} + trx, err := h.queries.GetTranscriptionByCallID(ctx, id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + c.JSON(http.StatusNotFound, gin.H{"error": "transcript not found"}) + return + } + slog.Error("failed to get transcript", "call_id", id, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) + return } - if !errors.Is(err, sql.ErrNoRows) { - slog.Warn("failed to look up tag by label", "label", label, "error", err) - return sql.NullInt64{} + + var segments []audio.TranscriptionSegment + if trx.Segments.Valid && trx.Segments.String != "" { + if err := json.Unmarshal([]byte(trx.Segments.String), &segments); err != nil { + slog.Warn("failed to parse transcript segments", "call_id", id, "error", err) + } } - newID, cerr := q.CreateTag(ctx, label) - if cerr != nil { - slog.Warn("failed to auto-create tag", "label", label, "error", cerr) - return sql.NullInt64{} + if segments == nil { + segments = []audio.TranscriptionSegment{} } - slog.Info("auto-populated tag from upload", "label", label, "db_id", newID) - return sql.NullInt64{Int64: newID, Valid: true} + + c.JSON(http.StatusOK, transcriptResponse{ + Text: trx.Text, + Segments: segments, + Language: trx.Language.String, + Model: trx.Model.String, + }) } +// --- helpers --- + // needsBackfill returns true if at least one talkgroup field is empty and a // corresponding value was provided in the upload metadata. func needsBackfill(tg db.Talkgroup, label, name, tag, group string) bool { @@ -1616,69 +1516,3 @@ func aggregateErrorSpikeCounts(raw string) (sql.NullInt64, sql.NullInt64) { return sql.NullInt64{Int64: totalErrors, Valid: true}, sql.NullInt64{Int64: totalSpikes, Valid: true} } - -// transcriptResponse is the JSON shape returned by GetCallTranscript. 
-type transcriptResponse struct { - Text string `json:"text"` - Segments []audio.TranscriptionSegment `json:"segments"` - Language string `json:"language"` - Model string `json:"model"` -} // @name TranscriptResponse - -// GetCallTranscript handles GET /api/calls/:id/transcript. -// Returns the transcription for a call if one exists. -// -// @Summary Get call transcript -// @Description Returns the transcription text, segments, language and model for a call. Authentication is optional when the publicAccess setting is enabled; otherwise a valid JWT is required. -// @Tags Calls -// @Produce json -// @Security BearerAuth -// @Param id path int true "Call ID" -// @Success 200 {object} transcriptResponse -// @Failure 400 {object} ErrorResponse -// @Failure 404 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse -// @Router /calls/{id}/transcript [get] -func (h *CallHandler) GetCallTranscript(c *gin.Context) { - ctx := c.Request.Context() - id, err := strconv.ParseInt(c.Param("id"), 10, 64) - if err != nil || id <= 0 { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid call id"}) - return - } - - // Require authentication or publicAccess. 
- _, hasUser := c.Get("userID") - if !hasUser && h.getSettingValue(c, "publicAccess") != "true" { - c.JSON(http.StatusUnauthorized, gin.H{"error": "authentication required"}) - return - } - - trx, err := h.queries.GetTranscriptionByCallID(ctx, id) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - c.JSON(http.StatusNotFound, gin.H{"error": "transcript not found"}) - return - } - slog.Error("failed to get transcript", "call_id", id, "error", err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) - return - } - - var segments []audio.TranscriptionSegment - if trx.Segments.Valid && trx.Segments.String != "" { - if err := json.Unmarshal([]byte(trx.Segments.String), &segments); err != nil { - slog.Warn("failed to parse transcript segments", "call_id", id, "error", err) - } - } - if segments == nil { - segments = []audio.TranscriptionSegment{} - } - - c.JSON(http.StatusOK, transcriptResponse{ - Text: trx.Text, - Segments: segments, - Language: trx.Language.String, - Model: trx.Model.String, - }) -} diff --git a/backend/internal/api/calls_limiter_internal_test.go b/backend/internal/handler/calls/limiter_internal_test.go similarity index 76% rename from backend/internal/api/calls_limiter_internal_test.go rename to backend/internal/handler/calls/limiter_internal_test.go index 7964d8f..6f4bd21 100644 --- a/backend/internal/api/calls_limiter_internal_test.go +++ b/backend/internal/handler/calls/limiter_internal_test.go @@ -1,4 +1,4 @@ -package api +package calls import ( "testing" @@ -18,7 +18,7 @@ import ( // triggers the sweep, which should purge every stale entry except the // freshly-inserted one. func TestCallHandler_Limiter_CleansUpStaleEntries(t *testing.T) { - h := NewCallHandler(&db.Queries{}, (*audio.Processor)(nil), (*ws.Hub)(nil), nil, nil) + h := New(&db.Queries{}, (*audio.Processor)(nil), (*ws.Hub)(nil), nil, nil) // Seed 101 entries with a stale windowStart (> 2*rateWindowDuration ago). 
staleStart := time.Now().Add(-3 * rateWindowDuration) @@ -50,7 +50,7 @@ func TestCallHandler_Limiter_CleansUpStaleEntries(t *testing.T) { } func TestCallHandler_Limiter_KeepsFreshEntriesDuringSweep(t *testing.T) { - h := NewCallHandler(&db.Queries{}, (*audio.Processor)(nil), (*ws.Hub)(nil), nil, nil) + h := New(&db.Queries{}, (*audio.Processor)(nil), (*ws.Hub)(nil), nil, nil) now := time.Now() // 90 stale + 15 fresh = 105 total → > 100 triggers sweep. @@ -74,21 +74,3 @@ func TestCallHandler_Limiter_KeepsFreshEntriesDuringSweep(t *testing.T) { t.Fatalf("post-sweep size = %d, want 16 (15 fresh + 1 new)", got) } } - -func TestCallHandler_ShareLimiter_CleansUpStaleEntries(t *testing.T) { - h := NewCallHandler(&db.Queries{}, (*audio.Processor)(nil), (*ws.Hub)(nil), nil, nil) - - staleStart := time.Now().Add(-3 * rateWindowDuration) - for i := int64(1); i <= 101; i++ { - h.shareLimiters[i] = &apiKeyLimiter{ - windowStart: staleStart, - rateLimit: shareRatePerMin, - } - } - - _ = h.getShareLimiter(9999) - - if got := len(h.shareLimiters); got != 1 { - t.Fatalf("post-sweep shareLimiters size = %d, want 1", got) - } -} diff --git a/backend/internal/handler/health/health.go b/backend/internal/handler/health/health.go new file mode 100644 index 0000000..add6ce3 --- /dev/null +++ b/backend/internal/handler/health/health.go @@ -0,0 +1,33 @@ +// Package health provides the GET /api/health endpoint. +package health + +import ( + "net/http" + + "github.com/gin-gonic/gin" +) + +// Handler serves the health-check endpoint. +type Handler struct { + version string +} + +// New constructs a Handler. +func New(version string) *Handler { + return &Handler{version: version} +} + +// Get godoc +// +// @Summary Health check +// @Description Returns server status and version for readiness probes and Docker HEALTHCHECK. 
+// @Tags Health +// @Produce json +// @Success 200 {object} object{status=string,version=string} "Server is healthy" +// @Router /health [get] +func (h *Handler) Get(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{ + "status": "ok", + "version": h.version, + }) +} diff --git a/backend/internal/api/admin_test.go b/backend/internal/handler/routes/admin_test.go similarity index 98% rename from backend/internal/api/admin_test.go rename to backend/internal/handler/routes/admin_test.go index 152a53c..e6b302b 100644 --- a/backend/internal/api/admin_test.go +++ b/backend/internal/handler/routes/admin_test.go @@ -1,4 +1,4 @@ -package api_test +package routes_test import ( "bytes" @@ -11,7 +11,7 @@ import ( "testing" "github.com/gin-gonic/gin" - "github.com/openscanner/openscanner/internal/api" + "github.com/openscanner/openscanner/internal/handler/routes" "github.com/openscanner/openscanner/internal/audio" "github.com/openscanner/openscanner/internal/auth" "github.com/openscanner/openscanner/internal/db" @@ -31,7 +31,7 @@ func newAdminTestEngine(t *testing.T) (*gin.Engine, *db.Queries) { router := gin.New() rl := auth.NewRateLimiter(context.Background()) processor := audio.NewProcessor(t.TempDir(), nil) - api.RegisterRoutes(router, api.Deps{ + routes.RegisterRoutes(router, routes.Deps{ Queries: queries, RateLimiter: rl, Processor: processor, diff --git a/backend/internal/api/auth_test.go b/backend/internal/handler/routes/auth_test.go similarity index 99% rename from backend/internal/api/auth_test.go rename to backend/internal/handler/routes/auth_test.go index 9ded864..baafcae 100644 --- a/backend/internal/api/auth_test.go +++ b/backend/internal/handler/routes/auth_test.go @@ -1,4 +1,4 @@ -package api_test +package routes_test import ( "bytes" diff --git a/backend/internal/api/bookmarks_test.go b/backend/internal/handler/routes/bookmarks_test.go similarity index 99% rename from backend/internal/api/bookmarks_test.go rename to 
backend/internal/handler/routes/bookmarks_test.go index 1578490..538d252 100644 --- a/backend/internal/api/bookmarks_test.go +++ b/backend/internal/handler/routes/bookmarks_test.go @@ -1,4 +1,4 @@ -package api_test +package routes_test import ( "bytes" diff --git a/backend/internal/api/calls_test.go b/backend/internal/handler/routes/calls_test.go similarity index 99% rename from backend/internal/api/calls_test.go rename to backend/internal/handler/routes/calls_test.go index 24ede35..d45f6a4 100644 --- a/backend/internal/api/calls_test.go +++ b/backend/internal/handler/routes/calls_test.go @@ -1,4 +1,4 @@ -package api_test +package routes_test import ( "bytes" @@ -12,7 +12,7 @@ import ( "time" "github.com/gin-gonic/gin" - "github.com/openscanner/openscanner/internal/api" + "github.com/openscanner/openscanner/internal/handler/routes" "github.com/openscanner/openscanner/internal/audio" "github.com/openscanner/openscanner/internal/auth" "github.com/openscanner/openscanner/internal/db" @@ -32,7 +32,7 @@ func newTestEngineWithCalls(t *testing.T) (*gin.Engine, *db.Queries) { router := gin.New() rl := auth.NewRateLimiter(context.Background()) - api.RegisterRoutes(router, api.Deps{ + routes.RegisterRoutes(router, routes.Deps{ Queries: queries, RateLimiter: rl, Processor: proc, diff --git a/backend/internal/api/listener_ws_alias_test.go b/backend/internal/handler/routes/listener_ws_alias_test.go similarity index 97% rename from backend/internal/api/listener_ws_alias_test.go rename to backend/internal/handler/routes/listener_ws_alias_test.go index 8fb4121..f4fb04a 100644 --- a/backend/internal/api/listener_ws_alias_test.go +++ b/backend/internal/handler/routes/listener_ws_alias_test.go @@ -1,4 +1,4 @@ -package api_test +package routes_test import ( "testing" diff --git a/backend/internal/api/radioreference_test.go b/backend/internal/handler/routes/radioreference_test.go similarity index 99% rename from backend/internal/api/radioreference_test.go rename to 
backend/internal/handler/routes/radioreference_test.go index 9125d1d..612a186 100644 --- a/backend/internal/api/radioreference_test.go +++ b/backend/internal/handler/routes/radioreference_test.go @@ -1,4 +1,4 @@ -package api_test +package routes_test import ( "bytes" diff --git a/backend/internal/api/refresh_test.go b/backend/internal/handler/routes/refresh_test.go similarity index 99% rename from backend/internal/api/refresh_test.go rename to backend/internal/handler/routes/refresh_test.go index 4382c8b..bf01650 100644 --- a/backend/internal/api/refresh_test.go +++ b/backend/internal/handler/routes/refresh_test.go @@ -1,4 +1,4 @@ -package api_test +package routes_test import ( "bytes" diff --git a/backend/internal/api/routes.go b/backend/internal/handler/routes/routes.go similarity index 66% rename from backend/internal/api/routes.go rename to backend/internal/handler/routes/routes.go index 722dc4c..98868a1 100644 --- a/backend/internal/api/routes.go +++ b/backend/internal/handler/routes/routes.go @@ -1,5 +1,9 @@ -// Package api contains Gin route handlers for OpenScanner. -package api +// Package routes wires all OpenScanner HTTP and WebSocket routes onto a Gin engine. +// +// It owns the top-level route registration and middleware ordering, and delegates +// per-feature handling to the handler subpackages (auth, calls, bookmarks, share, +// setup, health, and admin/*). 
+package routes import ( "database/sql" @@ -18,6 +22,15 @@ import ( "github.com/openscanner/openscanner/internal/auth" "github.com/openscanner/openscanner/internal/db" "github.com/openscanner/openscanner/internal/downstream" + authhandler "github.com/openscanner/openscanner/internal/handler/auth" + "github.com/openscanner/openscanner/internal/handler/admin/imports" + "github.com/openscanner/openscanner/internal/handler/admin/radioreference" + "github.com/openscanner/openscanner/internal/handler/admin/transcriptions" + "github.com/openscanner/openscanner/internal/handler/bookmarks" + "github.com/openscanner/openscanner/internal/handler/calls" + "github.com/openscanner/openscanner/internal/handler/health" + "github.com/openscanner/openscanner/internal/handler/setup" + "github.com/openscanner/openscanner/internal/handler/share" "github.com/openscanner/openscanner/internal/middleware" "github.com/openscanner/openscanner/internal/static" "github.com/openscanner/openscanner/internal/ws" @@ -57,18 +70,15 @@ type Deps struct { // RegisterRoutes wires all API routes onto the Gin engine. func RegisterRoutes(r *gin.Engine, deps Deps) { - setupHandler := NewSetupHandler(deps.Queries) - authHandler := NewAuthHandler(deps.Queries, deps.RateLimiter, deps.Hub) - callHandler := NewCallHandler(deps.Queries, deps.Processor, deps.Hub, deps.DownstreamNotifier, deps.Transcriber) - bookmarkHandler := &BookmarkHandler{queries: deps.Queries} - recordingsDir := "." 
- if deps.Processor != nil { - recordingsDir = deps.Processor.RecordingsDir() - } - adminHandler := NewAdminHandler(deps.Queries, deps.Hub, deps.SQLDB, deps.DirMonitorReloader, deps.DownstreamReloader, recordingsDir) - adminHandler.ffmpegAvailable = deps.FFmpegAvailable - adminHandler.fdkAACAvailable = deps.FDKAACAvailable - adminHandler.whisperAvailable = deps.WhisperAvailable + healthHandler := health.New(deps.Version) + setupHandler := setup.New(deps.Queries) + authH := authhandler.New(deps.Queries, deps.RateLimiter, deps.Hub) + callHandler := calls.New(deps.Queries, deps.Processor, deps.Hub, deps.DownstreamNotifier, deps.Transcriber) + shareHandler := share.New(deps.Queries, deps.Processor) + bookmarkHandler := bookmarks.New(deps.Queries) + importsHandler := imports.New(deps.Queries, deps.Hub) + rrHandler := radioreference.New(deps.Queries) + transcriptionsHandler := transcriptions.New(deps.Queries, deps.WhisperAvailable) // Global middleware applied to every request. r.Use(middleware.RequestID()) @@ -78,24 +88,24 @@ func RegisterRoutes(r *gin.Engine, deps Deps) { api := r.Group("/api") // Health check — unauthenticated. - RegisterHealth(api, deps.Version) + api.GET("/health", healthHandler.Get) // First-run setup — unauthenticated. api.GET("/setup/status", setupHandler.GetSetupStatus) api.POST("/setup", middleware.MaxBodySize(1<<20), setupHandler.PostSetup) // Auth — login and refresh are unauthenticated; the rest require a valid JWT. 
- api.POST("/auth/login", middleware.MaxBodySize(1<<20), middleware.RateLimit(deps.RateLimiter), authHandler.PostLogin) - api.POST("/auth/refresh", middleware.MaxBodySize(1<<20), middleware.RateLimit(deps.RateLimiter), authHandler.PostRefresh) + api.POST("/auth/login", middleware.MaxBodySize(1<<20), middleware.RateLimit(deps.RateLimiter), authH.PostLogin) + api.POST("/auth/refresh", middleware.MaxBodySize(1<<20), middleware.RateLimit(deps.RateLimiter), authH.PostRefresh) authRequired := api.Group("/auth") authRequired.Use(middleware.JWTAuth()) { - authRequired.POST("/logout", authHandler.PostLogout) - authRequired.PUT("/password", authHandler.PutPassword) - authRequired.GET("/me", authHandler.GetMe) - authRequired.GET("/tg-selection", authHandler.GetTGSelection) - authRequired.PUT("/tg-selection", authHandler.PutTGSelection) + authRequired.POST("/logout", authH.PostLogout) + authRequired.PUT("/password", authH.PutPassword) + authRequired.GET("/me", authH.GetMe) + authRequired.GET("/tg-selection", authH.GetTGSelection) + authRequired.PUT("/tg-selection", authH.PutTGSelection) } // Call search — public access with optional auth for bookmarks. @@ -106,21 +116,21 @@ func RegisterRoutes(r *gin.Engine, deps Deps) { // Shared calls — token-based public access (no auth required). // Rate-limited to 30 req/min per IP to prevent bandwidth exhaustion. sharedRateLimit := middleware.RateLimitByIP(30) - api.GET("/shared/:token", sharedRateLimit, callHandler.GetSharedCallByToken) - api.GET("/shared/:token/audio", sharedRateLimit, callHandler.GetSharedCallAudio) + api.GET("/shared/:token", sharedRateLimit, shareHandler.GetSharedCallByToken) + api.GET("/shared/:token/audio", sharedRateLimit, shareHandler.GetSharedCallAudio) // Share management — JWT required. 
- api.POST("/calls/:id/share", middleware.JWTAuth(), callHandler.PostShareCall) - api.DELETE("/calls/:id/share", middleware.JWTAuth(), callHandler.DeleteShareCall) - api.GET("/calls/:id/share", middleware.JWTAuth(), callHandler.GetCallShare) + api.POST("/calls/:id/share", middleware.JWTAuth(), shareHandler.PostShareCall) + api.DELETE("/calls/:id/share", middleware.JWTAuth(), shareHandler.DeleteShareCall) + api.GET("/calls/:id/share", middleware.JWTAuth(), shareHandler.GetCallShare) // Bookmarks — JWT required. - bookmarks := api.Group("/bookmarks") - bookmarks.Use(middleware.JWTAuth()) + bookmarksGroup := api.Group("/bookmarks") + bookmarksGroup.Use(middleware.JWTAuth()) { - bookmarks.GET("", bookmarkHandler.GetBookmarkIDs) - bookmarks.GET("/calls", bookmarkHandler.GetBookmarkCalls) - bookmarks.POST("", bookmarkHandler.PostToggleBookmark) + bookmarksGroup.GET("", bookmarkHandler.GetBookmarkIDs) + bookmarksGroup.GET("/calls", bookmarkHandler.GetBookmarkCalls) + bookmarksGroup.POST("", bookmarkHandler.PostToggleBookmark) } // Call upload — API key auth. @@ -134,26 +144,24 @@ func RegisterRoutes(r *gin.Engine, deps Deps) { } // Admin routes — JWT + admin role required. - // Most admin operations are handled via WebSocket (ADM_REQ/ADM_RES). - // Only file-upload endpoints remain on REST (WebSocket can't handle multipart). 
admin := api.Group("/admin") admin.Use(middleware.JWTAuth(), middleware.RequireAdmin(), middleware.MaxBodySize(2<<20)) // 2 MiB JSON body limit { // Import (file uploads — must stay REST) - admin.POST("/import/talkgroups", adminHandler.ImportTalkgroups) - admin.POST("/import/units", adminHandler.ImportUnits) - admin.POST("/import/groups", adminHandler.ImportGroups) - admin.POST("/import/tags", adminHandler.ImportTags) + admin.POST("/import/talkgroups", importsHandler.ImportTalkgroups) + admin.POST("/import/units", importsHandler.ImportUnits) + admin.POST("/import/groups", importsHandler.ImportGroups) + admin.POST("/import/tags", importsHandler.ImportTags) // RadioReference CSV preview (file upload — must stay REST) - admin.POST("/radioreference/preview/csv", adminHandler.RadioReferencePreviewCSV) + admin.POST("/radioreference/preview/csv", rrHandler.PreviewCSV) // Transcription status - admin.GET("/transcriptions/status", adminHandler.GetTranscriptionStatus) + admin.GET("/transcriptions/status", transcriptionsHandler.GetStatus) // Swagger: issue a short-lived HTTP-only cookie so Swagger UI // can be opened in a new browser tab without exposing the JWT. - admin.POST("/docs/session", postDocsSession) + admin.POST("/docs/session", authhandler.PostDocsSession) } // Swagger API documentation — protected by the HTTP-only cookie @@ -215,18 +223,3 @@ func serveFrontend(r *gin.Engine) { fileServer.ServeHTTP(c.Writer, c.Request) }) } - -// postDocsSession handles POST /api/admin/docs/session. -// -// @Summary Create Swagger docs session cookie -// @Description Issues a short-lived HTTP-only cookie used to access /api/admin/docs. 
-// @Tags Admin -// @Produce json -// @Success 200 {object} object{ok=bool} -// @Security BearerAuth -// @Router /admin/docs/session [post] -func postDocsSession(c *gin.Context) { - secure := c.Request.TLS != nil || c.GetHeader("X-Forwarded-Proto") == "https" - auth.SetSwaggerCookie(c, secure) - c.JSON(http.StatusOK, gin.H{"ok": true}) -} diff --git a/backend/internal/api/setup_test.go b/backend/internal/handler/routes/setup_test.go similarity index 99% rename from backend/internal/api/setup_test.go rename to backend/internal/handler/routes/setup_test.go index 1665910..2dba3ed 100644 --- a/backend/internal/api/setup_test.go +++ b/backend/internal/handler/routes/setup_test.go @@ -1,4 +1,4 @@ -package api_test +package routes_test import ( "bytes" diff --git a/backend/internal/api/share_test.go b/backend/internal/handler/routes/share_test.go similarity index 97% rename from backend/internal/api/share_test.go rename to backend/internal/handler/routes/share_test.go index 54330ae..7552f1f 100644 --- a/backend/internal/api/share_test.go +++ b/backend/internal/handler/routes/share_test.go @@ -1,4 +1,4 @@ -package api_test +package routes_test import ( "context" @@ -11,8 +11,8 @@ import ( "time" "github.com/google/uuid" - "github.com/openscanner/openscanner/internal/api" "github.com/openscanner/openscanner/internal/db" + "github.com/openscanner/openscanner/internal/handler/share" ) // seedCallWithSystem creates a system, talkgroup, and call in the DB and @@ -85,7 +85,7 @@ func TestGetSharedCallByToken_Success(t *testing.T) { t.Fatalf("expected 200, got %d (body: %s)", w.Code, w.Body.String()) } - var resp api.ShareResponse + var resp share.ShareResponse if err := json.NewDecoder(w.Body).Decode(&resp); err != nil { t.Fatalf("decode response: %v", err) } diff --git a/backend/internal/api/testhelpers_test.go b/backend/internal/handler/routes/testhelpers_test.go similarity index 93% rename from backend/internal/api/testhelpers_test.go rename to 
backend/internal/handler/routes/testhelpers_test.go index 465828b..1ff2fa7 100644 --- a/backend/internal/api/testhelpers_test.go +++ b/backend/internal/handler/routes/testhelpers_test.go @@ -1,4 +1,4 @@ -package api_test +package routes_test import ( "context" @@ -7,7 +7,7 @@ import ( "time" "github.com/gin-gonic/gin" - "github.com/openscanner/openscanner/internal/api" + "github.com/openscanner/openscanner/internal/handler/routes" "github.com/openscanner/openscanner/internal/auth" "github.com/openscanner/openscanner/internal/db" "github.com/openscanner/openscanner/internal/logging" @@ -38,7 +38,7 @@ func newTestEngine(t *testing.T) (*gin.Engine, *db.Queries) { router := gin.New() rl := auth.NewRateLimiter(context.Background()) - api.RegisterRoutes(router, api.Deps{ + routes.RegisterRoutes(router, routes.Deps{ Queries: queries, RateLimiter: rl, Version: "test", diff --git a/backend/internal/api/setup.go b/backend/internal/handler/setup/setup.go similarity index 91% rename from backend/internal/api/setup.go rename to backend/internal/handler/setup/setup.go index 7494018..db56d1b 100644 --- a/backend/internal/api/setup.go +++ b/backend/internal/handler/setup/setup.go @@ -1,5 +1,5 @@ -// Package api — first-run setup endpoints (POST /api/setup, GET /api/setup/status). -package api +// Package setup provides first-run setup endpoints (POST /api/setup, GET /api/setup/status). +package setup import ( "database/sql" @@ -14,15 +14,15 @@ import ( "github.com/openscanner/openscanner/internal/db" ) -// SetupHandler holds dependencies for first-run setup endpoints. -type SetupHandler struct { +// Handler holds dependencies for first-run setup endpoints. +type Handler struct { queries *db.Queries mu sync.Mutex // guards the check-then-create in PostSetup (TOCTOU prevention) } -// NewSetupHandler constructs a SetupHandler. -func NewSetupHandler(queries *db.Queries) *SetupHandler { - return &SetupHandler{queries: queries} +// New constructs a Handler. 
+func New(queries *db.Queries) *Handler { + return &Handler{queries: queries} } type setupStatusResponse struct { @@ -39,7 +39,7 @@ type setupStatusResponse struct { // @Success 200 {object} setupStatusResponse // @Failure 500 {object} ErrorResponse // @Router /setup/status [get] -func (h *SetupHandler) GetSetupStatus(c *gin.Context) { +func (h *Handler) GetSetupStatus(c *gin.Context) { ctx := c.Request.Context() requestID, _ := c.Get("requestID") @@ -84,7 +84,7 @@ type setupRequest struct { // @Failure 409 {object} ErrorResponse // @Failure 500 {object} ErrorResponse // @Router /setup [post] -func (h *SetupHandler) PostSetup(c *gin.Context) { +func (h *Handler) PostSetup(c *gin.Context) { // Serialise concurrent setup requests to prevent TOCTOU race (OWASP A01). h.mu.Lock() defer h.mu.Unlock() diff --git a/backend/internal/handler/share/limiter_internal_test.go b/backend/internal/handler/share/limiter_internal_test.go new file mode 100644 index 0000000..c2e590a --- /dev/null +++ b/backend/internal/handler/share/limiter_internal_test.go @@ -0,0 +1,28 @@ +package share + +import ( + "testing" + "time" + + "github.com/openscanner/openscanner/internal/db" +) + +// TestShareLimiter_CleansUpStaleEntries verifies that getShareLimiter +// sweeps stale entries once the map grows past the threshold (>100). 
+func TestShareLimiter_CleansUpStaleEntries(t *testing.T) { + h := New(&db.Queries{}, nil) + + staleStart := time.Now().Add(-3 * rateWindowDuration) + for i := int64(1); i <= 101; i++ { + h.limiters[i] = &shareLimiter{ + windowStart: staleStart, + rateLimit: shareRatePerMin, + } + } + + _ = h.getShareLimiter(9999) + + if got := len(h.limiters); got != 1 { + t.Fatalf("post-sweep limiters size = %d, want 1", got) + } +} diff --git a/backend/internal/api/share.go b/backend/internal/handler/share/share.go similarity index 82% rename from backend/internal/api/share.go rename to backend/internal/handler/share/share.go index 0deb287..b30f6cf 100644 --- a/backend/internal/api/share.go +++ b/backend/internal/handler/share/share.go @@ -1,4 +1,5 @@ -package api +// Package share provides endpoints for shareable call links. +package share import ( "database/sql" @@ -10,13 +11,89 @@ import ( "path/filepath" "strconv" "strings" + "sync" "time" "github.com/gin-gonic/gin" "github.com/google/uuid" + "github.com/openscanner/openscanner/internal/audio" "github.com/openscanner/openscanner/internal/db" + "github.com/openscanner/openscanner/internal/handler/shared" ) +const ( + rateWindowDuration = time.Minute + shareRatePerMin = 10 +) + +// shareLimiter is a per-user sliding-window rate limiter for share creation. +type shareLimiter struct { + mu sync.Mutex + windowStart time.Time + count int + rateLimit int +} + +func (l *shareLimiter) allow() bool { + l.mu.Lock() + defer l.mu.Unlock() + now := time.Now() + if now.Sub(l.windowStart) >= rateWindowDuration { + l.windowStart = now + l.count = 0 + } + if l.count >= l.rateLimit { + return false + } + l.count++ + return true +} + +// Handler handles shareable link endpoints. +type Handler struct { + queries *db.Queries + processor *audio.Processor + + mu sync.Mutex + limiters map[int64]*shareLimiter +} + +// New constructs a share Handler. 
+func New(queries *db.Queries, processor *audio.Processor) *Handler { + return &Handler{ + queries: queries, + processor: processor, + limiters: make(map[int64]*shareLimiter), + } +} + +func (h *Handler) getShareLimiter(userID int64) *shareLimiter { + h.mu.Lock() + defer h.mu.Unlock() + + if len(h.limiters) > 100 { + now := time.Now() + for id, l := range h.limiters { + l.mu.Lock() + stale := now.Sub(l.windowStart) >= 2*rateWindowDuration + l.mu.Unlock() + if stale { + delete(h.limiters, id) + } + } + } + + l, ok := h.limiters[userID] + if !ok { + l = &shareLimiter{ + windowStart: time.Now(), + rateLimit: shareRatePerMin, + } + h.limiters[userID] = l + } + return l +} + // ShareResponse is the JSON payload for a shared call viewed via token. type ShareResponse struct { Token string `json:"token"` @@ -54,10 +131,10 @@ type ShareCreateResponse struct { // @Failure 404 {object} ErrorResponse // @Failure 500 {object} ErrorResponse // @Router /calls/{id}/share [post] -func (h *CallHandler) PostShareCall(c *gin.Context) { +func (h *Handler) PostShareCall(c *gin.Context) { ctx := c.Request.Context() - if h.getSettingValue(c, "shareableLinks") != "true" { + if shared.GetSettingValue(c, h.queries, "shareableLinks") != "true" { c.JSON(http.StatusForbidden, gin.H{"error": "sharing is disabled"}) return } @@ -105,7 +182,7 @@ func (h *CallHandler) PostShareCall(c *gin.Context) { // Enforce per-user grants — restricted listeners cannot share calls // outside their authorised scope. - if grants := h.loadUserGrants(c); !isGranted(grants, call.SystemID, call.TalkgroupID.Int64) { + if grants := shared.LoadUserGrants(c, h.queries); !shared.IsGranted(grants, call.SystemID, call.TalkgroupID.Int64) { c.JSON(http.StatusNotFound, gin.H{"error": "call not found"}) return } @@ -114,7 +191,7 @@ func (h *CallHandler) PostShareCall(c *gin.Context) { // Compute expires_at from the global sharedLinkExpiry setting (stored as days). 
var expiresAt sql.NullInt64 - if expStr := h.getSettingValue(c, "sharedLinkExpiry"); expStr != "" && expStr != "0" { + if expStr := shared.GetSettingValue(c, h.queries, "sharedLinkExpiry"); expStr != "" && expStr != "0" { if expDays, err := strconv.ParseInt(expStr, 10, 64); err == nil && expDays > 0 { expiresAt = sql.NullInt64{Int64: time.Now().Unix() + expDays*86400, Valid: true} } @@ -153,7 +230,7 @@ func (h *CallHandler) PostShareCall(c *gin.Context) { // @Failure 404 {object} ErrorResponse // @Failure 500 {object} ErrorResponse // @Router /calls/{id}/share [delete] -func (h *CallHandler) DeleteShareCall(c *gin.Context) { +func (h *Handler) DeleteShareCall(c *gin.Context) { ctx := c.Request.Context() id, err := strconv.ParseInt(c.Param("id"), 10, 64) @@ -194,13 +271,13 @@ func (h *CallHandler) DeleteShareCall(c *gin.Context) { // isSharedLinkExpired checks if a shared link has expired. // It checks the explicit expires_at first, then falls back to the global // sharedLinkExpiry setting (days) applied to created_at. Returns false if no expiry is set. -func (h *CallHandler) isSharedLinkExpired(c *gin.Context, expiresAt sql.NullInt64, createdAt int64) bool { +func (h *Handler) isSharedLinkExpired(c *gin.Context, expiresAt sql.NullInt64, createdAt int64) bool { now := time.Now().Unix() if expiresAt.Valid && expiresAt.Int64 > 0 { return now > expiresAt.Int64 } // Fallback: global setting (days) applied to creation time. 
- if expStr := h.getSettingValue(c, "sharedLinkExpiry"); expStr != "" && expStr != "0" { + if expStr := shared.GetSettingValue(c, h.queries, "sharedLinkExpiry"); expStr != "" && expStr != "0" { if expDays, err := strconv.ParseInt(expStr, 10, 64); err == nil && expDays > 0 { return now > createdAt+expDays*86400 } @@ -221,7 +298,7 @@ func (h *CallHandler) isSharedLinkExpired(c *gin.Context, expiresAt sql.NullInt6 // @Failure 404 {object} ErrorResponse // @Failure 500 {object} ErrorResponse // @Router /shared/{token} [get] -func (h *CallHandler) GetSharedCallByToken(c *gin.Context) { +func (h *Handler) GetSharedCallByToken(c *gin.Context) { ctx := c.Request.Context() token := c.Param("token") @@ -280,7 +357,7 @@ func (h *CallHandler) GetSharedCallByToken(c *gin.Context) { // @Failure 404 {object} ErrorResponse // @Failure 500 {object} ErrorResponse // @Router /shared/{token}/audio [get] -func (h *CallHandler) GetSharedCallAudio(c *gin.Context) { +func (h *Handler) GetSharedCallAudio(c *gin.Context) { token := c.Param("token") if token == "" { c.JSON(http.StatusBadRequest, gin.H{"error": "token required"}) @@ -349,7 +426,7 @@ func (h *CallHandler) GetSharedCallAudio(c *gin.Context) { filename = filepath.Base(sl.AudioPath) } - c.Header("Content-Disposition", contentDisposition("inline", filename)) + c.Header("Content-Disposition", shared.ContentDisposition("inline", filename)) c.Header("Content-Type", contentType) http.ServeContent(c.Writer, c.Request, filename, fi.ModTime(), f) } @@ -368,10 +445,10 @@ func (h *CallHandler) GetSharedCallAudio(c *gin.Context) { // @Failure 404 {object} ErrorResponse // @Failure 500 {object} ErrorResponse // @Router /calls/{id}/share [get] -func (h *CallHandler) GetCallShare(c *gin.Context) { +func (h *Handler) GetCallShare(c *gin.Context) { ctx := c.Request.Context() - if h.getSettingValue(c, "shareableLinks") != "true" { + if shared.GetSettingValue(c, h.queries, "shareableLinks") != "true" { c.JSON(http.StatusNotFound, gin.H{"error": 
"not found"}) return } diff --git a/backend/internal/handler/shared/call_search.go b/backend/internal/handler/shared/call_search.go new file mode 100644 index 0000000..19b807e --- /dev/null +++ b/backend/internal/handler/shared/call_search.go @@ -0,0 +1,34 @@ +package shared + +// CallSearchResult is a single call in the search response. +type CallSearchResult struct { + ID int64 `json:"id"` + AudioName string `json:"audioName"` + AudioType string `json:"audioType"` + DateTime int64 `json:"dateTime"` + SystemID int64 `json:"systemId"` + SystemLabel string `json:"systemLabel"` + TalkgroupID int64 `json:"talkgroupId"` + TalkgroupLabel string `json:"talkgroupLabel"` + TalkgroupName string `json:"talkgroupName"` + TalkgroupGroup string `json:"talkgroupGroup,omitempty"` + TalkgroupTag string `json:"talkgroupTag,omitempty"` + TalkgroupLed string `json:"talkgroupLed,omitempty"` + Frequency *int64 `json:"frequency,omitempty"` + Duration *int64 `json:"duration,omitempty"` + Source *int64 `json:"source,omitempty"` + Site string `json:"site,omitempty"` + Channel string `json:"channel,omitempty"` + Decoder string `json:"decoder,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + SpikeCount *int64 `json:"spikeCount,omitempty"` + TalkerAlias string `json:"talkerAlias,omitempty"` + Transcript string `json:"transcript,omitempty"` + Bookmarked bool `json:"bookmarked"` +} // @name CallSearchResult + +// CallSearchResponse is the response for GET /api/calls. 
+type CallSearchResponse struct { + Calls []CallSearchResult `json:"calls"` + Total int64 `json:"total"` +} // @name CallSearchResponse diff --git a/backend/internal/api/content_disposition.go b/backend/internal/handler/shared/content_disposition.go similarity index 87% rename from backend/internal/api/content_disposition.go rename to backend/internal/handler/shared/content_disposition.go index 7afa39e..0cf70a1 100644 --- a/backend/internal/api/content_disposition.go +++ b/backend/internal/handler/shared/content_disposition.go @@ -1,4 +1,4 @@ -package api +package shared import ( "fmt" @@ -6,11 +6,11 @@ import ( "strings" ) -// contentDispositionAttachment builds an RFC 6266 Content-Disposition header +// ContentDisposition builds an RFC 6266 Content-Disposition header // value with both a legacy filename= token (ASCII-only, sanitised) and the // percent-encoded filename*=UTF-8'' token for non-ASCII / unsafe characters. // The caller supplies the disposition type (e.g. "inline" or "attachment"). 
-func contentDisposition(dispType, filename string) string { +func ContentDisposition(dispType, filename string) string { if filename == "" { filename = "file" } diff --git a/backend/internal/api/content_disposition_test.go b/backend/internal/handler/shared/content_disposition_test.go similarity index 97% rename from backend/internal/api/content_disposition_test.go rename to backend/internal/handler/shared/content_disposition_test.go index 1a840f2..4994af5 100644 --- a/backend/internal/api/content_disposition_test.go +++ b/backend/internal/handler/shared/content_disposition_test.go @@ -1,4 +1,4 @@ -package api +package shared import ( "strings" @@ -76,7 +76,7 @@ func TestContentDisposition(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - got := contentDisposition(tc.disposition, tc.filename) + got := ContentDisposition(tc.disposition, tc.filename) if !strings.HasPrefix(got, tc.disposition+"; ") { t.Errorf("header should begin with %q; got %q", tc.disposition+"; ", got) diff --git a/backend/internal/api/swagger_models.go b/backend/internal/handler/shared/dto.go similarity index 97% rename from backend/internal/api/swagger_models.go rename to backend/internal/handler/shared/dto.go index d9a9b22..129a642 100644 --- a/backend/internal/api/swagger_models.go +++ b/backend/internal/handler/shared/dto.go @@ -1,4 +1,5 @@ -package api +// Package shared contains DTOs and helpers reused across handler subpackages. +package shared // Swagger model types — clean mirrors of db types for Swagger documentation. // These are referenced via .swaggo replace directives and never used at runtime. 
@@ -123,6 +124,7 @@ type swagWebhook struct { //nolint:unused } // @name Webhook var ( + _ ErrorResponse _ swagGroup _ swagTag _ swagSetting diff --git a/backend/internal/handler/shared/grants.go b/backend/internal/handler/shared/grants.go new file mode 100644 index 0000000..5124df1 --- /dev/null +++ b/backend/internal/handler/shared/grants.go @@ -0,0 +1,69 @@ +package shared + +import ( + "encoding/json" + "log/slog" + + "github.com/gin-gonic/gin" + "github.com/openscanner/openscanner/internal/auth" + "github.com/openscanner/openscanner/internal/db" +) + +// SystemGrant mirrors ws.systemGrant for grant-based filtering in REST handlers. +type SystemGrant struct { + ID int64 `json:"id"` + Talkgroups []int64 `json:"talkgroups,omitempty"` +} + +// LoadUserGrants returns the parsed grants for the authenticated user. Returns +// nil (allow-all) for admins, unauthenticated users, or users with no grants. +func LoadUserGrants(c *gin.Context, queries *db.Queries) []SystemGrant { + role, _ := c.Get("role") + roleStr, _ := role.(string) + if roleStr == auth.RoleAdmin { + return nil + } + userIDVal, exists := c.Get("userID") + if !exists { + return nil + } + uid, _ := userIDVal.(int64) + user, err := queries.GetUser(c.Request.Context(), uid) + if err != nil { + return nil + } + if !user.SystemsJson.Valid || user.SystemsJson.String == "" { + return nil + } + var grants []SystemGrant + if err := json.Unmarshal([]byte(user.SystemsJson.String), &grants); err != nil { + slog.Warn("failed to parse user grants", "user_id", uid, "error", err) + return nil + } + if len(grants) == 0 { + return nil + } + return grants +} + +// IsGranted checks whether a call with the given system/talkgroup passes the +// grant filter. A nil grant list means everything is allowed. 
+func IsGranted(grants []SystemGrant, systemID, talkgroupID int64) bool { + if grants == nil { + return true + } + for _, g := range grants { + if g.ID != systemID { + continue + } + if len(g.Talkgroups) == 0 { + return true + } + for _, tg := range g.Talkgroups { + if tg == talkgroupID { + return true + } + } + } + return false +} diff --git a/backend/internal/handler/shared/resolve.go b/backend/internal/handler/shared/resolve.go new file mode 100644 index 0000000..1c26f9e --- /dev/null +++ b/backend/internal/handler/shared/resolve.go @@ -0,0 +1,52 @@ +package shared + +import ( + "context" + "database/sql" + "errors" + "log/slog" + + "github.com/openscanner/openscanner/internal/db" +) + +// ResolveGroupID looks up an existing group by label or creates one if it +// doesn't exist. Returns a valid sql.NullInt64 with the group's DB ID, or +// an invalid NullInt64 if the operation fails. +func ResolveGroupID(ctx context.Context, q db.Querier, label string) sql.NullInt64 { + g, err := q.GetGroupByLabel(ctx, label) + if err == nil { + return sql.NullInt64{Int64: g.ID, Valid: true} + } + if !errors.Is(err, sql.ErrNoRows) { + slog.Warn("failed to look up group by label", "label", label, "error", err) + return sql.NullInt64{} + } + newID, cerr := q.CreateGroup(ctx, label) + if cerr != nil { + slog.Warn("failed to auto-create group", "label", label, "error", cerr) + return sql.NullInt64{} + } + slog.Info("auto-populated group from upload", "label", label, "db_id", newID) + return sql.NullInt64{Int64: newID, Valid: true} +} + +// ResolveTagID looks up an existing tag by label or creates one if it +// doesn't exist. Returns a valid sql.NullInt64 with the tag's DB ID, or +// an invalid NullInt64 if the operation fails. 
+func ResolveTagID(ctx context.Context, q db.Querier, label string) sql.NullInt64 { + t, err := q.GetTagByLabel(ctx, label) + if err == nil { + return sql.NullInt64{Int64: t.ID, Valid: true} + } + if !errors.Is(err, sql.ErrNoRows) { + slog.Warn("failed to look up tag by label", "label", label, "error", err) + return sql.NullInt64{} + } + newID, cerr := q.CreateTag(ctx, label) + if cerr != nil { + slog.Warn("failed to auto-create tag", "label", label, "error", cerr) + return sql.NullInt64{} + } + slog.Info("auto-populated tag from upload", "label", label, "db_id", newID) + return sql.NullInt64{Int64: newID, Valid: true} +} diff --git a/backend/internal/handler/shared/settings.go b/backend/internal/handler/shared/settings.go new file mode 100644 index 0000000..7909440 --- /dev/null +++ b/backend/internal/handler/shared/settings.go @@ -0,0 +1,18 @@ +package shared + +import ( + "github.com/gin-gonic/gin" + "github.com/openscanner/openscanner/internal/db" +) + +// GetSettingValue fetches a setting value from the DB, returning "" on error. +func GetSettingValue(c *gin.Context, queries *db.Queries, key string) string { + s, err := queries.GetSetting(c.Request.Context(), key) + if err != nil { + return "" + } + return s.Value +} + +// MaxImportRows is the CSV import safety limit. +const MaxImportRows = 100_000 From 9eebbcea14684b50428bea207200da26628dffd4 Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Sat, 25 Apr 2026 00:42:02 +0000 Subject: [PATCH 09/27] fix(docker): update swag invocation to scan internal/handler Phase 3 moved handlers from internal/api to internal/handler, but the Dockerfile's 'swag init' call was missed when updating the other swag invocations (Makefile, ci/codeql/release workflows). Without '-d cmd/server,internal/handler' swag cannot resolve handler types like shared.ErrorResponse, breaking the Docker image build. 
--- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 73e5305..7468a27 100644 --- a/Dockerfile +++ b/Dockerfile @@ -19,7 +19,7 @@ COPY backend/ . # Copy the built frontend dist into the go:embed target path COPY --from=node-builder /src/frontend/dist ./internal/static/dist/ # Generate Swagger docs (gitignored, must be built in CI) -RUN swag init -g cmd/server/main.go --parseDependency --parseInternal +RUN swag init -d cmd/server,internal/handler -g main.go --parseDependency --parseInternal RUN go build -ldflags="-s -w -X github.com/openscanner/openscanner/internal/config.Version=${VERSION}" -o /openscanner ./cmd/server # Stage 3: Minimal runtime image From 2622909f482c9869aabeac1acb1103fbc916771a Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Sat, 25 Apr 2026 12:29:02 -0400 Subject: [PATCH 10/27] fix(admin): drop Active badge, fix MP3 default, correct audio-conversion copy (#19) - OptionsPanel: remove green 'Active' badge from wired settings; only 'Planned' badges render now. - OptionsPanel: reword 'Audio Conversion' description to reflect that MP3 and AAC outputs are both supported via the encoding preset. - seed: default audioEncodingPreset is now mp3_32k (matching the dropdown's '(default)' label and audio.ParseEncodingPreset's fallback) instead of aac_lc_32k. - audio/worker.go: move the '(default)' comment marker from PresetAACLC32k onto PresetMP3_32k to match the seed and parser. --- CHANGELOG.md | 15 +++++++++++++++ backend/internal/audio/worker.go | 4 ++-- backend/internal/seed/seed.go | 2 +- frontend/src/components/admin/OptionsPanel.tsx | 9 +++------ 4 files changed, 21 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b819201..7ba03dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,21 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 wire protocol, action names, and response shapes are unchanged. 
- Deployment guide reverse-proxy instructions now list `/api/ws` alongside `/ws` and `/api/admin/ws` as paths that need WebSocket-upgrade forwarding. +- Admin Options panel no longer shows an "Active" badge on every wired + setting; only "Planned" badges are rendered for not-yet-implemented + options. +- Admin Options "Audio Conversion" description now reads "Convert incoming + audio with FFmpeg before storing. Select the codec and bitrate below." + to reflect that MP3 and AAC outputs are both supported via the encoding + preset. + +### Fixed + +- Default `audioEncodingPreset` seeded into the settings table is now + `mp3_32k` (matching the dropdown's "(default)" label and the Go + `ParseEncodingPreset` fallback) instead of `aac_lc_32k`. New installs + enabling audio conversion will now default to MP3 32 kbps as the UI + advertises. ## [1.1.2] — 2026-04-24 diff --git a/backend/internal/audio/worker.go b/backend/internal/audio/worker.go index 41a8043..a768880 100644 --- a/backend/internal/audio/worker.go +++ b/backend/internal/audio/worker.go @@ -23,12 +23,12 @@ const ( type EncodingPreset string const ( - PresetAACLC32k EncodingPreset = "aac_lc_32k" // AAC-LC 32 kbps (default) + PresetAACLC32k EncodingPreset = "aac_lc_32k" // AAC-LC 32 kbps PresetAACLC24k EncodingPreset = "aac_lc_24k" // AAC-LC 24 kbps PresetAACLC16k EncodingPreset = "aac_lc_16k" // AAC-LC 16 kbps PresetHEAAC12k EncodingPreset = "he_aac_12k" // HE-AAC 12 kbps PresetHEAAC8k EncodingPreset = "he_aac_8k" // HE-AAC 8 kbps - PresetMP3_32k EncodingPreset = "mp3_32k" // MP3 32 kbps + PresetMP3_32k EncodingPreset = "mp3_32k" // MP3 32 kbps (default) PresetMP3_24k EncodingPreset = "mp3_24k" // MP3 24 kbps PresetMP3_16k EncodingPreset = "mp3_16k" // MP3 16 kbps ) diff --git a/backend/internal/seed/seed.go b/backend/internal/seed/seed.go index 0376d71..3c9faa1 100644 --- a/backend/internal/seed/seed.go +++ b/backend/internal/seed/seed.go @@ -72,7 +72,7 @@ func seedSettings(ctx context.Context, tx *sql.Tx) 
(bool, error) { {"disableDuplicateDetection", "false"}, {"sortTalkgroups", "false"}, {"audioConversion", "0"}, - {"audioEncodingPreset", "aac_lc_32k"}, + {"audioEncodingPreset", "mp3_32k"}, {"showListenersCount", "false"}, {"tagsToggle", "false"}, {"playbackGoesLive", "false"}, diff --git a/frontend/src/components/admin/OptionsPanel.tsx b/frontend/src/components/admin/OptionsPanel.tsx index 5ed70af..c2a1099 100644 --- a/frontend/src/components/admin/OptionsPanel.tsx +++ b/frontend/src/components/admin/OptionsPanel.tsx @@ -164,7 +164,8 @@ const DESCRIPTIONS: Record = { "Include patched talkgroups in search results (may slow search).", showListenersCount: "Display the active listener count on the main scanner screen.", - audioConversion: "Convert incoming audio to AAC/M4A using FFmpeg.", + audioConversion: + "Convert incoming audio with FFmpeg before storing. Select the codec and bitrate below.", audioEncodingPreset: "Codec and bitrate for converted audio. HE-AAC presets require libfdk_aac in your FFmpeg build. Lower bitrates save storage; choose based on your quality needs.", disableDuplicateDetection: @@ -309,11 +310,7 @@ export default function OptionsPanel() { Planned - ) : ( - - Active - - ); + ) : null; if (isBooleanKey(key)) { return ( From 92044095fa24b5b2c97956391bbc66eeaa3d033c Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Sat, 25 Apr 2026 12:42:52 -0400 Subject: [PATCH 11/27] refactor(frontend): group services into services/ws and services/audio (Phase 4) (#20) Phase 4 of the directory restructure plan. Pure file moves with import-path updates; no runtime behaviour change. - services/wsClient.ts -> services/ws/client.ts - services/wsClient.test.ts -> services/ws/client.test.ts - services/adminWsClient.ts -> services/ws/adminClient.ts - services/audioPlayer.ts -> services/audio/player.ts - services/beepPlayer.ts -> services/audio/beep.ts services/downloadFilename.ts stays put (not WS, not audio). 
All @/services/* import sites across components, hooks, and tests have been updated to the new paths. tsc --noEmit clean, 188/188 unit tests pass. --- CHANGELOG.md | 4 ++++ frontend/src/components/scanner/BookmarksPanel.test.tsx | 2 +- frontend/src/components/scanner/BookmarksPanel.tsx | 2 +- frontend/src/components/scanner/ControlToolbar.tsx | 2 +- frontend/src/components/scanner/SearchPanel.tsx | 2 +- frontend/src/hooks/useActiveUnit.ts | 2 +- frontend/src/hooks/useAdminActivity.ts | 2 +- frontend/src/hooks/useAdminLogs.ts | 2 +- frontend/src/hooks/useAdminWebSocket.ts | 2 +- frontend/src/hooks/useAudioPlayer.ts | 4 ++-- frontend/src/hooks/useWebSocket.ts | 2 +- frontend/src/hooks/useWsQuery.ts | 2 +- frontend/src/services/{beepPlayer.ts => audio/beep.ts} | 0 frontend/src/services/{audioPlayer.ts => audio/player.ts} | 2 +- frontend/src/services/{adminWsClient.ts => ws/adminClient.ts} | 0 frontend/src/services/{wsClient.test.ts => ws/client.test.ts} | 2 +- frontend/src/services/{wsClient.ts => ws/client.ts} | 0 17 files changed, 18 insertions(+), 14 deletions(-) rename frontend/src/services/{beepPlayer.ts => audio/beep.ts} (100%) rename frontend/src/services/{audioPlayer.ts => audio/player.ts} (99%) rename frontend/src/services/{adminWsClient.ts => ws/adminClient.ts} (100%) rename frontend/src/services/{wsClient.test.ts => ws/client.test.ts} (99%) rename frontend/src/services/{wsClient.ts => ws/client.ts} (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ba03dd..d5ea226 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 audio with FFmpeg before storing. Select the codec and bitrate below." to reflect that MP3 and AAC outputs are both supported via the encoding preset. +- Frontend `services/` directory grouped into `services/ws/` (`client.ts`, + `client.test.ts`, `adminClient.ts`) and `services/audio/` (`player.ts`, + `beep.ts`). 
All `@/services/*` imports across components and hooks have + been updated to the new paths. No runtime behaviour change. ### Fixed diff --git a/frontend/src/components/scanner/BookmarksPanel.test.tsx b/frontend/src/components/scanner/BookmarksPanel.test.tsx index 03b5768..0e4dd93 100644 --- a/frontend/src/components/scanner/BookmarksPanel.test.tsx +++ b/frontend/src/components/scanner/BookmarksPanel.test.tsx @@ -11,7 +11,7 @@ vi.mock("@/app/api", () => ({ useToggleBookmarkMutation: () => mockToggleBookmarkMutation(), })); -vi.mock("@/services/audioPlayer", () => ({ +vi.mock("@/services/audio/player", () => ({ audioPlayer: { play: vi.fn(), }, diff --git a/frontend/src/components/scanner/BookmarksPanel.tsx b/frontend/src/components/scanner/BookmarksPanel.tsx index e5b80d9..e45ab92 100644 --- a/frontend/src/components/scanner/BookmarksPanel.tsx +++ b/frontend/src/components/scanner/BookmarksPanel.tsx @@ -2,7 +2,7 @@ import { useState } from "react"; import { useGetBookmarkCallsQuery, useToggleBookmarkMutation } from "@/app/api"; import { useAppSelector } from "@/app/store"; import { selectToken } from "@/app/slices/authSlice"; -import { audioPlayer } from "@/services/audioPlayer"; +import { audioPlayer } from "@/services/audio/player"; import { sanitizeDownloadFilename } from "@/services/downloadFilename"; import { ShareCallButton } from "@/components/scanner/ShareCallButton"; import { X, Play, Download, Star, ChevronDown } from "lucide-react"; diff --git a/frontend/src/components/scanner/ControlToolbar.tsx b/frontend/src/components/scanner/ControlToolbar.tsx index 1dc918c..18dad6d 100644 --- a/frontend/src/components/scanner/ControlToolbar.tsx +++ b/frontend/src/components/scanner/ControlToolbar.tsx @@ -13,7 +13,7 @@ import { Search, } from "lucide-react"; import { useCallback } from "react"; -import { playBeep } from "@/services/beepPlayer"; +import { playBeep } from "@/services/audio/beep"; import type { AvoidEntry } from "@/types"; interface ControlToolbarProps { 
diff --git a/frontend/src/components/scanner/SearchPanel.tsx b/frontend/src/components/scanner/SearchPanel.tsx index cb1aab9..73cb7f9 100644 --- a/frontend/src/components/scanner/SearchPanel.tsx +++ b/frontend/src/components/scanner/SearchPanel.tsx @@ -36,7 +36,7 @@ import { } from "@/app/slices/callsSlice"; import { useGetBookmarkIDsQuery, useToggleBookmarkMutation } from "@/app/api"; import { selectToken } from "@/app/slices/authSlice"; -import { audioPlayer } from "@/services/audioPlayer"; +import { audioPlayer } from "@/services/audio/player"; import { sanitizeDownloadFilename } from "@/services/downloadFilename"; import type { Call } from "@/types"; diff --git a/frontend/src/hooks/useActiveUnit.ts b/frontend/src/hooks/useActiveUnit.ts index 7176f1c..02b7e69 100644 --- a/frontend/src/hooks/useActiveUnit.ts +++ b/frontend/src/hooks/useActiveUnit.ts @@ -1,5 +1,5 @@ import { useState, useEffect, useMemo } from "react"; -import { audioPlayer } from "@/services/audioPlayer"; +import { audioPlayer } from "@/services/audio/player"; interface SourceEntry { pos: number; diff --git a/frontend/src/hooks/useAdminActivity.ts b/frontend/src/hooks/useAdminActivity.ts index f7f8f14..bc645b0 100644 --- a/frontend/src/hooks/useAdminActivity.ts +++ b/frontend/src/hooks/useAdminActivity.ts @@ -1,5 +1,5 @@ import { useState, useEffect, useCallback, useRef } from "react"; -import { adminWsClient } from "@/services/adminWsClient"; +import { adminWsClient } from "@/services/ws/adminClient"; import type { ActivityStats, ActivityChartResponse, diff --git a/frontend/src/hooks/useAdminLogs.ts b/frontend/src/hooks/useAdminLogs.ts index 309a7d5..9560cd0 100644 --- a/frontend/src/hooks/useAdminLogs.ts +++ b/frontend/src/hooks/useAdminLogs.ts @@ -1,5 +1,5 @@ import { useState, useEffect, useCallback, useRef } from "react"; -import { adminWsClient } from "@/services/adminWsClient"; +import { adminWsClient } from "@/services/ws/adminClient"; import type { AdminLog } from "@/types"; interface 
LogQueryParams { diff --git a/frontend/src/hooks/useAdminWebSocket.ts b/frontend/src/hooks/useAdminWebSocket.ts index 1982d78..2b43826 100644 --- a/frontend/src/hooks/useAdminWebSocket.ts +++ b/frontend/src/hooks/useAdminWebSocket.ts @@ -1,6 +1,6 @@ import { useEffect, useCallback } from "react"; import { useAppDispatch, useAppSelector } from "@/app/store"; -import { adminWsClient } from "@/services/adminWsClient"; +import { adminWsClient } from "@/services/ws/adminClient"; import { setCredentials, usePostRefreshMutation } from "@/app/slices/authSlice"; import { api } from "@/app/api"; diff --git a/frontend/src/hooks/useAudioPlayer.ts b/frontend/src/hooks/useAudioPlayer.ts index 01e91cd..68f44f0 100644 --- a/frontend/src/hooks/useAudioPlayer.ts +++ b/frontend/src/hooks/useAudioPlayer.ts @@ -1,8 +1,8 @@ import { useEffect, useCallback, useState, useRef } from "react"; import { useAppDispatch, useAppSelector } from "@/app/store"; import { store } from "@/app/store"; -import { audioPlayer } from "@/services/audioPlayer"; -import { wsClient } from "@/services/wsClient"; +import { audioPlayer } from "@/services/audio/player"; +import { wsClient } from "@/services/ws/client"; import { setCurrentCall, clearCurrentCall, diff --git a/frontend/src/hooks/useWebSocket.ts b/frontend/src/hooks/useWebSocket.ts index 678cf22..33f9bb3 100644 --- a/frontend/src/hooks/useWebSocket.ts +++ b/frontend/src/hooks/useWebSocket.ts @@ -1,6 +1,6 @@ import { useEffect, useRef, useCallback } from "react"; import { useAppDispatch, useAppSelector } from "@/app/store"; -import { wsClient } from "@/services/wsClient"; +import { wsClient } from "@/services/ws/client"; import { setCredentials, usePostRefreshMutation } from "@/app/slices/authSlice"; import type { ConnectionStatus } from "@/types"; diff --git a/frontend/src/hooks/useWsQuery.ts b/frontend/src/hooks/useWsQuery.ts index 9035312..2f184d1 100644 --- a/frontend/src/hooks/useWsQuery.ts +++ b/frontend/src/hooks/useWsQuery.ts @@ -1,5 +1,5 @@ 
import { useState, useEffect, useCallback, useRef } from "react"; -import { adminWsClient } from "@/services/adminWsClient"; +import { adminWsClient } from "@/services/ws/adminClient"; // ─── useWsQuery ───────────────────────────────────────────────────────────── diff --git a/frontend/src/services/beepPlayer.ts b/frontend/src/services/audio/beep.ts similarity index 100% rename from frontend/src/services/beepPlayer.ts rename to frontend/src/services/audio/beep.ts diff --git a/frontend/src/services/audioPlayer.ts b/frontend/src/services/audio/player.ts similarity index 99% rename from frontend/src/services/audioPlayer.ts rename to frontend/src/services/audio/player.ts index 6c400e6..85da10c 100644 --- a/frontend/src/services/audioPlayer.ts +++ b/frontend/src/services/audio/player.ts @@ -1,5 +1,5 @@ import type { Call } from "@/types"; -import { bootstrapBeepContext } from "@/services/beepPlayer"; +import { bootstrapBeepContext } from "@/services/audio/beep"; interface QueueItem { call: Call; diff --git a/frontend/src/services/adminWsClient.ts b/frontend/src/services/ws/adminClient.ts similarity index 100% rename from frontend/src/services/adminWsClient.ts rename to frontend/src/services/ws/adminClient.ts diff --git a/frontend/src/services/wsClient.test.ts b/frontend/src/services/ws/client.test.ts similarity index 99% rename from frontend/src/services/wsClient.test.ts rename to frontend/src/services/ws/client.test.ts index b173ff1..d0ea66c 100644 --- a/frontend/src/services/wsClient.test.ts +++ b/frontend/src/services/ws/client.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; -import { wsClient } from "@/services/wsClient"; +import { wsClient } from "@/services/ws/client"; import { configureStore } from "@reduxjs/toolkit"; import { scannerSlice } from "@/app/slices/scannerSlice"; import { authSlice } from "@/app/slices/authSlice"; diff --git a/frontend/src/services/wsClient.ts b/frontend/src/services/ws/client.ts 
similarity index 100% rename from frontend/src/services/wsClient.ts rename to frontend/src/services/ws/client.ts From 4af92e6810c14dd28df2e9435d93b5bbd669cf05 Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Sat, 25 Apr 2026 12:49:14 -0400 Subject: [PATCH 12/27] refactor(frontend): split hooks/ into shared, scanner, admin (Phase 5) (#21) Phase 5 of the directory restructure plan. Pure file moves with import-path updates and new barrel index files; no runtime behaviour change. Moves (15 files, all tracked as renames): - hooks/{useAuthInit,useTheme,useTokenRefresh,useWebSocket}.* -> hooks/shared/ - hooks/{useScanner,useAudioPlayer,useTGSelectionSync,useActiveUnit}.ts -> hooks/scanner/ - hooks/{useAdminWebSocket,useAdminWsOps,useAdminActivity,useAdminLogs,useWsQuery}.ts -> hooks/admin/ Added barrel index.ts in each subfolder plus hooks/index.ts (root safety net). All 31 @/hooks/* import sites across components, pages, hooks, and tests have been updated to specific paths. tsc --noEmit clean, 188/188 unit tests pass. 
--- CHANGELOG.md | 7 +++++++ frontend/src/components/admin/ActivityPanel.tsx | 2 +- frontend/src/components/admin/AdminLayout.tsx | 2 +- frontend/src/components/admin/ApiKeysPanel.test.tsx | 2 +- frontend/src/components/admin/ApiKeysPanel.tsx | 2 +- frontend/src/components/admin/DirMonitorPanel.tsx | 2 +- frontend/src/components/admin/DownstreamsPanel.tsx | 2 +- frontend/src/components/admin/GroupsTagsPanel.tsx | 2 +- frontend/src/components/admin/LogsPanel.tsx | 4 ++-- frontend/src/components/admin/OptionsPanel.tsx | 2 +- frontend/src/components/admin/RadioReferenceCard.tsx | 2 +- frontend/src/components/admin/SharedLinksPanel.tsx | 2 +- frontend/src/components/admin/SystemsPanel.test.tsx | 2 +- frontend/src/components/admin/SystemsPanel.tsx | 2 +- frontend/src/components/admin/ToolsPanel.tsx | 2 +- frontend/src/components/admin/TranscriptionPanel.tsx | 2 +- frontend/src/components/admin/UsersPanel.test.tsx | 2 +- frontend/src/components/admin/UsersPanel.tsx | 2 +- frontend/src/components/admin/WebhooksPanel.tsx | 2 +- frontend/src/components/scanner/DisplayPanel.tsx | 2 +- frontend/src/components/scanner/LEDPanel.test.tsx | 2 +- frontend/src/components/scanner/LEDPanel.tsx | 2 +- frontend/src/hooks/admin/index.ts | 5 +++++ frontend/src/hooks/{ => admin}/useAdminActivity.ts | 0 frontend/src/hooks/{ => admin}/useAdminLogs.ts | 0 frontend/src/hooks/{ => admin}/useAdminWebSocket.ts | 0 frontend/src/hooks/{ => admin}/useAdminWsOps.ts | 2 +- frontend/src/hooks/{ => admin}/useWsQuery.ts | 0 frontend/src/hooks/index.ts | 7 +++++++ frontend/src/hooks/scanner/index.ts | 4 ++++ frontend/src/hooks/{ => scanner}/useActiveUnit.ts | 0 frontend/src/hooks/{ => scanner}/useAudioPlayer.ts | 0 frontend/src/hooks/{ => scanner}/useScanner.ts | 4 ++-- frontend/src/hooks/{ => scanner}/useTGSelectionSync.ts | 0 frontend/src/hooks/shared/index.ts | 4 ++++ frontend/src/hooks/{ => shared}/useAuthInit.test.tsx | 2 +- frontend/src/hooks/{ => shared}/useAuthInit.ts | 0 frontend/src/hooks/{ => 
shared}/useTheme.ts | 0 frontend/src/hooks/{ => shared}/useTokenRefresh.test.tsx | 2 +- frontend/src/hooks/{ => shared}/useTokenRefresh.ts | 0 frontend/src/hooks/{ => shared}/useWebSocket.ts | 0 frontend/src/main.tsx | 4 ++-- frontend/src/pages/Scanner.tsx | 4 ++-- 43 files changed, 58 insertions(+), 31 deletions(-) create mode 100644 frontend/src/hooks/admin/index.ts rename frontend/src/hooks/{ => admin}/useAdminActivity.ts (100%) rename frontend/src/hooks/{ => admin}/useAdminLogs.ts (100%) rename frontend/src/hooks/{ => admin}/useAdminWebSocket.ts (100%) rename frontend/src/hooks/{ => admin}/useAdminWsOps.ts (99%) rename frontend/src/hooks/{ => admin}/useWsQuery.ts (100%) create mode 100644 frontend/src/hooks/index.ts create mode 100644 frontend/src/hooks/scanner/index.ts rename frontend/src/hooks/{ => scanner}/useActiveUnit.ts (100%) rename frontend/src/hooks/{ => scanner}/useAudioPlayer.ts (100%) rename frontend/src/hooks/{ => scanner}/useScanner.ts (95%) rename frontend/src/hooks/{ => scanner}/useTGSelectionSync.ts (100%) create mode 100644 frontend/src/hooks/shared/index.ts rename frontend/src/hooks/{ => shared}/useAuthInit.test.tsx (98%) rename frontend/src/hooks/{ => shared}/useAuthInit.ts (100%) rename frontend/src/hooks/{ => shared}/useTheme.ts (100%) rename frontend/src/hooks/{ => shared}/useTokenRefresh.test.tsx (98%) rename frontend/src/hooks/{ => shared}/useTokenRefresh.ts (100%) rename frontend/src/hooks/{ => shared}/useWebSocket.ts (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index d5ea226..401f0c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 `client.test.ts`, `adminClient.ts`) and `services/audio/` (`player.ts`, `beep.ts`). All `@/services/*` imports across components and hooks have been updated to the new paths. No runtime behaviour change. 
+- Frontend `hooks/` directory split into `hooks/shared/` (`useAuthInit`, + `useTheme`, `useTokenRefresh`, `useWebSocket`), `hooks/scanner/` + (`useScanner`, `useAudioPlayer`, `useTGSelectionSync`, `useActiveUnit`), + and `hooks/admin/` (`useAdminWebSocket`, `useAdminWsOps`, + `useAdminActivity`, `useAdminLogs`, `useWsQuery`), each with a barrel + `index.ts`. All call sites have been updated to the new specific paths. + No runtime behaviour change. ### Fixed diff --git a/frontend/src/components/admin/ActivityPanel.tsx b/frontend/src/components/admin/ActivityPanel.tsx index 7c7804b..99e17df 100644 --- a/frontend/src/components/admin/ActivityPanel.tsx +++ b/frontend/src/components/admin/ActivityPanel.tsx @@ -1,4 +1,4 @@ -import { useAdminActivity } from "@/hooks/useAdminActivity"; +import { useAdminActivity } from "@/hooks/admin/useAdminActivity"; import type { ChartBucket } from "@/app/slices/activitySlice"; import { Activity, diff --git a/frontend/src/components/admin/AdminLayout.tsx b/frontend/src/components/admin/AdminLayout.tsx index 0a7399f..eed4b99 100644 --- a/frontend/src/components/admin/AdminLayout.tsx +++ b/frontend/src/components/admin/AdminLayout.tsx @@ -36,7 +36,7 @@ import { clearCredentials, usePostLogoutMutation, } from "@/app/slices/authSlice"; -import { useAdminWebSocket } from "@/hooks/useAdminWebSocket"; +import { useAdminWebSocket } from "@/hooks/admin/useAdminWebSocket"; import UsersPanel from "@/components/admin/UsersPanel"; import SystemsPanel from "@/components/admin/SystemsPanel"; import GroupsTagsPanel from "@/components/admin/GroupsTagsPanel"; diff --git a/frontend/src/components/admin/ApiKeysPanel.test.tsx b/frontend/src/components/admin/ApiKeysPanel.test.tsx index e51f3e3..464e86a 100644 --- a/frontend/src/components/admin/ApiKeysPanel.test.tsx +++ b/frontend/src/components/admin/ApiKeysPanel.test.tsx @@ -60,7 +60,7 @@ const deleteApiKeyMutate = vi.fn((_arg: unknown) => ({ unwrap: deleteApiKeyUnwrap, })); 
-vi.mock("@/hooks/useAdminWsOps", () => ({ +vi.mock("@/hooks/admin/useAdminWsOps", () => ({ useListApiKeysQuery: () => ({ data: mockKeys, isLoading: false }), useListSystemsQuery: () => ({ data: mockSystems, isLoading: false }), useGetConfigQuery: () => ({ data: { settings: [] }, isLoading: false }), diff --git a/frontend/src/components/admin/ApiKeysPanel.tsx b/frontend/src/components/admin/ApiKeysPanel.tsx index 40fb8e2..1a8fe8e 100644 --- a/frontend/src/components/admin/ApiKeysPanel.tsx +++ b/frontend/src/components/admin/ApiKeysPanel.tsx @@ -7,7 +7,7 @@ import { useDeleteApiKeyMutation, useListSystemsQuery, useGetConfigQuery, -} from "@/hooks/useAdminWsOps"; +} from "@/hooks/admin/useAdminWsOps"; import type { AdminApiKey } from "@/types"; // ─── Form state ─── diff --git a/frontend/src/components/admin/DirMonitorPanel.tsx b/frontend/src/components/admin/DirMonitorPanel.tsx index 786f6d5..45a6ef1 100644 --- a/frontend/src/components/admin/DirMonitorPanel.tsx +++ b/frontend/src/components/admin/DirMonitorPanel.tsx @@ -8,7 +8,7 @@ import { useDeleteDirMonitorMutation, useListSystemsQuery, useListTalkgroupsQuery, -} from "@/hooks/useAdminWsOps"; +} from "@/hooks/admin/useAdminWsOps"; import type { AdminDirMonitor } from "@/types"; const DIRMONITOR_TYPES = [ diff --git a/frontend/src/components/admin/DownstreamsPanel.tsx b/frontend/src/components/admin/DownstreamsPanel.tsx index 4d7157f..b3dc2b3 100644 --- a/frontend/src/components/admin/DownstreamsPanel.tsx +++ b/frontend/src/components/admin/DownstreamsPanel.tsx @@ -6,7 +6,7 @@ import { useUpdateDownstreamMutation, useDeleteDownstreamMutation, useListSystemsQuery, -} from "@/hooks/useAdminWsOps"; +} from "@/hooks/admin/useAdminWsOps"; import type { AdminDownstream } from "@/types"; interface DownstreamFormState { diff --git a/frontend/src/components/admin/GroupsTagsPanel.tsx b/frontend/src/components/admin/GroupsTagsPanel.tsx index caf6af2..04e56e4 100644 --- a/frontend/src/components/admin/GroupsTagsPanel.tsx +++ 
b/frontend/src/components/admin/GroupsTagsPanel.tsx @@ -9,7 +9,7 @@ import { useCreateTagMutation, useUpdateTagMutation, useDeleteTagMutation, -} from "@/hooks/useAdminWsOps"; +} from "@/hooks/admin/useAdminWsOps"; import type { AdminGroup, AdminTag } from "@/types"; // ─── Generic label CRUD table ─── diff --git a/frontend/src/components/admin/LogsPanel.tsx b/frontend/src/components/admin/LogsPanel.tsx index d39fea7..14cdb70 100644 --- a/frontend/src/components/admin/LogsPanel.tsx +++ b/frontend/src/components/admin/LogsPanel.tsx @@ -9,8 +9,8 @@ import { X, } from "lucide-react"; import { useVirtualizer } from "@tanstack/react-virtual"; -import { useUpdateConfigMutation } from "@/hooks/useAdminWsOps"; -import { useAdminLogs, useAdminLogLevel } from "@/hooks/useAdminLogs"; +import { useUpdateConfigMutation } from "@/hooks/admin/useAdminWsOps"; +import { useAdminLogs, useAdminLogLevel } from "@/hooks/admin/useAdminLogs"; import type { AdminLog } from "@/types"; // ─── Constants ────────────────────────────────────────────── diff --git a/frontend/src/components/admin/OptionsPanel.tsx b/frontend/src/components/admin/OptionsPanel.tsx index c2a1099..8093b1c 100644 --- a/frontend/src/components/admin/OptionsPanel.tsx +++ b/frontend/src/components/admin/OptionsPanel.tsx @@ -10,7 +10,7 @@ import { import { useGetConfigQuery, useUpdateConfigMutation, -} from "@/hooks/useAdminWsOps"; +} from "@/hooks/admin/useAdminWsOps"; import { useNavigationGuard } from "@/components/admin/NavigationGuardContext"; import type { AdminSetting } from "@/types"; diff --git a/frontend/src/components/admin/RadioReferenceCard.tsx b/frontend/src/components/admin/RadioReferenceCard.tsx index 2b502b5..da5ee78 100644 --- a/frontend/src/components/admin/RadioReferenceCard.tsx +++ b/frontend/src/components/admin/RadioReferenceCard.tsx @@ -1,7 +1,7 @@ import { useState, useRef, useCallback } from "react"; import { Upload, CheckCircle, XCircle, AlertTriangle } from "lucide-react"; import { 
useRrPreviewCSVMutation } from "@/app/slices/adminSlice"; -import { useRrApplyMutation, useListSystemsQuery } from "@/hooks/useAdminWsOps"; +import { useRrApplyMutation, useListSystemsQuery } from "@/hooks/admin/useAdminWsOps"; import type { RRPreviewResponse, RRPreviewRow, diff --git a/frontend/src/components/admin/SharedLinksPanel.tsx b/frontend/src/components/admin/SharedLinksPanel.tsx index 299073f..22017a3 100644 --- a/frontend/src/components/admin/SharedLinksPanel.tsx +++ b/frontend/src/components/admin/SharedLinksPanel.tsx @@ -2,7 +2,7 @@ import { Trash2, ExternalLink } from "lucide-react"; import { useGetSharedLinksQuery, useDeleteSharedLinkMutation, -} from "@/hooks/useAdminWsOps"; +} from "@/hooks/admin/useAdminWsOps"; function formatDate(unix: number): string { return new Date(unix * 1000).toLocaleString(); diff --git a/frontend/src/components/admin/SystemsPanel.test.tsx b/frontend/src/components/admin/SystemsPanel.test.tsx index 0c158f3..4e51384 100644 --- a/frontend/src/components/admin/SystemsPanel.test.tsx +++ b/frontend/src/components/admin/SystemsPanel.test.tsx @@ -52,7 +52,7 @@ const noopMutate = vi.fn(() => ({ unwrap: vi.fn().mockResolvedValue(undefined), })); -vi.mock("@/hooks/useAdminWsOps", () => ({ +vi.mock("@/hooks/admin/useAdminWsOps", () => ({ useListSystemsQuery: () => ({ data: mockSystems, isLoading: false }), useCreateSystemMutation: () => [createSystemMutate, {}], useUpdateSystemMutation: () => [updateSystemMutate, {}], diff --git a/frontend/src/components/admin/SystemsPanel.tsx b/frontend/src/components/admin/SystemsPanel.tsx index b7ddbc0..40b5562 100644 --- a/frontend/src/components/admin/SystemsPanel.tsx +++ b/frontend/src/components/admin/SystemsPanel.tsx @@ -18,7 +18,7 @@ import { useListTagsQuery, useGetConfigQuery, useUpdateConfigMutation, -} from "@/hooks/useAdminWsOps"; +} from "@/hooks/admin/useAdminWsOps"; import type { AdminSystem, AdminTalkgroup, AdminUnit } from "@/types"; // ─── System card ─── diff --git 
a/frontend/src/components/admin/ToolsPanel.tsx b/frontend/src/components/admin/ToolsPanel.tsx index 9073840..4d744a1 100644 --- a/frontend/src/components/admin/ToolsPanel.tsx +++ b/frontend/src/components/admin/ToolsPanel.tsx @@ -20,7 +20,7 @@ import { useLazyExportTagsQuery, useImportConfigMutation, useListSystemsQuery, -} from "@/hooks/useAdminWsOps"; +} from "@/hooks/admin/useAdminWsOps"; import { selectToken } from "@/app/slices/authSlice"; import { useAppSelector } from "@/app/store"; import RadioReferenceCard from "@/components/admin/RadioReferenceCard"; diff --git a/frontend/src/components/admin/TranscriptionPanel.tsx b/frontend/src/components/admin/TranscriptionPanel.tsx index 4c2ad4d..812b01d 100644 --- a/frontend/src/components/admin/TranscriptionPanel.tsx +++ b/frontend/src/components/admin/TranscriptionPanel.tsx @@ -7,7 +7,7 @@ import { useTranscriptionDeleteMutation, useTranscriptionStatsQuery, useUpdateConfigMutation, -} from "@/hooks/useAdminWsOps"; +} from "@/hooks/admin/useAdminWsOps"; import type { WhisperModel } from "@/types"; const KNOWN_MODELS = [ diff --git a/frontend/src/components/admin/UsersPanel.test.tsx b/frontend/src/components/admin/UsersPanel.test.tsx index 07a08ab..a40ca7c 100644 --- a/frontend/src/components/admin/UsersPanel.test.tsx +++ b/frontend/src/components/admin/UsersPanel.test.tsx @@ -64,7 +64,7 @@ const deleteUserMutate = vi.fn((_arg: unknown) => ({ unwrap: deleteUserUnwrap, })); -vi.mock("@/hooks/useAdminWsOps", () => ({ +vi.mock("@/hooks/admin/useAdminWsOps", () => ({ useListUsersQuery: () => ({ data: mockUsers, isLoading: false }), useListSystemsQuery: () => ({ data: mockSystems, isLoading: false }), useCreateUserMutation: () => [createUserMutate, {}], diff --git a/frontend/src/components/admin/UsersPanel.tsx b/frontend/src/components/admin/UsersPanel.tsx index 6babb8e..05325db 100644 --- a/frontend/src/components/admin/UsersPanel.tsx +++ b/frontend/src/components/admin/UsersPanel.tsx @@ -6,7 +6,7 @@ import { 
useCreateUserMutation, useUpdateUserMutation, useDeleteUserMutation, -} from "@/hooks/useAdminWsOps"; +} from "@/hooks/admin/useAdminWsOps"; import type { AdminUser, CreateUserPayload, UpdateUserPayload } from "@/types"; interface UserFormState { diff --git a/frontend/src/components/admin/WebhooksPanel.tsx b/frontend/src/components/admin/WebhooksPanel.tsx index 168eaf2..e1efb56 100644 --- a/frontend/src/components/admin/WebhooksPanel.tsx +++ b/frontend/src/components/admin/WebhooksPanel.tsx @@ -5,7 +5,7 @@ import { useCreateWebhookMutation, useUpdateWebhookMutation, useDeleteWebhookMutation, -} from "@/hooks/useAdminWsOps"; +} from "@/hooks/admin/useAdminWsOps"; import type { AdminWebhook } from "@/types"; interface WebhookFormState { diff --git a/frontend/src/components/scanner/DisplayPanel.tsx b/frontend/src/components/scanner/DisplayPanel.tsx index 9352a8d..26ec420 100644 --- a/frontend/src/components/scanner/DisplayPanel.tsx +++ b/frontend/src/components/scanner/DisplayPanel.tsx @@ -12,7 +12,7 @@ import { useGetBookmarkIDsQuery, useToggleBookmarkMutation } from "@/app/api"; import { useShareCallMutation } from "@/app/slices/shareSlice"; import { HistoryPanel } from "@/components/scanner/HistoryPanel"; import { TranscriptPanel } from "@/components/scanner/TranscriptPanel"; -import { useActiveUnit } from "@/hooks/useActiveUnit"; +import { useActiveUnit } from "@/hooks/scanner/useActiveUnit"; import { useAppSelector } from "@/app/store"; import type { AvoidEntry, Call } from "@/types"; diff --git a/frontend/src/components/scanner/LEDPanel.test.tsx b/frontend/src/components/scanner/LEDPanel.test.tsx index e6fec5b..b80b9b5 100644 --- a/frontend/src/components/scanner/LEDPanel.test.tsx +++ b/frontend/src/components/scanner/LEDPanel.test.tsx @@ -15,7 +15,7 @@ import type { Call, ScannerConfig } from "@/types"; const mockToggle = vi.fn(); let mockIsDark = true; -vi.mock("@/hooks/useTheme", () => ({ +vi.mock("@/hooks/shared/useTheme", () => ({ useTheme: () => ({ isDark: 
mockIsDark, toggle: mockToggle, diff --git a/frontend/src/components/scanner/LEDPanel.tsx b/frontend/src/components/scanner/LEDPanel.tsx index 6bc8cf1..2e3fcf1 100644 --- a/frontend/src/components/scanner/LEDPanel.tsx +++ b/frontend/src/components/scanner/LEDPanel.tsx @@ -9,7 +9,7 @@ import { } from "lucide-react"; import { useState, useRef, useEffect } from "react"; import { useNavigate } from "react-router-dom"; -import { useTheme } from "@/hooks/useTheme"; +import { useTheme } from "@/hooks/shared/useTheme"; import { useAppSelector, useAppDispatch } from "@/app/store"; import { selectToken, diff --git a/frontend/src/hooks/admin/index.ts b/frontend/src/hooks/admin/index.ts new file mode 100644 index 0000000..70d3b46 --- /dev/null +++ b/frontend/src/hooks/admin/index.ts @@ -0,0 +1,5 @@ +export * from "./useAdminActivity"; +export * from "./useAdminLogs"; +export * from "./useAdminWebSocket"; +export * from "./useAdminWsOps"; +export * from "./useWsQuery"; diff --git a/frontend/src/hooks/useAdminActivity.ts b/frontend/src/hooks/admin/useAdminActivity.ts similarity index 100% rename from frontend/src/hooks/useAdminActivity.ts rename to frontend/src/hooks/admin/useAdminActivity.ts diff --git a/frontend/src/hooks/useAdminLogs.ts b/frontend/src/hooks/admin/useAdminLogs.ts similarity index 100% rename from frontend/src/hooks/useAdminLogs.ts rename to frontend/src/hooks/admin/useAdminLogs.ts diff --git a/frontend/src/hooks/useAdminWebSocket.ts b/frontend/src/hooks/admin/useAdminWebSocket.ts similarity index 100% rename from frontend/src/hooks/useAdminWebSocket.ts rename to frontend/src/hooks/admin/useAdminWebSocket.ts diff --git a/frontend/src/hooks/useAdminWsOps.ts b/frontend/src/hooks/admin/useAdminWsOps.ts similarity index 99% rename from frontend/src/hooks/useAdminWsOps.ts rename to frontend/src/hooks/admin/useAdminWsOps.ts index 838f22a..412a928 100644 --- a/frontend/src/hooks/useAdminWsOps.ts +++ b/frontend/src/hooks/admin/useAdminWsOps.ts @@ -1,4 +1,4 @@ -import { 
useWsQuery, useWsMutation, useLazyWsQuery } from "@/hooks/useWsQuery"; +import { useWsQuery, useWsMutation, useLazyWsQuery } from "@/hooks/admin/useWsQuery"; import type { AdminUser, AdminSystem, diff --git a/frontend/src/hooks/useWsQuery.ts b/frontend/src/hooks/admin/useWsQuery.ts similarity index 100% rename from frontend/src/hooks/useWsQuery.ts rename to frontend/src/hooks/admin/useWsQuery.ts diff --git a/frontend/src/hooks/index.ts b/frontend/src/hooks/index.ts new file mode 100644 index 0000000..685bad6 --- /dev/null +++ b/frontend/src/hooks/index.ts @@ -0,0 +1,7 @@ +// Barrel re-export for the hooks tree. Prefer specific imports +// (`@/hooks/shared/useTheme`) in new code; this barrel exists as a +// safety net for callers that want to grab a hook without thinking +// about which subfolder it lives in. +export * from "./admin"; +export * from "./scanner"; +export * from "./shared"; diff --git a/frontend/src/hooks/scanner/index.ts b/frontend/src/hooks/scanner/index.ts new file mode 100644 index 0000000..4ddb62a --- /dev/null +++ b/frontend/src/hooks/scanner/index.ts @@ -0,0 +1,4 @@ +export * from "./useActiveUnit"; +export * from "./useAudioPlayer"; +export * from "./useScanner"; +export * from "./useTGSelectionSync"; diff --git a/frontend/src/hooks/useActiveUnit.ts b/frontend/src/hooks/scanner/useActiveUnit.ts similarity index 100% rename from frontend/src/hooks/useActiveUnit.ts rename to frontend/src/hooks/scanner/useActiveUnit.ts diff --git a/frontend/src/hooks/useAudioPlayer.ts b/frontend/src/hooks/scanner/useAudioPlayer.ts similarity index 100% rename from frontend/src/hooks/useAudioPlayer.ts rename to frontend/src/hooks/scanner/useAudioPlayer.ts diff --git a/frontend/src/hooks/useScanner.ts b/frontend/src/hooks/scanner/useScanner.ts similarity index 95% rename from frontend/src/hooks/useScanner.ts rename to frontend/src/hooks/scanner/useScanner.ts index 46a3aca..b2e4837 100644 --- a/frontend/src/hooks/useScanner.ts +++ 
b/frontend/src/hooks/scanner/useScanner.ts @@ -1,7 +1,7 @@ import { useCallback } from "react"; import { useAppDispatch, useAppSelector } from "@/app/store"; -import { useWebSocket } from "@/hooks/useWebSocket"; -import { useAudioPlayer } from "@/hooks/useAudioPlayer"; +import { useWebSocket } from "@/hooks/shared/useWebSocket"; +import { useAudioPlayer } from "@/hooks/scanner/useAudioPlayer"; import { togglePause, toggleLive, diff --git a/frontend/src/hooks/useTGSelectionSync.ts b/frontend/src/hooks/scanner/useTGSelectionSync.ts similarity index 100% rename from frontend/src/hooks/useTGSelectionSync.ts rename to frontend/src/hooks/scanner/useTGSelectionSync.ts diff --git a/frontend/src/hooks/shared/index.ts b/frontend/src/hooks/shared/index.ts new file mode 100644 index 0000000..23dba99 --- /dev/null +++ b/frontend/src/hooks/shared/index.ts @@ -0,0 +1,4 @@ +export * from "./useAuthInit"; +export * from "./useTheme"; +export * from "./useTokenRefresh"; +export * from "./useWebSocket"; diff --git a/frontend/src/hooks/useAuthInit.test.tsx b/frontend/src/hooks/shared/useAuthInit.test.tsx similarity index 98% rename from frontend/src/hooks/useAuthInit.test.tsx rename to frontend/src/hooks/shared/useAuthInit.test.tsx index 21ec627..2a8dec4 100644 --- a/frontend/src/hooks/useAuthInit.test.tsx +++ b/frontend/src/hooks/shared/useAuthInit.test.tsx @@ -8,7 +8,7 @@ import { authSlice } from "@/app/slices/authSlice"; import { callsSlice } from "@/app/slices/callsSlice"; import { api } from "@/app/api"; import type { RootState } from "@/app/store"; -import { useAuthInit } from "@/hooks/useAuthInit"; +import { useAuthInit } from "@/hooks/shared/useAuthInit"; // ── Mock the refresh mutation ───────────────────────────────────────────── diff --git a/frontend/src/hooks/useAuthInit.ts b/frontend/src/hooks/shared/useAuthInit.ts similarity index 100% rename from frontend/src/hooks/useAuthInit.ts rename to frontend/src/hooks/shared/useAuthInit.ts diff --git 
a/frontend/src/hooks/useTheme.ts b/frontend/src/hooks/shared/useTheme.ts similarity index 100% rename from frontend/src/hooks/useTheme.ts rename to frontend/src/hooks/shared/useTheme.ts diff --git a/frontend/src/hooks/useTokenRefresh.test.tsx b/frontend/src/hooks/shared/useTokenRefresh.test.tsx similarity index 98% rename from frontend/src/hooks/useTokenRefresh.test.tsx rename to frontend/src/hooks/shared/useTokenRefresh.test.tsx index a847688..484e8e5 100644 --- a/frontend/src/hooks/useTokenRefresh.test.tsx +++ b/frontend/src/hooks/shared/useTokenRefresh.test.tsx @@ -6,7 +6,7 @@ import { scannerSlice } from "@/app/slices/scannerSlice"; import { authSlice, setCredentials } from "@/app/slices/authSlice"; import { callsSlice } from "@/app/slices/callsSlice"; import { api } from "@/app/api"; -import { useTokenRefresh } from "@/hooks/useTokenRefresh"; +import { useTokenRefresh } from "@/hooks/shared/useTokenRefresh"; // ── Mocks ──────────────────────────────────────────────────────────────── diff --git a/frontend/src/hooks/useTokenRefresh.ts b/frontend/src/hooks/shared/useTokenRefresh.ts similarity index 100% rename from frontend/src/hooks/useTokenRefresh.ts rename to frontend/src/hooks/shared/useTokenRefresh.ts diff --git a/frontend/src/hooks/useWebSocket.ts b/frontend/src/hooks/shared/useWebSocket.ts similarity index 100% rename from frontend/src/hooks/useWebSocket.ts rename to frontend/src/hooks/shared/useWebSocket.ts diff --git a/frontend/src/main.tsx b/frontend/src/main.tsx index a388999..84365bd 100644 --- a/frontend/src/main.tsx +++ b/frontend/src/main.tsx @@ -5,8 +5,8 @@ import { BrowserRouter, Routes, Route } from "react-router-dom"; import { store } from "@/app/store"; import { useAppSelector } from "@/app/store"; import { selectAuthReady } from "@/app/slices/authSlice"; -import { useAuthInit } from "@/hooks/useAuthInit"; -import { useTokenRefresh } from "@/hooks/useTokenRefresh"; +import { useAuthInit } from "@/hooks/shared/useAuthInit"; +import { 
useTokenRefresh } from "@/hooks/shared/useTokenRefresh"; import "@/index.css"; const Scanner = lazy(() => import("@/pages/Scanner")); diff --git a/frontend/src/pages/Scanner.tsx b/frontend/src/pages/Scanner.tsx index d470b69..2de844b 100644 --- a/frontend/src/pages/Scanner.tsx +++ b/frontend/src/pages/Scanner.tsx @@ -9,8 +9,8 @@ import { setLive, resetDisplay, } from "@/app/slices/scannerSlice"; -import { useScanner } from "@/hooks/useScanner"; -import { useTGSelectionSync } from "@/hooks/useTGSelectionSync"; +import { useScanner } from "@/hooks/scanner/useScanner"; +import { useTGSelectionSync } from "@/hooks/scanner/useTGSelectionSync"; import { LEDPanel } from "@/components/scanner/LEDPanel"; import { DisplayPanel } from "@/components/scanner/DisplayPanel"; import { ControlToolbar } from "@/components/scanner/ControlToolbar"; From d9b1c95eadedde233dee750c662871757fc29fa6 Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Sat, 25 Apr 2026 12:55:43 -0400 Subject: [PATCH 13/27] refactor(frontend): split types/index.ts into topic-scoped modules (Phase 6) (#22) Phase 6 of the directory restructure plan. The 413-line types/index.ts god-file is split into seven topic-scoped modules; the original path becomes a barrel that re-exports everything so all existing @/types import sites continue to compile unchanged. New layout: - types/call.ts - Call, TranscriptionSegment - types/config.ts - SystemConfig, TalkgroupConfig, ScannerConfig - types/ws.ts - WsCommand, ConnectionStatus - types/auth.ts - LoginResponse, RefreshResponse, ChangePasswordRequest - types/api.ts - SetupStatus - types/admin.ts - All Admin* DTOs, Capabilities, ConfigResponse, CreateUserPayload, UpdateUserPayload, RR*, SharedLinkAdmin, ServerDirectory*, Transcription* - types/ui.ts - AvoidEntry - types/index.ts - barrel re-export No type signatures or behaviour changed. tsc --noEmit clean, 188/188 unit tests pass. 
--- CHANGELOG.md | 5 + frontend/src/types/admin.ts | 281 +++++++++++++++++++++++ frontend/src/types/api.ts | 7 + frontend/src/types/auth.ts | 25 +++ frontend/src/types/call.ts | 40 ++++ frontend/src/types/config.ts | 34 +++ frontend/src/types/index.ts | 423 +---------------------------------- frontend/src/types/ui.ts | 7 + frontend/src/types/ws.ts | 19 ++ 9 files changed, 428 insertions(+), 413 deletions(-) create mode 100644 frontend/src/types/admin.ts create mode 100644 frontend/src/types/api.ts create mode 100644 frontend/src/types/auth.ts create mode 100644 frontend/src/types/call.ts create mode 100644 frontend/src/types/config.ts create mode 100644 frontend/src/types/ui.ts create mode 100644 frontend/src/types/ws.ts diff --git a/CHANGELOG.md b/CHANGELOG.md index 401f0c3..5118a21 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 `useAdminActivity`, `useAdminLogs`, `useWsQuery`), each with a barrel `index.ts`. All call sites have been updated to the new specific paths. No runtime behaviour change. +- Frontend `types/index.ts` god-file split into topic-scoped modules + (`call.ts`, `config.ts`, `ws.ts`, `auth.ts`, `api.ts`, `admin.ts`, + `ui.ts`). The original `index.ts` is now a barrel that re-exports + everything, so all existing `@/types` imports keep working unchanged. + New code can also import from a specific module (e.g. `@/types/admin`). ### Fixed diff --git a/frontend/src/types/admin.ts b/frontend/src/types/admin.ts new file mode 100644 index 0000000..f0b9734 --- /dev/null +++ b/frontend/src/types/admin.ts @@ -0,0 +1,281 @@ +// Admin DTOs (mirrors of ADM/ADMRES payloads and admin REST envelopes). 
+ +export interface AdminUser { + id: number; + username: string; + role: "admin" | "listener"; + disabled: number; // 0 or 1 + systemsJson: string | null; + expiration: number | null; // unix timestamp + limit: number | null; // concurrent connection limit + createdAt: number; + updatedAt: number; +} + +export interface AdminSystem { + id: number; + systemId: number; + label: string; + autoPopulateTalkgroups: number; + blacklistsJson: string | null; + led: string | null; + order: number; +} + +export interface AdminTalkgroup { + id: number; + systemId: number; + talkgroupId: number; + label: string | null; + name: string | null; + frequency: number | null; + led: string | null; + groupId: number | null; + tagId: number | null; + order: number; +} + +export interface AdminUnit { + id: number; + systemId: number; + unitId: number; + label: string | null; + order: number; +} + +export interface AdminGroup { + id: number; + label: string; +} + +export interface AdminTag { + id: number; + label: string; +} + +export interface AdminApiKey { + id: number; + fingerprint: string; + ident: string | null; + disabled: number; + systemsJson: string | null; + callRateLimit: number | null; + order: number; +} + +export interface AdminApiKeyCreateResponse extends AdminApiKey { + createdKey: string; +} + +export interface AdminDownstream { + id: number; + url: string; + hasApiKey: boolean; + systemsJson: string | null; + disabled: number; + order: number; +} + +export interface AdminDownstreamCreate { + url: string; + apiKey: string; + systemsJson: string | null; + disabled: number; + order: number; +} + +export interface AdminDownstreamUpdate { + id: number; + url: string; + apiKey: string; + systemsJson: string | null; + disabled: number; + order: number; +} + +export interface AdminWebhook { + id: number; + url: string; + type: string; + secret: string | null; + systemsJson: string | null; + disabled: number; + order: number; +} + +export interface AdminSetting { + key: string; 
+ value: string; +} + +export interface Capabilities { + ffmpeg: boolean; + fdkAac: boolean; + whisper: boolean; +} + +export interface ConfigResponse { + settings: AdminSetting[]; + capabilities: Capabilities; +} + +export interface AdminLog { + dateTime: number; + level: string; + message: string; + attrs?: Record; +} + +// User create/update payload +export interface CreateUserPayload { + username: string; + password: string; + role: "admin" | "listener"; + disabled?: number; + systemsJson?: string | null; + expiration?: number | null; + limit?: number | null; +} + +export interface UpdateUserPayload { + username?: string; + password?: string; + role?: "admin" | "listener"; + disabled?: number; + systemsJson?: string | null; + expiration?: number | null; + limit?: number | null; +} + +export interface AdminDirMonitor { + id: number; + directory: string; + type: string; + mask: string | null; + extension: string | null; + frequency: number | null; + delay: number | null; + deleteAfter: number; + usePolling: number; + disabled: number; + systemId: number | null; + talkgroupId: number | null; + order: number; +} + +// --- RadioReference enrichment types --- + +export interface RRTalkgroupCandidate { + row: number; + talkgroupId: number; + label?: string; + name?: string; + group?: string; + tag?: string; + led?: string; + order?: number; +} + +export interface RRPreviewRow extends RRTalkgroupCandidate { + matched: boolean; + wouldUpdate: boolean; + wouldUpdateFields: string[]; + skipReason?: string; +} + +export interface RRRowError { + row: number; + reason: string; +} + +export interface RRPreviewResponse { + processed: number; + matched: number; + wouldUpdate: number; + skipped: number; + errors: number; + rowErrors: RRRowError[]; + rows: RRPreviewRow[]; +} + +export interface RRApplyRequest { + systemId: number; + candidates: RRTalkgroupCandidate[]; + mergeMode: string; + selectedFields: string[]; +} + +export interface RRApplyResponse { + processed: number; + 
matched: number; + updated: number; + skipped: number; + errors: number; + rowErrors: RRRowError[]; +} + +// --- Shared Links (admin) --- + +export interface SharedLinkAdmin { + id: number; + callId: number; + token: string; + createdAt: number; + sharedBy: string; + dateTime: number; + duration: number; + systemLabel: string; + talkgroupLabel: string; + talkgroupName: string; + expiresAt: number | null; +} + +// --- Server filesystem types --- + +export interface ServerDirectoryEntry { + name: string; + path: string; +} + +export interface ServerDirectoryListResponse { + path: string; + parent: string | null; + directories: ServerDirectoryEntry[]; +} + +// --- Transcription types --- + +export interface TranscriptionStatus { + enabled: boolean; + url: string; + model: string; + language: string; + diarize: boolean; + liveDisplay: boolean; + connected: boolean; +} + +export interface WhisperModel { + id: string; + object: string; + path: string; + created: number; + owned_by: string; +} + +export interface TranscriptionModelsResponse { + object: string; + models: WhisperModel[]; +} + +export interface TranscriptionStats { + total: number; + recent24h: number; + avgDurationMs: number; + minDurationMs: number; + maxDurationMs: number; + queueDepth: number; + poolEnabled: boolean; + byLanguage: { language: string; count: number }[]; + byModel: { model: string; count: number }[]; +} diff --git a/frontend/src/types/api.ts b/frontend/src/types/api.ts new file mode 100644 index 0000000..fe375c6 --- /dev/null +++ b/frontend/src/types/api.ts @@ -0,0 +1,7 @@ +// REST request/response envelopes (non-admin). + +// Setup status from GET /api/setup/status +export interface SetupStatus { + needsSetup: boolean; + publicAccess: boolean; +} diff --git a/frontend/src/types/auth.ts b/frontend/src/types/auth.ts new file mode 100644 index 0000000..6a40dbf --- /dev/null +++ b/frontend/src/types/auth.ts @@ -0,0 +1,25 @@ +// Authentication request/response shapes. 
+ +export interface LoginResponse { + token: string; + user: { + id: number; + username: string; + role: string; + }; + passwordNeedChange: boolean; +} + +export interface RefreshResponse { + token: string; + user: { + id: number; + username: string; + role: string; + }; +} + +export interface ChangePasswordRequest { + currentPassword: string; + newPassword: string; +} diff --git a/frontend/src/types/call.ts b/frontend/src/types/call.ts new file mode 100644 index 0000000..09a8ed3 --- /dev/null +++ b/frontend/src/types/call.ts @@ -0,0 +1,40 @@ +// Call data from WS CAL event or search results. + +export interface Call { + id: number; + audioName: string; + audioType: string; + dateTime: number; // unix timestamp + systemId: number; // radio system ID + system: number; // DB system ID + talkgroupId: number; // radio TG ID + talkgroup: number; // DB TG ID + frequency?: number; // Hz + duration?: number; // ms + source?: number; // unit ID + sources?: string; // JSON array + frequencies?: string; // JSON array + patches?: string; // JSON array + site?: string; // receiver site name + channel?: string; // channel identifier + decoder?: string; // decoder type (e.g. 
"P25 Phase 1") + errorCount?: number; // P25 error count + spikeCount?: number; // P25 spike count + talkerAlias?: string; // DMR/P25 talker alias + systemLabel?: string; // populated from config + talkgroupLabel?: string; // populated from config + talkgroupName?: string; // populated from config + talkgroupTag?: string; // populated from config + talkgroupGroup?: string; // populated from config + talkgroupLedColor?: string; // CSS color for LED + transcript?: string; + transcriptSegments?: TranscriptionSegment[]; + audioUrl?: string; // object URL for audio playback +} + +export interface TranscriptionSegment { + speaker?: string; + start: number; + end: number; + text: string; +} diff --git a/frontend/src/types/config.ts b/frontend/src/types/config.ts new file mode 100644 index 0000000..ec22c21 --- /dev/null +++ b/frontend/src/types/config.ts @@ -0,0 +1,34 @@ +// Scanner / system / talkgroup configuration delivered via WS CFG/VER events. + +export interface SystemConfig { + id: number; + systemId: number; + label: string; + ledColor: string; + talkgroups: TalkgroupConfig[]; +} + +export interface TalkgroupConfig { + id: number; + talkgroupId: number; + label: string; + name: string; + tag: string; + group: string; + ledColor: string; // CSS color string + frequency?: number; +} + +export interface ScannerConfig { + systems: SystemConfig[]; + branding?: string; + email?: string; + version?: string; + time12hFormat: boolean; + showListenersCount: boolean; + playbackGoesLive: boolean; + shareableLinks: boolean; + keypadBeeps: string; + transcriptionEnabled: boolean; + liveTranscriptDisplay: boolean; +} diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index d518b33..d04aedc 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -1,413 +1,10 @@ -// Call data from WS CAL event or search results -export interface Call { - id: number; - audioName: string; - audioType: string; - dateTime: number; // unix timestamp - systemId: 
number; // radio system ID - system: number; // DB system ID - talkgroupId: number; // radio TG ID - talkgroup: number; // DB TG ID - frequency?: number; // Hz - duration?: number; // ms - source?: number; // unit ID - sources?: string; // JSON array - frequencies?: string; // JSON array - patches?: string; // JSON array - site?: string; // receiver site name - channel?: string; // channel identifier - decoder?: string; // decoder type (e.g. "P25 Phase 1") - errorCount?: number; // P25 error count - spikeCount?: number; // P25 spike count - talkerAlias?: string; // DMR/P25 talker alias - systemLabel?: string; // populated from config - talkgroupLabel?: string; // populated from config - talkgroupName?: string; // populated from config - talkgroupTag?: string; // populated from config - talkgroupGroup?: string; // populated from config - talkgroupLedColor?: string; // CSS color for LED - transcript?: string; - transcriptSegments?: TranscriptionSegment[]; - audioUrl?: string; // object URL for audio playback -} - -export interface TranscriptionSegment { - speaker?: string; - start: number; - end: number; - text: string; -} - -// System/talkgroup config from CFG event -export interface SystemConfig { - id: number; - systemId: number; - label: string; - ledColor: string; - talkgroups: TalkgroupConfig[]; -} - -export interface TalkgroupConfig { - id: number; - talkgroupId: number; - label: string; - name: string; - tag: string; - group: string; - ledColor: string; // CSS color string - frequency?: number; -} - -// Scanner configuration from CFG/VER events -export interface ScannerConfig { - systems: SystemConfig[]; - branding?: string; - email?: string; - version?: string; - time12hFormat: boolean; - showListenersCount: boolean; - playbackGoesLive: boolean; - shareableLinks: boolean; - keypadBeeps: string; - transcriptionEnabled: boolean; - liveTranscriptDisplay: boolean; -} - -// WS message: JSON array [command, payload?, flags?] 
-export type WsCommand = - | "CAL" - | "CFG" - | "XPR" - | "LCL" - | "LSC" - | "LFM" - | "MAX" - | "VER" - | "TRN" - | "ADM_EVT" - | "ADM_REQ" - | "ADM_RES"; - -// Setup status from GET /api/setup/status -export interface SetupStatus { - needsSetup: boolean; - publicAccess: boolean; -} - -// Auth login response -export interface LoginResponse { - token: string; - user: { - id: number; - username: string; - role: string; - }; - passwordNeedChange: boolean; -} - -// Auth refresh response -export interface RefreshResponse { - token: string; - user: { - id: number; - username: string; - role: string; - }; -} - -// For avoid timer tracking -export interface AvoidEntry { - talkgroupId: number; - expiresAt: number; // unix ms timestamp, 0 = permanent -} - -// Connection status for WS -export type ConnectionStatus = "connecting" | "connected" | "disconnected"; - -// ─── Admin resource types ─── - -export interface AdminUser { - id: number; - username: string; - role: "admin" | "listener"; - disabled: number; // 0 or 1 - systemsJson: string | null; - expiration: number | null; // unix timestamp - limit: number | null; // concurrent connection limit - createdAt: number; - updatedAt: number; -} - -export interface AdminSystem { - id: number; - systemId: number; - label: string; - autoPopulateTalkgroups: number; - blacklistsJson: string | null; - led: string | null; - order: number; -} - -export interface AdminTalkgroup { - id: number; - systemId: number; - talkgroupId: number; - label: string | null; - name: string | null; - frequency: number | null; - led: string | null; - groupId: number | null; - tagId: number | null; - order: number; -} - -export interface AdminUnit { - id: number; - systemId: number; - unitId: number; - label: string | null; - order: number; -} - -export interface AdminGroup { - id: number; - label: string; -} - -export interface AdminTag { - id: number; - label: string; -} - -export interface AdminApiKey { - id: number; - fingerprint: string; - ident: 
string | null; - disabled: number; - systemsJson: string | null; - callRateLimit: number | null; - order: number; -} - -export interface AdminApiKeyCreateResponse extends AdminApiKey { - createdKey: string; -} - -export interface AdminDownstream { - id: number; - url: string; - hasApiKey: boolean; - systemsJson: string | null; - disabled: number; - order: number; -} - -export interface AdminDownstreamCreate { - url: string; - apiKey: string; - systemsJson: string | null; - disabled: number; - order: number; -} - -export interface AdminDownstreamUpdate { - id: number; - url: string; - apiKey: string; - systemsJson: string | null; - disabled: number; - order: number; -} - -export interface AdminWebhook { - id: number; - url: string; - type: string; - secret: string | null; - systemsJson: string | null; - disabled: number; - order: number; -} - -export interface AdminSetting { - key: string; - value: string; -} - -export interface Capabilities { - ffmpeg: boolean; - fdkAac: boolean; - whisper: boolean; -} - -export interface ConfigResponse { - settings: AdminSetting[]; - capabilities: Capabilities; -} - -export interface AdminLog { - dateTime: number; - level: string; - message: string; - attrs?: Record; -} - -// Password change request -export interface ChangePasswordRequest { - currentPassword: string; - newPassword: string; -} - -// User create/update payload -export interface CreateUserPayload { - username: string; - password: string; - role: "admin" | "listener"; - disabled?: number; - systemsJson?: string | null; - expiration?: number | null; - limit?: number | null; -} - -export interface UpdateUserPayload { - username?: string; - password?: string; - role?: "admin" | "listener"; - disabled?: number; - systemsJson?: string | null; - expiration?: number | null; - limit?: number | null; -} - -export interface AdminDirMonitor { - id: number; - directory: string; - type: string; - mask: string | null; - extension: string | null; - frequency: number | null; - delay: 
number | null; - deleteAfter: number; - usePolling: number; - disabled: number; - systemId: number | null; - talkgroupId: number | null; - order: number; -} - -// --- RadioReference enrichment types --- - -export interface RRTalkgroupCandidate { - row: number; - talkgroupId: number; - label?: string; - name?: string; - group?: string; - tag?: string; - led?: string; - order?: number; -} - -export interface RRPreviewRow extends RRTalkgroupCandidate { - matched: boolean; - wouldUpdate: boolean; - wouldUpdateFields: string[]; - skipReason?: string; -} - -export interface RRRowError { - row: number; - reason: string; -} - -export interface RRPreviewResponse { - processed: number; - matched: number; - wouldUpdate: number; - skipped: number; - errors: number; - rowErrors: RRRowError[]; - rows: RRPreviewRow[]; -} - -export interface RRApplyRequest { - systemId: number; - candidates: RRTalkgroupCandidate[]; - mergeMode: string; - selectedFields: string[]; -} - -export interface RRApplyResponse { - processed: number; - matched: number; - updated: number; - skipped: number; - errors: number; - rowErrors: RRRowError[]; -} - -// --- Shared Links (admin) --- - -export interface SharedLinkAdmin { - id: number; - callId: number; - token: string; - createdAt: number; - sharedBy: string; - dateTime: number; - duration: number; - systemLabel: string; - talkgroupLabel: string; - talkgroupName: string; - expiresAt: number | null; -} - -// --- Server filesystem types --- - -export interface ServerDirectoryEntry { - name: string; - path: string; -} - -export interface ServerDirectoryListResponse { - path: string; - parent: string | null; - directories: ServerDirectoryEntry[]; -} - -// --- Transcription types --- - -export interface TranscriptionStatus { - enabled: boolean; - url: string; - model: string; - language: string; - diarize: boolean; - liveDisplay: boolean; - connected: boolean; -} - -export interface WhisperModel { - id: string; - object: string; - path: string; - created: 
number; - owned_by: string; -} - -export interface TranscriptionModelsResponse { - object: string; - models: WhisperModel[]; -} - -export interface TranscriptionStats { - total: number; - recent24h: number; - avgDurationMs: number; - minDurationMs: number; - maxDurationMs: number; - queueDepth: number; - poolEnabled: boolean; - byLanguage: { language: string; count: number }[]; - byModel: { model: string; count: number }[]; -} +// Barrel re-export for the types tree. Existing `@/types` imports keep +// working; new code can also import from a specific module +// (`@/types/call`, `@/types/admin`, etc.). +export * from "./admin"; +export * from "./api"; +export * from "./auth"; +export * from "./call"; +export * from "./config"; +export * from "./ui"; +export * from "./ws"; diff --git a/frontend/src/types/ui.ts b/frontend/src/types/ui.ts new file mode 100644 index 0000000..b68bbd4 --- /dev/null +++ b/frontend/src/types/ui.ts @@ -0,0 +1,7 @@ +// Purely client-side view-state types. + +// Avoid timer tracking +export interface AvoidEntry { + talkgroupId: number; + expiresAt: number; // unix ms timestamp, 0 = permanent +} diff --git a/frontend/src/types/ws.ts b/frontend/src/types/ws.ts new file mode 100644 index 0000000..95ce172 --- /dev/null +++ b/frontend/src/types/ws.ts @@ -0,0 +1,19 @@ +// WS framing types. + +// WS message: JSON array [command, payload?, flags?] 
+export type WsCommand = + | "CAL" + | "CFG" + | "XPR" + | "LCL" + | "LSC" + | "LFM" + | "MAX" + | "VER" + | "TRN" + | "ADM_EVT" + | "ADM_REQ" + | "ADM_RES"; + +// Connection status for WS +export type ConnectionStatus = "connecting" | "connected" | "disconnected"; From 3a54c8f74bb628edc5e892bb7b3cca9cc463ce23 Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Sat, 25 Apr 2026 13:22:37 -0400 Subject: [PATCH 14/27] refactor(frontend): post-Phase-6 layout polish (slices, pages/Admin, util, hooks) (#23) - app/slices/ split into shared/ (authSlice), scanner/ (scannerSlice, callsSlice, shareSlice), admin/ (adminSlice, activitySlice) - components/admin/AdminLayout.tsx inlined into pages/Admin.tsx; default export renamed to Admin; test file moved to pages/Admin.test.tsx - components/admin/NavigationGuardContext.tsx relocated to hooks/admin/useNavigationGuard.tsx and added to the hooks/admin barrel - services/downloadFilename.ts moved to services/util/downloadFilename.ts All 188 tests pass; tsc --noEmit clean. No runtime behaviour change. 
--- CHANGELOG.md | 9 + .../app/slices/{ => admin}/activitySlice.ts | 0 .../src/app/slices/{ => admin}/adminSlice.ts | 0 .../slices/{ => scanner}/callsSlice.test.ts | 2 +- .../app/slices/{ => scanner}/callsSlice.ts | 0 .../slices/{ => scanner}/scannerSlice.test.ts | 2 +- .../app/slices/{ => scanner}/scannerSlice.ts | 0 .../app/slices/{ => scanner}/shareSlice.ts | 0 .../app/slices/{ => shared}/authSlice.test.ts | 2 +- .../src/app/slices/{ => shared}/authSlice.ts | 0 frontend/src/app/store.ts | 6 +- .../src/components/admin/ActivityPanel.tsx | 2 +- frontend/src/components/admin/AdminLayout.tsx | 260 ------------------ .../components/admin/ApiKeysPanel.test.tsx | 6 +- .../src/components/admin/OptionsPanel.tsx | 2 +- .../components/admin/RadioReferenceCard.tsx | 2 +- .../components/admin/SystemsPanel.test.tsx | 6 +- frontend/src/components/admin/ToolsPanel.tsx | 4 +- .../src/components/admin/UsersPanel.test.tsx | 6 +- .../scanner/BookmarksPanel.test.tsx | 4 +- .../src/components/scanner/BookmarksPanel.tsx | 4 +- .../src/components/scanner/DisplayPanel.tsx | 2 +- .../src/components/scanner/LEDPanel.test.tsx | 6 +- frontend/src/components/scanner/LEDPanel.tsx | 4 +- .../components/scanner/SearchPanel.test.tsx | 12 +- .../src/components/scanner/SearchPanel.tsx | 6 +- .../components/scanner/SelectTGPanel.test.tsx | 6 +- .../src/components/scanner/SelectTGPanel.tsx | 2 +- .../components/scanner/ShareCallButton.tsx | 2 +- frontend/src/hooks/admin/index.ts | 1 + frontend/src/hooks/admin/useAdminActivity.ts | 2 +- frontend/src/hooks/admin/useAdminWebSocket.ts | 2 +- .../admin/useNavigationGuard.tsx} | 0 frontend/src/hooks/scanner/useAudioPlayer.ts | 2 +- frontend/src/hooks/scanner/useScanner.ts | 2 +- .../src/hooks/scanner/useTGSelectionSync.ts | 4 +- .../src/hooks/shared/useAuthInit.test.tsx | 12 +- frontend/src/hooks/shared/useAuthInit.ts | 2 +- .../src/hooks/shared/useTokenRefresh.test.tsx | 12 +- frontend/src/hooks/shared/useTokenRefresh.ts | 2 +- 
frontend/src/hooks/shared/useWebSocket.ts | 2 +- frontend/src/main.tsx | 2 +- .../Admin.test.tsx} | 12 +- frontend/src/pages/Admin.tsx | 259 ++++++++++++++++- frontend/src/pages/Login.test.tsx | 12 +- frontend/src/pages/Login.tsx | 2 +- frontend/src/pages/Scanner.tsx | 4 +- frontend/src/pages/Setup.test.tsx | 6 +- frontend/src/pages/SharedCall.test.tsx | 2 +- frontend/src/pages/SharedCall.tsx | 2 +- .../services/{ => util}/downloadFilename.ts | 0 frontend/src/services/ws/adminClient.ts | 2 +- frontend/src/services/ws/client.test.ts | 6 +- frontend/src/services/ws/client.ts | 4 +- 54 files changed, 359 insertions(+), 354 deletions(-) rename frontend/src/app/slices/{ => admin}/activitySlice.ts (100%) rename frontend/src/app/slices/{ => admin}/adminSlice.ts (100%) rename frontend/src/app/slices/{ => scanner}/callsSlice.test.ts (99%) rename frontend/src/app/slices/{ => scanner}/callsSlice.ts (100%) rename frontend/src/app/slices/{ => scanner}/scannerSlice.test.ts (99%) rename frontend/src/app/slices/{ => scanner}/scannerSlice.ts (100%) rename frontend/src/app/slices/{ => scanner}/shareSlice.ts (100%) rename frontend/src/app/slices/{ => shared}/authSlice.test.ts (98%) rename frontend/src/app/slices/{ => shared}/authSlice.ts (100%) delete mode 100644 frontend/src/components/admin/AdminLayout.tsx rename frontend/src/{components/admin/NavigationGuardContext.tsx => hooks/admin/useNavigationGuard.tsx} (100%) rename frontend/src/{components/admin/AdminLayout.test.tsx => pages/Admin.test.tsx} (92%) rename frontend/src/services/{ => util}/downloadFilename.ts (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5118a21..7f5dcb8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,6 +53,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 `ui.ts`). The original `index.ts` is now a barrel that re-exports everything, so all existing `@/types` imports keep working unchanged. New code can also import from a specific module (e.g. `@/types/admin`). 
+- Frontend layout polish on top of the directory restructure: + `app/slices/` split into `shared/` (`authSlice`), `scanner/` + (`scannerSlice`, `callsSlice`, `shareSlice`), and `admin/` + (`adminSlice`, `activitySlice`); `components/admin/AdminLayout.tsx` + inlined into `pages/Admin.tsx` (replacing the 5-line shim); + `components/admin/NavigationGuardContext.tsx` relocated to + `hooks/admin/useNavigationGuard.tsx`; and `services/downloadFilename.ts` + moved to `services/util/downloadFilename.ts`. All call sites updated; + no runtime behaviour change. ### Fixed diff --git a/frontend/src/app/slices/activitySlice.ts b/frontend/src/app/slices/admin/activitySlice.ts similarity index 100% rename from frontend/src/app/slices/activitySlice.ts rename to frontend/src/app/slices/admin/activitySlice.ts diff --git a/frontend/src/app/slices/adminSlice.ts b/frontend/src/app/slices/admin/adminSlice.ts similarity index 100% rename from frontend/src/app/slices/adminSlice.ts rename to frontend/src/app/slices/admin/adminSlice.ts diff --git a/frontend/src/app/slices/callsSlice.test.ts b/frontend/src/app/slices/scanner/callsSlice.test.ts similarity index 99% rename from frontend/src/app/slices/callsSlice.test.ts rename to frontend/src/app/slices/scanner/callsSlice.test.ts index bfd1462..68c2ae8 100644 --- a/frontend/src/app/slices/callsSlice.test.ts +++ b/frontend/src/app/slices/scanner/callsSlice.test.ts @@ -18,7 +18,7 @@ import { setDownloadMode, setTranscript, resetFilters, -} from "@/app/slices/callsSlice"; +} from "@/app/slices/scanner/callsSlice"; const reducer = callsSlice.reducer; diff --git a/frontend/src/app/slices/callsSlice.ts b/frontend/src/app/slices/scanner/callsSlice.ts similarity index 100% rename from frontend/src/app/slices/callsSlice.ts rename to frontend/src/app/slices/scanner/callsSlice.ts diff --git a/frontend/src/app/slices/scannerSlice.test.ts b/frontend/src/app/slices/scanner/scannerSlice.test.ts similarity index 99% rename from 
frontend/src/app/slices/scannerSlice.test.ts rename to frontend/src/app/slices/scanner/scannerSlice.test.ts index 2bbcff5..7e6fef9 100644 --- a/frontend/src/app/slices/scannerSlice.test.ts +++ b/frontend/src/app/slices/scanner/scannerSlice.test.ts @@ -17,7 +17,7 @@ import { setAllTGs, setConfig, transcriptReceived, -} from "@/app/slices/scannerSlice"; +} from "@/app/slices/scanner/scannerSlice"; import type { Call, ScannerConfig } from "@/types"; const reducer = scannerSlice.reducer; diff --git a/frontend/src/app/slices/scannerSlice.ts b/frontend/src/app/slices/scanner/scannerSlice.ts similarity index 100% rename from frontend/src/app/slices/scannerSlice.ts rename to frontend/src/app/slices/scanner/scannerSlice.ts diff --git a/frontend/src/app/slices/shareSlice.ts b/frontend/src/app/slices/scanner/shareSlice.ts similarity index 100% rename from frontend/src/app/slices/shareSlice.ts rename to frontend/src/app/slices/scanner/shareSlice.ts diff --git a/frontend/src/app/slices/authSlice.test.ts b/frontend/src/app/slices/shared/authSlice.test.ts similarity index 98% rename from frontend/src/app/slices/authSlice.test.ts rename to frontend/src/app/slices/shared/authSlice.test.ts index 3835e91..758516b 100644 --- a/frontend/src/app/slices/authSlice.test.ts +++ b/frontend/src/app/slices/shared/authSlice.test.ts @@ -5,7 +5,7 @@ import { clearCredentials, setAuthReady, setSetupStatus, -} from "@/app/slices/authSlice"; +} from "@/app/slices/shared/authSlice"; const { reducer } = authSlice; diff --git a/frontend/src/app/slices/authSlice.ts b/frontend/src/app/slices/shared/authSlice.ts similarity index 100% rename from frontend/src/app/slices/authSlice.ts rename to frontend/src/app/slices/shared/authSlice.ts diff --git a/frontend/src/app/store.ts b/frontend/src/app/store.ts index f3d1870..9a8108b 100644 --- a/frontend/src/app/store.ts +++ b/frontend/src/app/store.ts @@ -1,9 +1,9 @@ import { configureStore } from "@reduxjs/toolkit"; import { useDispatch, useSelector } from 
"react-redux"; import { api } from "@/app/api"; -import { scannerSlice } from "@/app/slices/scannerSlice"; -import { authSlice } from "@/app/slices/authSlice"; -import { callsSlice } from "@/app/slices/callsSlice"; +import { scannerSlice } from "@/app/slices/scanner/scannerSlice"; +import { authSlice } from "@/app/slices/shared/authSlice"; +import { callsSlice } from "@/app/slices/scanner/callsSlice"; export const store = configureStore({ reducer: { diff --git a/frontend/src/components/admin/ActivityPanel.tsx b/frontend/src/components/admin/ActivityPanel.tsx index 99e17df..db6f206 100644 --- a/frontend/src/components/admin/ActivityPanel.tsx +++ b/frontend/src/components/admin/ActivityPanel.tsx @@ -1,5 +1,5 @@ import { useAdminActivity } from "@/hooks/admin/useAdminActivity"; -import type { ChartBucket } from "@/app/slices/activitySlice"; +import type { ChartBucket } from "@/app/slices/admin/activitySlice"; import { Activity, Clock3, diff --git a/frontend/src/components/admin/AdminLayout.tsx b/frontend/src/components/admin/AdminLayout.tsx deleted file mode 100644 index eed4b99..0000000 --- a/frontend/src/components/admin/AdminLayout.tsx +++ /dev/null @@ -1,260 +0,0 @@ -import { useState } from "react"; -import { - NavLink, - Routes, - Route, - Navigate, - useNavigate, - useLocation, -} from "react-router-dom"; -import { - NavigationGuardProvider, - useNavigationGuard, -} from "@/components/admin/NavigationGuardContext"; -import { - Activity, - Users, - Radio, - FolderTree, - Key, - FolderSearch, - ArrowDownToLine, - Settings, - ScrollText, - Wrench, - Share2, - LogOut, - Home, - Menu, - X, - AudioLines, -} from "lucide-react"; -import { useAppSelector, useAppDispatch } from "@/app/store"; -import { - selectToken, - selectRole, - clearCredentials, - usePostLogoutMutation, -} from "@/app/slices/authSlice"; -import { useAdminWebSocket } from "@/hooks/admin/useAdminWebSocket"; -import UsersPanel from "@/components/admin/UsersPanel"; -import SystemsPanel from 
"@/components/admin/SystemsPanel"; -import GroupsTagsPanel from "@/components/admin/GroupsTagsPanel"; -import ApiKeysPanel from "@/components/admin/ApiKeysPanel"; -import DirMonitorPanel from "@/components/admin/DirMonitorPanel"; -import DownstreamsPanel from "@/components/admin/DownstreamsPanel"; -import OptionsPanel from "@/components/admin/OptionsPanel"; -import LogsPanel from "@/components/admin/LogsPanel"; -import ToolsPanel from "@/components/admin/ToolsPanel"; -import WebhooksPanel from "@/components/admin/WebhooksPanel"; -import ActivityPanel from "@/components/admin/ActivityPanel"; -import SharedLinksPanel from "@/components/admin/SharedLinksPanel"; -import TranscriptionPanel from "@/components/admin/TranscriptionPanel"; - -const navItems = [ - { to: "/admin/activity", label: "Activity", icon: Activity }, - { to: "/admin/users", label: "Users", icon: Users }, - { to: "/admin/systems", label: "Systems", icon: Radio }, - { to: "/admin/groups", label: "Groups & Tags", icon: FolderTree }, - { to: "/admin/apikeys", label: "API Keys", icon: Key }, - { to: "/admin/dirmonitors", label: "Monitors", icon: FolderSearch }, - { to: "/admin/downstreams", label: "Downstreams", icon: ArrowDownToLine }, - { to: "/admin/shared-links", label: "Shared Links", icon: Share2 }, - { to: "/admin/transcription", label: "Transcription", icon: AudioLines }, - { to: "/admin/options", label: "Options", icon: Settings }, - { to: "/admin/logs", label: "Logs", icon: ScrollText }, - { to: "/admin/tools", label: "Tools", icon: Wrench }, -] as const; - -function SidebarContent({ - showLabels, - onSignOut, - onNavClick, -}: { - showLabels: boolean; - onSignOut: () => void; - onNavClick?: () => void; -}) { - const { requestNavigation } = useNavigationGuard(); - const navigate = useNavigate(); - - const handleClick = - (to: string, extra?: () => void) => - (e: React.MouseEvent) => { - e.preventDefault(); - if (requestNavigation(to)) { - navigate(to); - extra?.(); - } - }; - - return ( -
    - {navItems.map(({ to, label, icon: Icon }) => ( -
  • - - isActive - ? "border-l-4 border-primary bg-primary/10" - : "hover:bg-base-300" - } - > - - {showLabels && {label}} - -
  • - ))} -
  • - - - {showLabels && Scanner} - -
  • -
  • - -
  • -
- ); -} - -export default function AdminLayout() { - const token = useAppSelector(selectToken); - const role = useAppSelector(selectRole); - const dispatch = useAppDispatch(); - const navigate = useNavigate(); - const location = useLocation(); - const [drawerOpen, setDrawerOpen] = useState(false); - const [postLogout] = usePostLogoutMutation(); - - useAdminWebSocket(); - - if (!token) { - return ( - - ); - } - - if (role !== "admin") { - return ( -
-
🚫
-

Access Denied

-

- Your account does not have administrator privileges. Contact an admin - if you believe this is a mistake. -

- - Go to Scanner - -
- ); - } - - const handleSignOut = () => { - postLogout() - .unwrap() - .catch(() => {}) - .finally(() => { - dispatch(clearCredentials()); - navigate("/login", { replace: true }); - }); - }; - - return ( - -
- setDrawerOpen(e.target.checked)} - /> - - {/* Main content */} -
- {/* Mobile top bar */} -
-
- -
- - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - -
-
- - {/* Sidebar */} -
-
-
-
- ); -} diff --git a/frontend/src/components/admin/ApiKeysPanel.test.tsx b/frontend/src/components/admin/ApiKeysPanel.test.tsx index 464e86a..f3ab741 100644 --- a/frontend/src/components/admin/ApiKeysPanel.test.tsx +++ b/frontend/src/components/admin/ApiKeysPanel.test.tsx @@ -5,9 +5,9 @@ import { configureStore } from "@reduxjs/toolkit"; import { Provider } from "react-redux"; import { MemoryRouter } from "react-router-dom"; import ApiKeysPanel from "@/components/admin/ApiKeysPanel"; -import { scannerSlice } from "@/app/slices/scannerSlice"; -import { authSlice } from "@/app/slices/authSlice"; -import { callsSlice } from "@/app/slices/callsSlice"; +import { scannerSlice } from "@/app/slices/scanner/scannerSlice"; +import { authSlice } from "@/app/slices/shared/authSlice"; +import { callsSlice } from "@/app/slices/scanner/callsSlice"; import { api } from "@/app/api"; import type { AdminApiKey, AdminSystem } from "@/types"; diff --git a/frontend/src/components/admin/OptionsPanel.tsx b/frontend/src/components/admin/OptionsPanel.tsx index 8093b1c..a7840e7 100644 --- a/frontend/src/components/admin/OptionsPanel.tsx +++ b/frontend/src/components/admin/OptionsPanel.tsx @@ -11,7 +11,7 @@ import { useGetConfigQuery, useUpdateConfigMutation, } from "@/hooks/admin/useAdminWsOps"; -import { useNavigationGuard } from "@/components/admin/NavigationGuardContext"; +import { useNavigationGuard } from "@/hooks/admin/useNavigationGuard"; import type { AdminSetting } from "@/types"; // ─── Known setting keys and their input types ─── diff --git a/frontend/src/components/admin/RadioReferenceCard.tsx b/frontend/src/components/admin/RadioReferenceCard.tsx index da5ee78..0633af7 100644 --- a/frontend/src/components/admin/RadioReferenceCard.tsx +++ b/frontend/src/components/admin/RadioReferenceCard.tsx @@ -1,6 +1,6 @@ import { useState, useRef, useCallback } from "react"; import { Upload, CheckCircle, XCircle, AlertTriangle } from "lucide-react"; -import { useRrPreviewCSVMutation } from 
"@/app/slices/adminSlice"; +import { useRrPreviewCSVMutation } from "@/app/slices/admin/adminSlice"; import { useRrApplyMutation, useListSystemsQuery } from "@/hooks/admin/useAdminWsOps"; import type { RRPreviewResponse, diff --git a/frontend/src/components/admin/SystemsPanel.test.tsx b/frontend/src/components/admin/SystemsPanel.test.tsx index 4e51384..be6bb0b 100644 --- a/frontend/src/components/admin/SystemsPanel.test.tsx +++ b/frontend/src/components/admin/SystemsPanel.test.tsx @@ -5,9 +5,9 @@ import { configureStore } from "@reduxjs/toolkit"; import { Provider } from "react-redux"; import { MemoryRouter } from "react-router-dom"; import SystemsPanel from "@/components/admin/SystemsPanel"; -import { scannerSlice } from "@/app/slices/scannerSlice"; -import { authSlice } from "@/app/slices/authSlice"; -import { callsSlice } from "@/app/slices/callsSlice"; +import { scannerSlice } from "@/app/slices/scanner/scannerSlice"; +import { authSlice } from "@/app/slices/shared/authSlice"; +import { callsSlice } from "@/app/slices/scanner/callsSlice"; import { api } from "@/app/api"; import type { AdminSystem } from "@/types"; diff --git a/frontend/src/components/admin/ToolsPanel.tsx b/frontend/src/components/admin/ToolsPanel.tsx index 4d744a1..7c27db8 100644 --- a/frontend/src/components/admin/ToolsPanel.tsx +++ b/frontend/src/components/admin/ToolsPanel.tsx @@ -11,7 +11,7 @@ import { useImportUnitsMutation, useImportGroupsMutation, useImportTagsMutation, -} from "@/app/slices/adminSlice"; +} from "@/app/slices/admin/adminSlice"; import { useLazyExportConfigQuery, useLazyExportTalkgroupsQuery, @@ -21,7 +21,7 @@ import { useImportConfigMutation, useListSystemsQuery, } from "@/hooks/admin/useAdminWsOps"; -import { selectToken } from "@/app/slices/authSlice"; +import { selectToken } from "@/app/slices/shared/authSlice"; import { useAppSelector } from "@/app/store"; import RadioReferenceCard from "@/components/admin/RadioReferenceCard"; diff --git 
a/frontend/src/components/admin/UsersPanel.test.tsx b/frontend/src/components/admin/UsersPanel.test.tsx index a40ca7c..8bf8c23 100644 --- a/frontend/src/components/admin/UsersPanel.test.tsx +++ b/frontend/src/components/admin/UsersPanel.test.tsx @@ -5,9 +5,9 @@ import { configureStore } from "@reduxjs/toolkit"; import { Provider } from "react-redux"; import { MemoryRouter } from "react-router-dom"; import UsersPanel from "@/components/admin/UsersPanel"; -import { scannerSlice } from "@/app/slices/scannerSlice"; -import { authSlice } from "@/app/slices/authSlice"; -import { callsSlice } from "@/app/slices/callsSlice"; +import { scannerSlice } from "@/app/slices/scanner/scannerSlice"; +import { authSlice } from "@/app/slices/shared/authSlice"; +import { callsSlice } from "@/app/slices/scanner/callsSlice"; import { api } from "@/app/api"; import type { AdminUser, AdminSystem } from "@/types"; diff --git a/frontend/src/components/scanner/BookmarksPanel.test.tsx b/frontend/src/components/scanner/BookmarksPanel.test.tsx index 0e4dd93..2221d28 100644 --- a/frontend/src/components/scanner/BookmarksPanel.test.tsx +++ b/frontend/src/components/scanner/BookmarksPanel.test.tsx @@ -17,7 +17,7 @@ vi.mock("@/services/audio/player", () => ({ }, })); -vi.mock("@/app/slices/authSlice", () => ({ +vi.mock("@/app/slices/shared/authSlice", () => ({ selectToken: () => "fake-token", })); @@ -26,7 +26,7 @@ vi.mock("@/app/store", () => ({ selector({ scanner: { config: null } }), })); -vi.mock("@/app/slices/shareSlice", () => ({ +vi.mock("@/app/slices/scanner/shareSlice", () => ({ useShareCallMutation: () => [vi.fn(), {}], })); diff --git a/frontend/src/components/scanner/BookmarksPanel.tsx b/frontend/src/components/scanner/BookmarksPanel.tsx index e45ab92..45e7e00 100644 --- a/frontend/src/components/scanner/BookmarksPanel.tsx +++ b/frontend/src/components/scanner/BookmarksPanel.tsx @@ -1,9 +1,9 @@ import { useState } from "react"; import { useGetBookmarkCallsQuery, 
useToggleBookmarkMutation } from "@/app/api"; import { useAppSelector } from "@/app/store"; -import { selectToken } from "@/app/slices/authSlice"; +import { selectToken } from "@/app/slices/shared/authSlice"; import { audioPlayer } from "@/services/audio/player"; -import { sanitizeDownloadFilename } from "@/services/downloadFilename"; +import { sanitizeDownloadFilename } from "@/services/util/downloadFilename"; import { ShareCallButton } from "@/components/scanner/ShareCallButton"; import { X, Play, Download, Star, ChevronDown } from "lucide-react"; import type { Call } from "@/types"; diff --git a/frontend/src/components/scanner/DisplayPanel.tsx b/frontend/src/components/scanner/DisplayPanel.tsx index 26ec420..fb1a1d9 100644 --- a/frontend/src/components/scanner/DisplayPanel.tsx +++ b/frontend/src/components/scanner/DisplayPanel.tsx @@ -9,7 +9,7 @@ import { import { Share2, Sun, Copy, X, ExternalLink } from "lucide-react"; import { BookmarkButton } from "@/components/scanner/BookmarkButton"; import { useGetBookmarkIDsQuery, useToggleBookmarkMutation } from "@/app/api"; -import { useShareCallMutation } from "@/app/slices/shareSlice"; +import { useShareCallMutation } from "@/app/slices/scanner/shareSlice"; import { HistoryPanel } from "@/components/scanner/HistoryPanel"; import { TranscriptPanel } from "@/components/scanner/TranscriptPanel"; import { useActiveUnit } from "@/hooks/scanner/useActiveUnit"; diff --git a/frontend/src/components/scanner/LEDPanel.test.tsx b/frontend/src/components/scanner/LEDPanel.test.tsx index b80b9b5..97c6d6d 100644 --- a/frontend/src/components/scanner/LEDPanel.test.tsx +++ b/frontend/src/components/scanner/LEDPanel.test.tsx @@ -4,9 +4,9 @@ import { configureStore } from "@reduxjs/toolkit"; import { Provider } from "react-redux"; import { MemoryRouter } from "react-router-dom"; import { LEDPanel } from "@/components/scanner/LEDPanel"; -import { scannerSlice } from "@/app/slices/scannerSlice"; -import { authSlice } from 
"@/app/slices/authSlice"; -import { callsSlice } from "@/app/slices/callsSlice"; +import { scannerSlice } from "@/app/slices/scanner/scannerSlice"; +import { authSlice } from "@/app/slices/shared/authSlice"; +import { callsSlice } from "@/app/slices/scanner/callsSlice"; import { api } from "@/app/api"; import type { RootState } from "@/app/store"; import type { Call, ScannerConfig } from "@/types"; diff --git a/frontend/src/components/scanner/LEDPanel.tsx b/frontend/src/components/scanner/LEDPanel.tsx index 2e3fcf1..b7eda59 100644 --- a/frontend/src/components/scanner/LEDPanel.tsx +++ b/frontend/src/components/scanner/LEDPanel.tsx @@ -17,8 +17,8 @@ import { selectUsername, clearCredentials, usePostLogoutMutation, -} from "@/app/slices/authSlice"; -import { useChangePasswordMutation } from "@/app/slices/authSlice"; +} from "@/app/slices/shared/authSlice"; +import { useChangePasswordMutation } from "@/app/slices/shared/authSlice"; export function LEDPanel() { const { isDark, toggle } = useTheme(); diff --git a/frontend/src/components/scanner/SearchPanel.test.tsx b/frontend/src/components/scanner/SearchPanel.test.tsx index 4b00d0d..9f6ec5e 100644 --- a/frontend/src/components/scanner/SearchPanel.test.tsx +++ b/frontend/src/components/scanner/SearchPanel.test.tsx @@ -3,9 +3,9 @@ import { render, screen, fireEvent, act } from "@testing-library/react"; import { configureStore } from "@reduxjs/toolkit"; import { Provider } from "react-redux"; import SearchPanel from "@/components/scanner/SearchPanel"; -import { scannerSlice } from "@/app/slices/scannerSlice"; -import { authSlice } from "@/app/slices/authSlice"; -import { callsSlice } from "@/app/slices/callsSlice"; +import { scannerSlice } from "@/app/slices/scanner/scannerSlice"; +import { authSlice } from "@/app/slices/shared/authSlice"; +import { callsSlice } from "@/app/slices/scanner/callsSlice"; import { api } from "@/app/api"; import type { RootState } from "@/app/store"; import type { ScannerConfig } from 
"@/types"; @@ -14,10 +14,10 @@ import type { ScannerConfig } from "@/types"; const mockSearchCallsQuery = vi.fn(); -vi.mock("@/app/slices/callsSlice", async () => { +vi.mock("@/app/slices/scanner/callsSlice", async () => { const actual = await vi.importActual< - typeof import("@/app/slices/callsSlice") - >("@/app/slices/callsSlice"); + typeof import("@/app/slices/scanner/callsSlice") + >("@/app/slices/scanner/callsSlice"); return { ...actual, useSearchCallsQuery: (...args: unknown[]) => mockSearchCallsQuery(...args), diff --git a/frontend/src/components/scanner/SearchPanel.tsx b/frontend/src/components/scanner/SearchPanel.tsx index 73cb7f9..b209835 100644 --- a/frontend/src/components/scanner/SearchPanel.tsx +++ b/frontend/src/components/scanner/SearchPanel.tsx @@ -33,11 +33,11 @@ import { setBookmarkedOnly, setTranscript, resetFilters, -} from "@/app/slices/callsSlice"; +} from "@/app/slices/scanner/callsSlice"; import { useGetBookmarkIDsQuery, useToggleBookmarkMutation } from "@/app/api"; -import { selectToken } from "@/app/slices/authSlice"; +import { selectToken } from "@/app/slices/shared/authSlice"; import { audioPlayer } from "@/services/audio/player"; -import { sanitizeDownloadFilename } from "@/services/downloadFilename"; +import { sanitizeDownloadFilename } from "@/services/util/downloadFilename"; import type { Call } from "@/types"; interface SearchPanelProps { diff --git a/frontend/src/components/scanner/SelectTGPanel.test.tsx b/frontend/src/components/scanner/SelectTGPanel.test.tsx index 5843040..5ae40da 100644 --- a/frontend/src/components/scanner/SelectTGPanel.test.tsx +++ b/frontend/src/components/scanner/SelectTGPanel.test.tsx @@ -3,9 +3,9 @@ import { render, screen, fireEvent, within } from "@testing-library/react"; import { configureStore } from "@reduxjs/toolkit"; import { Provider } from "react-redux"; import SelectTGPanel from "@/components/scanner/SelectTGPanel"; -import { scannerSlice } from "@/app/slices/scannerSlice"; -import { authSlice } 
from "@/app/slices/authSlice"; -import { callsSlice } from "@/app/slices/callsSlice"; +import { scannerSlice } from "@/app/slices/scanner/scannerSlice"; +import { authSlice } from "@/app/slices/shared/authSlice"; +import { callsSlice } from "@/app/slices/scanner/callsSlice"; import { api } from "@/app/api"; import type { RootState } from "@/app/store"; import type { ScannerConfig } from "@/types"; diff --git a/frontend/src/components/scanner/SelectTGPanel.tsx b/frontend/src/components/scanner/SelectTGPanel.tsx index facffe9..4257c0f 100644 --- a/frontend/src/components/scanner/SelectTGPanel.tsx +++ b/frontend/src/components/scanner/SelectTGPanel.tsx @@ -8,7 +8,7 @@ import { setTGsByGroup, setTGsByTag, removeAvoid, -} from "@/app/slices/scannerSlice"; +} from "@/app/slices/scanner/scannerSlice"; import type { TalkgroupConfig, AvoidEntry } from "@/types"; interface SelectTGPanelProps { diff --git a/frontend/src/components/scanner/ShareCallButton.tsx b/frontend/src/components/scanner/ShareCallButton.tsx index e67701d..5105bd1 100644 --- a/frontend/src/components/scanner/ShareCallButton.tsx +++ b/frontend/src/components/scanner/ShareCallButton.tsx @@ -1,6 +1,6 @@ import { useCallback, useState } from "react"; import { Share2, Copy, ExternalLink, X } from "lucide-react"; -import { useShareCallMutation } from "@/app/slices/shareSlice"; +import { useShareCallMutation } from "@/app/slices/scanner/shareSlice"; interface ShareCallButtonProps { callId: number; diff --git a/frontend/src/hooks/admin/index.ts b/frontend/src/hooks/admin/index.ts index 70d3b46..ba54fe1 100644 --- a/frontend/src/hooks/admin/index.ts +++ b/frontend/src/hooks/admin/index.ts @@ -2,4 +2,5 @@ export * from "./useAdminActivity"; export * from "./useAdminLogs"; export * from "./useAdminWebSocket"; export * from "./useAdminWsOps"; +export * from "./useNavigationGuard"; export * from "./useWsQuery"; diff --git a/frontend/src/hooks/admin/useAdminActivity.ts b/frontend/src/hooks/admin/useAdminActivity.ts 
index bc645b0..a809c6e 100644 --- a/frontend/src/hooks/admin/useAdminActivity.ts +++ b/frontend/src/hooks/admin/useAdminActivity.ts @@ -4,7 +4,7 @@ import type { ActivityStats, ActivityChartResponse, TopTalkgroupsResponse, -} from "@/app/slices/activitySlice"; +} from "@/app/slices/admin/activitySlice"; const REFRESH_INTERVAL = 30_000; const DEBOUNCE_MS = 3_000; // debounce rapid call bursts diff --git a/frontend/src/hooks/admin/useAdminWebSocket.ts b/frontend/src/hooks/admin/useAdminWebSocket.ts index 2b43826..1e274e5 100644 --- a/frontend/src/hooks/admin/useAdminWebSocket.ts +++ b/frontend/src/hooks/admin/useAdminWebSocket.ts @@ -1,7 +1,7 @@ import { useEffect, useCallback } from "react"; import { useAppDispatch, useAppSelector } from "@/app/store"; import { adminWsClient } from "@/services/ws/adminClient"; -import { setCredentials, usePostRefreshMutation } from "@/app/slices/authSlice"; +import { setCredentials, usePostRefreshMutation } from "@/app/slices/shared/authSlice"; import { api } from "@/app/api"; export function useAdminWebSocket(): void { diff --git a/frontend/src/components/admin/NavigationGuardContext.tsx b/frontend/src/hooks/admin/useNavigationGuard.tsx similarity index 100% rename from frontend/src/components/admin/NavigationGuardContext.tsx rename to frontend/src/hooks/admin/useNavigationGuard.tsx diff --git a/frontend/src/hooks/scanner/useAudioPlayer.ts b/frontend/src/hooks/scanner/useAudioPlayer.ts index 68f44f0..7895bf5 100644 --- a/frontend/src/hooks/scanner/useAudioPlayer.ts +++ b/frontend/src/hooks/scanner/useAudioPlayer.ts @@ -7,7 +7,7 @@ import { setCurrentCall, clearCurrentCall, setAudioActive, -} from "@/app/slices/scannerSlice"; +} from "@/app/slices/scanner/scannerSlice"; export function useAudioPlayer() { const dispatch = useAppDispatch(); diff --git a/frontend/src/hooks/scanner/useScanner.ts b/frontend/src/hooks/scanner/useScanner.ts index b2e4837..aa9bf95 100644 --- a/frontend/src/hooks/scanner/useScanner.ts +++ 
b/frontend/src/hooks/scanner/useScanner.ts @@ -13,7 +13,7 @@ import { toggleTG, setAllTGs, setTGsBySystem, -} from "@/app/slices/scannerSlice"; +} from "@/app/slices/scanner/scannerSlice"; import type { AvoidEntry } from "@/types"; export function useScanner() { diff --git a/frontend/src/hooks/scanner/useTGSelectionSync.ts b/frontend/src/hooks/scanner/useTGSelectionSync.ts index d235fab..c2adad5 100644 --- a/frontend/src/hooks/scanner/useTGSelectionSync.ts +++ b/frontend/src/hooks/scanner/useTGSelectionSync.ts @@ -6,12 +6,12 @@ import { restoreFromDisabledTGs, restoreAvoidList, resetTGSelection, -} from "@/app/slices/scannerSlice"; +} from "@/app/slices/scanner/scannerSlice"; import { selectToken, useGetTGSelectionQuery, useUpdateTGSelectionMutation, -} from "@/app/slices/authSlice"; +} from "@/app/slices/shared/authSlice"; import type { AvoidEntry } from "@/types"; function storageKey(instanceId: string): string { diff --git a/frontend/src/hooks/shared/useAuthInit.test.tsx b/frontend/src/hooks/shared/useAuthInit.test.tsx index 2a8dec4..556f598 100644 --- a/frontend/src/hooks/shared/useAuthInit.test.tsx +++ b/frontend/src/hooks/shared/useAuthInit.test.tsx @@ -3,9 +3,9 @@ import { render, waitFor, act } from "@testing-library/react"; import { configureStore } from "@reduxjs/toolkit"; import { Provider } from "react-redux"; import { MemoryRouter } from "react-router-dom"; -import { scannerSlice } from "@/app/slices/scannerSlice"; -import { authSlice } from "@/app/slices/authSlice"; -import { callsSlice } from "@/app/slices/callsSlice"; +import { scannerSlice } from "@/app/slices/scanner/scannerSlice"; +import { authSlice } from "@/app/slices/shared/authSlice"; +import { callsSlice } from "@/app/slices/scanner/callsSlice"; import { api } from "@/app/api"; import type { RootState } from "@/app/store"; import { useAuthInit } from "@/hooks/shared/useAuthInit"; @@ -13,9 +13,9 @@ import { useAuthInit } from "@/hooks/shared/useAuthInit"; // ── Mock the refresh mutation 
───────────────────────────────────────────── const mockPostRefresh = vi.fn(); -vi.mock("@/app/slices/authSlice", async () => { - const actual = await vi.importActual( - "@/app/slices/authSlice", +vi.mock("@/app/slices/shared/authSlice", async () => { + const actual = await vi.importActual( + "@/app/slices/shared/authSlice", ); return { ...actual, diff --git a/frontend/src/hooks/shared/useAuthInit.ts b/frontend/src/hooks/shared/useAuthInit.ts index cc61599..340cf48 100644 --- a/frontend/src/hooks/shared/useAuthInit.ts +++ b/frontend/src/hooks/shared/useAuthInit.ts @@ -4,7 +4,7 @@ import { setCredentials, setAuthReady, usePostRefreshMutation, -} from "@/app/slices/authSlice"; +} from "@/app/slices/shared/authSlice"; /** * Attempts a silent token refresh on app mount. diff --git a/frontend/src/hooks/shared/useTokenRefresh.test.tsx b/frontend/src/hooks/shared/useTokenRefresh.test.tsx index 484e8e5..e7ad0ca 100644 --- a/frontend/src/hooks/shared/useTokenRefresh.test.tsx +++ b/frontend/src/hooks/shared/useTokenRefresh.test.tsx @@ -2,18 +2,18 @@ import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; import { render, act } from "@testing-library/react"; import { configureStore } from "@reduxjs/toolkit"; import { Provider } from "react-redux"; -import { scannerSlice } from "@/app/slices/scannerSlice"; -import { authSlice, setCredentials } from "@/app/slices/authSlice"; -import { callsSlice } from "@/app/slices/callsSlice"; +import { scannerSlice } from "@/app/slices/scanner/scannerSlice"; +import { authSlice, setCredentials } from "@/app/slices/shared/authSlice"; +import { callsSlice } from "@/app/slices/scanner/callsSlice"; import { api } from "@/app/api"; import { useTokenRefresh } from "@/hooks/shared/useTokenRefresh"; // ── Mocks ──────────────────────────────────────────────────────────────── const mockPostRefresh = vi.fn(); -vi.mock("@/app/slices/authSlice", async () => { - const actual = await vi.importActual( - "@/app/slices/authSlice", 
+vi.mock("@/app/slices/shared/authSlice", async () => { + const actual = await vi.importActual( + "@/app/slices/shared/authSlice", ); return { ...actual, diff --git a/frontend/src/hooks/shared/useTokenRefresh.ts b/frontend/src/hooks/shared/useTokenRefresh.ts index 808e83c..a9294ec 100644 --- a/frontend/src/hooks/shared/useTokenRefresh.ts +++ b/frontend/src/hooks/shared/useTokenRefresh.ts @@ -5,7 +5,7 @@ import { setCredentials, clearCredentials, usePostRefreshMutation, -} from "@/app/slices/authSlice"; +} from "@/app/slices/shared/authSlice"; /** * Schedules a silent access token refresh 1 minute before the current diff --git a/frontend/src/hooks/shared/useWebSocket.ts b/frontend/src/hooks/shared/useWebSocket.ts index 33f9bb3..0fdc3d2 100644 --- a/frontend/src/hooks/shared/useWebSocket.ts +++ b/frontend/src/hooks/shared/useWebSocket.ts @@ -1,7 +1,7 @@ import { useEffect, useRef, useCallback } from "react"; import { useAppDispatch, useAppSelector } from "@/app/store"; import { wsClient } from "@/services/ws/client"; -import { setCredentials, usePostRefreshMutation } from "@/app/slices/authSlice"; +import { setCredentials, usePostRefreshMutation } from "@/app/slices/shared/authSlice"; import type { ConnectionStatus } from "@/types"; export function useWebSocket(): { connectionStatus: ConnectionStatus } { diff --git a/frontend/src/main.tsx b/frontend/src/main.tsx index 84365bd..9758305 100644 --- a/frontend/src/main.tsx +++ b/frontend/src/main.tsx @@ -4,7 +4,7 @@ import { Provider } from "react-redux"; import { BrowserRouter, Routes, Route } from "react-router-dom"; import { store } from "@/app/store"; import { useAppSelector } from "@/app/store"; -import { selectAuthReady } from "@/app/slices/authSlice"; +import { selectAuthReady } from "@/app/slices/shared/authSlice"; import { useAuthInit } from "@/hooks/shared/useAuthInit"; import { useTokenRefresh } from "@/hooks/shared/useTokenRefresh"; import "@/index.css"; diff --git 
a/frontend/src/components/admin/AdminLayout.test.tsx b/frontend/src/pages/Admin.test.tsx similarity index 92% rename from frontend/src/components/admin/AdminLayout.test.tsx rename to frontend/src/pages/Admin.test.tsx index e1ea091..77c5c86 100644 --- a/frontend/src/components/admin/AdminLayout.test.tsx +++ b/frontend/src/pages/Admin.test.tsx @@ -3,10 +3,10 @@ import { render, screen, fireEvent, waitFor } from "@testing-library/react"; import { configureStore } from "@reduxjs/toolkit"; import { Provider } from "react-redux"; import { MemoryRouter } from "react-router-dom"; -import AdminLayout from "@/components/admin/AdminLayout"; -import { scannerSlice } from "@/app/slices/scannerSlice"; -import { authSlice } from "@/app/slices/authSlice"; -import { callsSlice } from "@/app/slices/callsSlice"; +import Admin from "@/pages/Admin"; +import { scannerSlice } from "@/app/slices/scanner/scannerSlice"; +import { authSlice } from "@/app/slices/shared/authSlice"; +import { callsSlice } from "@/app/slices/scanner/callsSlice"; import { api } from "@/app/api"; import type { RootState } from "@/app/store"; @@ -49,7 +49,7 @@ function renderAdmin(preloadedState?: Partial) { ...render( - + , ), @@ -58,7 +58,7 @@ function renderAdmin(preloadedState?: Partial) { // --- Tests --- -describe("AdminLayout", () => { +describe("Admin", () => { beforeEach(() => { vi.clearAllMocks(); }); diff --git a/frontend/src/pages/Admin.tsx b/frontend/src/pages/Admin.tsx index 7edd655..a4b9f95 100644 --- a/frontend/src/pages/Admin.tsx +++ b/frontend/src/pages/Admin.tsx @@ -1,5 +1,260 @@ -import AdminLayout from "@/components/admin/AdminLayout"; +import { useState } from "react"; +import { + NavLink, + Routes, + Route, + Navigate, + useNavigate, + useLocation, +} from "react-router-dom"; +import { + NavigationGuardProvider, + useNavigationGuard, +} from "@/hooks/admin/useNavigationGuard"; +import { + Activity, + Users, + Radio, + FolderTree, + Key, + FolderSearch, + ArrowDownToLine, + Settings, + 
ScrollText, + Wrench, + Share2, + LogOut, + Home, + Menu, + X, + AudioLines, +} from "lucide-react"; +import { useAppSelector, useAppDispatch } from "@/app/store"; +import { + selectToken, + selectRole, + clearCredentials, + usePostLogoutMutation, +} from "@/app/slices/shared/authSlice"; +import { useAdminWebSocket } from "@/hooks/admin/useAdminWebSocket"; +import UsersPanel from "@/components/admin/UsersPanel"; +import SystemsPanel from "@/components/admin/SystemsPanel"; +import GroupsTagsPanel from "@/components/admin/GroupsTagsPanel"; +import ApiKeysPanel from "@/components/admin/ApiKeysPanel"; +import DirMonitorPanel from "@/components/admin/DirMonitorPanel"; +import DownstreamsPanel from "@/components/admin/DownstreamsPanel"; +import OptionsPanel from "@/components/admin/OptionsPanel"; +import LogsPanel from "@/components/admin/LogsPanel"; +import ToolsPanel from "@/components/admin/ToolsPanel"; +import WebhooksPanel from "@/components/admin/WebhooksPanel"; +import ActivityPanel from "@/components/admin/ActivityPanel"; +import SharedLinksPanel from "@/components/admin/SharedLinksPanel"; +import TranscriptionPanel from "@/components/admin/TranscriptionPanel"; + +const navItems = [ + { to: "/admin/activity", label: "Activity", icon: Activity }, + { to: "/admin/users", label: "Users", icon: Users }, + { to: "/admin/systems", label: "Systems", icon: Radio }, + { to: "/admin/groups", label: "Groups & Tags", icon: FolderTree }, + { to: "/admin/apikeys", label: "API Keys", icon: Key }, + { to: "/admin/dirmonitors", label: "Monitors", icon: FolderSearch }, + { to: "/admin/downstreams", label: "Downstreams", icon: ArrowDownToLine }, + { to: "/admin/shared-links", label: "Shared Links", icon: Share2 }, + { to: "/admin/transcription", label: "Transcription", icon: AudioLines }, + { to: "/admin/options", label: "Options", icon: Settings }, + { to: "/admin/logs", label: "Logs", icon: ScrollText }, + { to: "/admin/tools", label: "Tools", icon: Wrench }, +] as const; + 
+function SidebarContent({ + showLabels, + onSignOut, + onNavClick, +}: { + showLabels: boolean; + onSignOut: () => void; + onNavClick?: () => void; +}) { + const { requestNavigation } = useNavigationGuard(); + const navigate = useNavigate(); + + const handleClick = + (to: string, extra?: () => void) => + (e: React.MouseEvent) => { + e.preventDefault(); + if (requestNavigation(to)) { + navigate(to); + extra?.(); + } + }; + + return ( +
    + {navItems.map(({ to, label, icon: Icon }) => ( +
  • + + isActive + ? "border-l-4 border-primary bg-primary/10" + : "hover:bg-base-300" + } + > + + {showLabels && {label}} + +
  • + ))} +
  • + + + {showLabels && Scanner} + +
  • +
  • + +
  • +
+ ); +} export default function Admin() { - return ; + const token = useAppSelector(selectToken); + const role = useAppSelector(selectRole); + const dispatch = useAppDispatch(); + const navigate = useNavigate(); + const location = useLocation(); + const [drawerOpen, setDrawerOpen] = useState(false); + const [postLogout] = usePostLogoutMutation(); + + useAdminWebSocket(); + + if (!token) { + return ( + + ); + } + + if (role !== "admin") { + return ( +
+
🚫
+

Access Denied

+

+ Your account does not have administrator privileges. Contact an admin + if you believe this is a mistake. +

+ + Go to Scanner + +
+ ); + } + + const handleSignOut = () => { + postLogout() + .unwrap() + .catch(() => {}) + .finally(() => { + dispatch(clearCredentials()); + navigate("/login", { replace: true }); + }); + }; + + return ( + +
+ setDrawerOpen(e.target.checked)} + /> + + {/* Main content */} +
+ {/* Mobile top bar */} +
+
+ +
+ + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + } /> + +
+
+ + {/* Sidebar */} +
+
+
+
+ ); } diff --git a/frontend/src/pages/Login.test.tsx b/frontend/src/pages/Login.test.tsx index 88dc816..1c3ab69 100644 --- a/frontend/src/pages/Login.test.tsx +++ b/frontend/src/pages/Login.test.tsx @@ -4,9 +4,9 @@ import { configureStore } from "@reduxjs/toolkit"; import { Provider } from "react-redux"; import { MemoryRouter } from "react-router-dom"; import Login from "@/pages/Login"; -import { scannerSlice } from "@/app/slices/scannerSlice"; -import { authSlice } from "@/app/slices/authSlice"; -import { callsSlice } from "@/app/slices/callsSlice"; +import { scannerSlice } from "@/app/slices/scanner/scannerSlice"; +import { authSlice } from "@/app/slices/shared/authSlice"; +import { callsSlice } from "@/app/slices/scanner/callsSlice"; import { api } from "@/app/api"; import type { RootState } from "@/app/store"; @@ -36,9 +36,9 @@ vi.mock("@/app/api", async () => { useGetSetupStatusQuery: () => mockUseGetSetupStatusQuery(), }; }); -vi.mock("@/app/slices/authSlice", async () => { - const actual = await vi.importActual( - "@/app/slices/authSlice", +vi.mock("@/app/slices/shared/authSlice", async () => { + const actual = await vi.importActual( + "@/app/slices/shared/authSlice", ); return { ...actual, diff --git a/frontend/src/pages/Login.tsx b/frontend/src/pages/Login.tsx index 2c338aa..68847e9 100644 --- a/frontend/src/pages/Login.tsx +++ b/frontend/src/pages/Login.tsx @@ -8,7 +8,7 @@ import { selectToken, usePostLoginMutation, useChangePasswordMutation, -} from "@/app/slices/authSlice"; +} from "@/app/slices/shared/authSlice"; interface LoginLocationState { from?: string; diff --git a/frontend/src/pages/Scanner.tsx b/frontend/src/pages/Scanner.tsx index 2de844b..862e9ad 100644 --- a/frontend/src/pages/Scanner.tsx +++ b/frontend/src/pages/Scanner.tsx @@ -2,13 +2,13 @@ import { useCallback, useEffect, useState } from "react"; import { useNavigate } from "react-router-dom"; import { useGetSetupStatusQuery } from "@/app/api"; import { useAppDispatch, useAppSelector } 
from "@/app/store"; -import { setSetupStatus, selectToken } from "@/app/slices/authSlice"; +import { setSetupStatus, selectToken } from "@/app/slices/shared/authSlice"; import { expireAvoids, setPaused, setLive, resetDisplay, -} from "@/app/slices/scannerSlice"; +} from "@/app/slices/scanner/scannerSlice"; import { useScanner } from "@/hooks/scanner/useScanner"; import { useTGSelectionSync } from "@/hooks/scanner/useTGSelectionSync"; import { LEDPanel } from "@/components/scanner/LEDPanel"; diff --git a/frontend/src/pages/Setup.test.tsx b/frontend/src/pages/Setup.test.tsx index 5535f27..579fc8e 100644 --- a/frontend/src/pages/Setup.test.tsx +++ b/frontend/src/pages/Setup.test.tsx @@ -3,9 +3,9 @@ import { render, screen, fireEvent, waitFor } from "@testing-library/react"; import { configureStore } from "@reduxjs/toolkit"; import { Provider } from "react-redux"; import Setup from "@/pages/Setup"; -import { scannerSlice } from "@/app/slices/scannerSlice"; -import { authSlice } from "@/app/slices/authSlice"; -import { callsSlice } from "@/app/slices/callsSlice"; +import { scannerSlice } from "@/app/slices/scanner/scannerSlice"; +import { authSlice } from "@/app/slices/shared/authSlice"; +import { callsSlice } from "@/app/slices/scanner/callsSlice"; import { api } from "@/app/api"; import type { RootState } from "@/app/store"; diff --git a/frontend/src/pages/SharedCall.test.tsx b/frontend/src/pages/SharedCall.test.tsx index 41b6901..66d9a59 100644 --- a/frontend/src/pages/SharedCall.test.tsx +++ b/frontend/src/pages/SharedCall.test.tsx @@ -11,7 +11,7 @@ vi.mock("react-router-dom", async () => { }); const mockUseGetSharedCallQuery = vi.fn(); -vi.mock("@/app/slices/shareSlice", () => ({ +vi.mock("@/app/slices/scanner/shareSlice", () => ({ useGetSharedCallQuery: (...args: unknown[]) => mockUseGetSharedCallQuery(...args), })); diff --git a/frontend/src/pages/SharedCall.tsx b/frontend/src/pages/SharedCall.tsx index e6d5e8b..264138b 100644 --- 
a/frontend/src/pages/SharedCall.tsx +++ b/frontend/src/pages/SharedCall.tsx @@ -1,5 +1,5 @@ import { useParams } from "react-router-dom"; -import { useGetSharedCallQuery } from "@/app/slices/shareSlice"; +import { useGetSharedCallQuery } from "@/app/slices/scanner/shareSlice"; import { Download, Radio } from "lucide-react"; function formatDuration(secs: number): string { diff --git a/frontend/src/services/downloadFilename.ts b/frontend/src/services/util/downloadFilename.ts similarity index 100% rename from frontend/src/services/downloadFilename.ts rename to frontend/src/services/util/downloadFilename.ts diff --git a/frontend/src/services/ws/adminClient.ts b/frontend/src/services/ws/adminClient.ts index 43f6b14..8308dc3 100644 --- a/frontend/src/services/ws/adminClient.ts +++ b/frontend/src/services/ws/adminClient.ts @@ -1,5 +1,5 @@ import type { AppDispatch } from "@/app/store"; -import { clearCredentials } from "@/app/slices/authSlice"; +import { clearCredentials } from "@/app/slices/shared/authSlice"; type TokenExpiredCallback = () => Promise; type EventCallback = (topic: string, data: unknown, at: number) => void; diff --git a/frontend/src/services/ws/client.test.ts b/frontend/src/services/ws/client.test.ts index d0ea66c..ab6b52c 100644 --- a/frontend/src/services/ws/client.test.ts +++ b/frontend/src/services/ws/client.test.ts @@ -1,9 +1,9 @@ import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; import { wsClient } from "@/services/ws/client"; import { configureStore } from "@reduxjs/toolkit"; -import { scannerSlice } from "@/app/slices/scannerSlice"; -import { authSlice } from "@/app/slices/authSlice"; -import { callsSlice } from "@/app/slices/callsSlice"; +import { scannerSlice } from "@/app/slices/scanner/scannerSlice"; +import { authSlice } from "@/app/slices/shared/authSlice"; +import { callsSlice } from "@/app/slices/scanner/callsSlice"; import { api } from "@/app/api"; // ── Fake WebSocket stub 
─────────────────────────────────────────────────── diff --git a/frontend/src/services/ws/client.ts b/frontend/src/services/ws/client.ts index 1e5383e..9c53bd7 100644 --- a/frontend/src/services/ws/client.ts +++ b/frontend/src/services/ws/client.ts @@ -6,8 +6,8 @@ import { setListenerCount, setConnectionStatus, transcriptReceived, -} from "@/app/slices/scannerSlice"; -import { clearCredentials } from "@/app/slices/authSlice"; +} from "@/app/slices/scanner/scannerSlice"; +import { clearCredentials } from "@/app/slices/shared/authSlice"; import type { Call, WsCommand, TranscriptionSegment } from "@/types"; const MAX_BACKOFF = 30_000; From 27a658b09bdd0e6fad011828ee86c720a722eb8c Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Sat, 25 Apr 2026 13:46:05 -0400 Subject: [PATCH 15/27] refactor(backend): split calls.go and middleware.go by concern (#24) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit internal/handler/calls/calls.go (1518 LOC) split into: - calls.go (104) — Handler struct, New(), getLimiter, apiKeyLimiter, consts - upload.go (833) — PostCallUpload + helpers - audio.go (113) — GetCallAudio - search.go (440) — GetCalls - transcript.go (80) — GetCallTranscript internal/middleware/middleware.go (392 LOC) split into: - middleware.go (2) — package doc only - cors.go (69) — CORS - auth.go (192) — JWTAuth, OptionalJWTAuth, RequireAdmin, APIKeyAuth, SwaggerCookieAuth - logging.go (73) — RequestID, Logger, requestLogLevel - limits.go (83) — RateLimit, MaxBodySize, RateLimitByIP Same package, same exports, no behaviour change. go vet, go build, go test ./... all pass. 
--- CHANGELOG.md | 6 + backend/internal/handler/calls/audio.go | 113 ++ backend/internal/handler/calls/calls.go | 1414 ------------------ backend/internal/handler/calls/search.go | 440 ++++++ backend/internal/handler/calls/transcript.go | 80 + backend/internal/handler/calls/upload.go | 833 +++++++++++ backend/internal/middleware/auth.go | 192 +++ backend/internal/middleware/cors.go | 69 + backend/internal/middleware/limits.go | 83 + backend/internal/middleware/logging.go | 73 + backend/internal/middleware/middleware.go | 390 ----- 11 files changed, 1889 insertions(+), 1804 deletions(-) create mode 100644 backend/internal/handler/calls/audio.go create mode 100644 backend/internal/handler/calls/search.go create mode 100644 backend/internal/handler/calls/transcript.go create mode 100644 backend/internal/handler/calls/upload.go create mode 100644 backend/internal/middleware/auth.go create mode 100644 backend/internal/middleware/cors.go create mode 100644 backend/internal/middleware/limits.go create mode 100644 backend/internal/middleware/logging.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f5dcb8..61d2715 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 now lives in `internal/handler/routes`, and shared swagger DTOs and helpers live in `internal/handler/shared`. No route paths, methods, middleware ordering, response shapes, or status codes changed. +- Backend file-level cleanup: `internal/handler/calls/calls.go` (~1500 LOC) + split into `upload.go`, `audio.go`, `search.go`, `transcript.go`, and a + slim `calls.go` retaining the `Handler` struct and constructor; + `internal/middleware/middleware.go` split into `cors.go`, `auth.go`, + `logging.go`, `limits.go`. Same package, same exports, no behaviour + change. - Admin CRUD business logic has been extracted from `internal/ws` into a new transport-agnostic `internal/admin` package. 
The WebSocket layer now only routes `ADM_REQ` frames to `admin.Operations` methods; the diff --git a/backend/internal/handler/calls/audio.go b/backend/internal/handler/calls/audio.go new file mode 100644 index 0000000..de088c5 --- /dev/null +++ b/backend/internal/handler/calls/audio.go @@ -0,0 +1,113 @@ +package calls + +import ( + "database/sql" + "errors" + "log/slog" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/gin-gonic/gin" + "github.com/openscanner/openscanner/internal/handler/shared" +) + +// GetCallAudio handles GET /api/calls/:id/audio. +// +// @Summary Get call audio file +// @Description Stream the audio file for a specific call. Authentication is optional when the publicAccess setting is enabled; otherwise a valid JWT is required. +// @Tags Calls +// @Security BearerAuth +// @Produce application/octet-stream +// @Param id path int true "Call ID" +// @Success 200 {file} binary "Audio file" +// @Failure 400 {object} ErrorResponse "Invalid call ID" +// @Failure 401 {object} ErrorResponse "Authentication required" +// @Failure 404 {object} ErrorResponse "Call or audio not found" +// @Failure 500 {object} ErrorResponse "Internal server error" +// @Router /calls/{id}/audio [get] +func (h *Handler) GetCallAudio(c *gin.Context) { + ctx := c.Request.Context() + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || id <= 0 { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid call id"}) + return + } + + // Require authentication or publicAccess for direct audio access. + // Anonymous users must use /api/shared/:token/audio for shared calls. 
+ _, hasUser := c.Get("userID") + if !hasUser && shared.GetSettingValue(c, h.queries, "publicAccess") != "true" { + c.JSON(http.StatusUnauthorized, gin.H{"error": "authentication required"}) + return + } + + call, err := h.queries.GetCall(ctx, id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + c.JSON(http.StatusNotFound, gin.H{"error": "call not found"}) + return + } + slog.Error("failed to get call audio metadata", "id", id, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) + return + } + + // Enforce per-user grants for non-admin listeners. + if grants := shared.LoadUserGrants(c, h.queries); !shared.IsGranted(grants, call.SystemID, call.TalkgroupID.Int64) { + c.JSON(http.StatusNotFound, gin.H{"error": "call not found"}) + return + } + + recordingsDir := h.processor.RecordingsDir() + relPath := filepath.Clean(call.AudioPath) + if strings.HasPrefix(relPath, "..") || filepath.IsAbs(relPath) { + slog.Warn("rejected unsafe audio path", "id", id, "path", call.AudioPath) + c.JSON(http.StatusNotFound, gin.H{"error": "audio not found"}) + return + } + + // Open the file scoped to recordingsDir via os.Root so traversal and + // symlink escapes are impossible regardless of what's in the DB row. 
+ root, err := os.OpenRoot(recordingsDir) + if err != nil { + slog.Error("failed to open recordings root", "id", id, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) + return + } + defer root.Close() + + f, err := root.Open(relPath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + c.JSON(http.StatusNotFound, gin.H{"error": "audio file not found"}) + return + } + slog.Error("failed to open call audio file", "id", id, "path", relPath, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) + return + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + slog.Error("failed to stat call audio file", "id", id, "path", relPath, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) + return + } + + contentType := call.AudioType + if contentType == "" { + contentType = "application/octet-stream" + } + filename := call.AudioName + if filename == "" { + filename = "call" + } + + c.Header("Content-Disposition", shared.ContentDisposition("inline", filename)) + c.Header("Content-Type", contentType) + http.ServeContent(c.Writer, c.Request, filename, fi.ModTime(), f) +} diff --git a/backend/internal/handler/calls/calls.go b/backend/internal/handler/calls/calls.go index 035a3e5..3db1129 100644 --- a/backend/internal/handler/calls/calls.go +++ b/backend/internal/handler/calls/calls.go @@ -3,25 +3,12 @@ package calls import ( - "context" - "database/sql" - "encoding/json" - "errors" - "io" - "log/slog" - "net/http" - "os" - "path/filepath" - "strconv" - "strings" "sync" "time" - "github.com/gin-gonic/gin" "github.com/openscanner/openscanner/internal/audio" "github.com/openscanner/openscanner/internal/db" "github.com/openscanner/openscanner/internal/downstream" - "github.com/openscanner/openscanner/internal/handler/shared" "github.com/openscanner/openscanner/internal/ws" ) @@ -115,1404 +102,3 @@ func (h *Handler) getLimiter(apiKeyID int64, rateLimit int) 
*apiKeyLimiter { } return l } - -// PostCallUpload handles POST /api/call-upload and /api/trunk-recorder-call-upload. -// -// @Summary Upload a call recording -// @Description Ingest a radio call with audio and metadata. Requires a valid API key. -// @Tags Upload -// @Accept multipart/form-data -// @Produce json -// @Security APIKeyAuth -// @Param audio formData file true "Audio file" -// @Param dateTime formData int true "Unix timestamp of the call" -// @Param systemId formData int true "Radio system ID" -// @Param talkgroupId formData int true "Talkgroup ID" -// @Param source formData int false "Source unit ID" -// @Param frequency formData int false "Frequency in Hz" -// @Param duration formData number false "Call duration in seconds" -// @Param talkgroupLabel formData string false "Talkgroup label for auto-populate" -// @Param talkgroupTag formData string false "Talkgroup tag name" -// @Param talkgroupGroup formData string false "Talkgroup group name" -// @Param talkgroupName formData string false "Talkgroup display name" -// @Param systemLabel formData string false "System label" -// @Param patches formData string false "JSON array of patched talkgroup IDs" -// @Param audioName formData string false "Original audio file name" -// @Param audioType formData string false "Audio MIME type" -// @Param site formData string false "Site identifier" -// @Param channel formData string false "Channel identifier" -// @Param decoder formData string false "Decoder software name" -// @Param errorCount formData int false "Decoding error count" -// @Param spikeCount formData int false "Signal spike count" -// @Success 200 {object} object{id=int64} "Call ingested successfully" -// @Failure 400 {object} ErrorResponse "Bad request" -// @Failure 401 {object} ErrorResponse "API key required" -// @Failure 429 {object} ErrorResponse "Rate limit exceeded" -// @Failure 500 {object} ErrorResponse "Internal server error" -// @Router /call-upload [post] -// @Router 
/trunk-recorder-call-upload [post] -func (h *Handler) PostCallUpload(c *gin.Context) { - slog.Debug("call-upload: request received", "ip", c.ClientIP()) - // Retrieve API key ID injected by APIKeyAuth middleware. - apiKeyIDVal, exists := c.Get("apiKeyID") - if !exists { - c.JSON(http.StatusUnauthorized, gin.H{"error": "API key required"}) - return - } - apiKeyID, ok := apiKeyIDVal.(int64) - if !ok { - c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) - return - } - - // Per-API-key rate limiting. - rateLimit := defaultCallRatePerMin - apiKeyRateOverride := false - if apiKeyRateVal, ok := c.Get("apiKeyCallRate"); ok { - if apiKeyRate, ok := apiKeyRateVal.(int64); ok && apiKeyRate > 0 { - rateLimit = int(apiKeyRate) - apiKeyRateOverride = true - } - } - if rStr := shared.GetSettingValue(c, h.queries, "apiKeyCallRate"); rStr != "" { - if r, err := strconv.Atoi(rStr); err == nil && r > 0 && !apiKeyRateOverride { - rateLimit = r - } - } - if rateLimit > maxCallRatePerMin { - rateLimit = maxCallRatePerMin - } - if !h.getLimiter(apiKeyID, rateLimit).allow() { - slog.Warn("call upload rate limit exceeded", "api_key_id", apiKeyID) - c.JSON(http.StatusTooManyRequests, gin.H{"error": "rate limit exceeded"}) - return - } - - slog.Debug("call-upload: rate limit passed", "api_key_id", apiKeyID) - - // SDRTrunk and other rdio-scanner-compatible clients may send a POST with - // partial data to verify the API key. rdio-scanner responds with plain-text - // "Incomplete call data: " (status 417) which SDRTrunk treats as a - // successful connection test. We replicate that behavior: parse all fields - // first, then return the same message format for missing required fields. 
- dateTimeStr := c.PostForm("dateTime") - systemIDStr := c.PostForm("systemId") - if systemIDStr == "" { - systemIDStr = c.PostForm("system") - } - talkgroupIDStr := c.PostForm("talkgroupId") - if talkgroupIDStr == "" { - talkgroupIDStr = c.PostForm("talkgroup") - } - _, audioErr := c.FormFile("audio") - - // Check for test=1 explicitly (Trunk Recorder). - if c.PostForm("test") == "1" { - c.String(http.StatusOK, "Incomplete call data: no talkgroup\n") - return - } - - // rdio-scanner's IsValid() checks all fields WITHOUT early returns and - // overwrites the error each time, so the LAST failing check wins. - // SDRTrunk sends system= but no audio/dateTime/talkgroup, so the last - // error is always "no talkgroup" — which SDRTrunk explicitly checks for. - // We replicate this behavior: collect the last error, then return it. - var incompleteReason string - if audioErr != nil { - incompleteReason = "no audio" - } - if dateTimeStr == "" { - incompleteReason = "no datetime" - } - if systemIDStr == "" { - incompleteReason = "no system" - } - if talkgroupIDStr == "" { - incompleteReason = "no talkgroup" - } - if incompleteReason != "" { - slog.Warn("call-upload: incomplete data", - "reason", incompleteReason, - "api_key_id", apiKeyID, - ) - c.String(http.StatusExpectationFailed, "Incomplete call data: %s\n", incompleteReason) - return - } - - // Parse dateTime. - // Try unix timestamp first (Trunk Recorder, SDRTrunk), then ISO 8601 (voxcall). 
- var dateTimeUnix int64 - if n, err := strconv.ParseInt(dateTimeStr, 10, 64); err == nil { - dateTimeUnix = n - } else if t, err := time.Parse(time.RFC3339Nano, dateTimeStr); err == nil { - dateTimeUnix = t.Unix() - } else if t, err := time.Parse(time.RFC3339, dateTimeStr); err == nil { - dateTimeUnix = t.Unix() - } else { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid dateTime: expected unix timestamp or ISO 8601"}) - return - } - callTime := time.Unix(dateTimeUnix, 0) - - // Trunk Recorder's rdioscanner_uploader plugin sends "system" and - // "talkgroup" while our canonical field names are "systemId" and - // "talkgroupId". Accept both for backward compatibility. - // (Already parsed above for the connectivity check.) - systemIDRaw, err := strconv.ParseInt(systemIDStr, 10, 64) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid systemId"}) - return - } - - talkgroupIDRaw, err := strconv.ParseInt(talkgroupIDStr, 10, 64) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid talkgroupId"}) - return - } - - // Parse optional fields. 
- var frequency, duration, source sql.NullInt64 - if v := c.PostForm("frequency"); v != "" { - if n, err := strconv.ParseInt(v, 10, 64); err == nil { - frequency = sql.NullInt64{Int64: n, Valid: true} - } - } - if v := c.PostForm("duration"); v != "" { - if n, err := strconv.ParseInt(v, 10, 64); err == nil { - duration = sql.NullInt64{Int64: n, Valid: true} - } - } - if v := c.PostForm("source"); v != "" { - if n, err := strconv.ParseInt(v, 10, 64); err == nil { - source = sql.NullInt64{Int64: n, Valid: true} - } - } - - var errorCount, spikeCount sql.NullInt64 - if v := c.PostForm("errorCount"); v != "" { - if n, err := strconv.ParseInt(v, 10, 64); err == nil { - errorCount = sql.NullInt64{Int64: n, Valid: true} - } - } - if v := c.PostForm("spikeCount"); v != "" { - if n, err := strconv.ParseInt(v, 10, 64); err == nil { - spikeCount = sql.NullInt64{Int64: n, Valid: true} - } - } - - var sourcesJSON, frequenciesJSON, patchesJSON sql.NullString - if v := c.PostForm("sources"); v != "" { - sourcesJSON = sql.NullString{String: v, Valid: true} - } - if v := c.PostForm("frequencies"); v != "" { - frequenciesJSON = sql.NullString{String: v, Valid: true} - } - if v := c.PostForm("patches"); v != "" { - patchesJSON = sql.NullString{String: v, Valid: true} - } - - // Trunk-recorder's rdio-scanner uploader embeds unit IDs inside the - // "sources" JSON array rather than sending a top-level "source" field. - // Extract the first source unit ID when not explicitly provided. - if !source.Valid && sourcesJSON.Valid { - source = extractPrimarySource(sourcesJSON.String) - } - - // Similarly, error and spike counts are per-segment inside the - // "frequencies" JSON array. Aggregate them when no top-level values - // were provided. - if !errorCount.Valid && !spikeCount.Valid && frequenciesJSON.Valid { - errorCount, spikeCount = aggregateErrorSpikeCounts(frequenciesJSON.String) - } - - // Optional call metadata fields. 
- var siteCol, channelCol, decoderCol sql.NullString - if v := c.PostForm("site"); v != "" { - siteCol = sql.NullString{String: v, Valid: true} - } - if v := c.PostForm("channel"); v != "" { - channelCol = sql.NullString{String: v, Valid: true} - } - if v := c.PostForm("decoder"); v != "" { - decoderCol = sql.NullString{String: v, Valid: true} - } - - // Optional talkgroup metadata for auto-populate / backfill. - talkgroupLabel := c.PostForm("talkgroupLabel") - talkgroupTag := c.PostForm("talkgroupTag") - talkgroupGroup := c.PostForm("talkgroupGroup") - talkgroupName := c.PostForm("talkgroupName") - - var talkerAliasCol sql.NullString - if v := c.PostForm("talkerAlias"); v != "" { - talkerAliasCol = sql.NullString{String: v, Valid: true} - } - - // Trunk-recorder embeds OTA aliases in the sources JSON "tag" field - // rather than sending a top-level "talkerAlias". Extract from the - // first source entry when not explicitly provided. - if !talkerAliasCol.Valid && sourcesJSON.Valid { - talkerAliasCol = extractPrimarySourceTag(sourcesJSON.String) - } - - ctx := c.Request.Context() - autoPopulateSystems := shared.GetSettingValue(c, h.queries, "autoPopulateSystems") == "true" - - slog.Debug("call-upload: resolving system and talkgroup", - "system_id", systemIDRaw, "talkgroup_id", talkgroupIDRaw) - - // Resolve system by its radio system_id. - system, err := h.queries.GetSystemBySystemID(ctx, systemIDRaw) - if err != nil { - if !errors.Is(err, sql.ErrNoRows) { - slog.Error("failed to query system", "system_id", systemIDRaw, "error", err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) - return - } - if !autoPopulateSystems { - c.JSON(http.StatusBadRequest, gin.H{"error": "system not found"}) - return - } - label := strconv.FormatInt(systemIDRaw, 10) - // SDRTrunk and other uploaders send systemLabel with a human-readable name. 
- if sl := c.PostForm("systemLabel"); sl != "" { - label = sl - } - newID, cerr := h.queries.CreateSystem(ctx, db.CreateSystemParams{ - SystemID: systemIDRaw, - Label: label, - AutoPopulateTalkgroups: 1, - }) - if cerr != nil { - slog.Error("failed to auto-create system", "system_id", systemIDRaw, "error", cerr) - c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) - return - } - slog.Info("auto-populated system", "system_id", systemIDRaw, "label", label, "db_id", newID) - system = db.System{ID: newID, SystemID: systemIDRaw, Label: label, AutoPopulateTalkgroups: 1} - h.hub.BroadcastCFG(ctx) - } - - // Blacklist check: reject calls to blacklisted talkgroups. - if isBlacklistedTG(system.BlacklistsJson, talkgroupIDRaw) { - slog.Info("call upload: talkgroup is blacklisted", - "system_id", systemIDRaw, "talkgroup_id", talkgroupIDRaw) - c.JSON(http.StatusOK, gin.H{"message": "blacklisted"}) - return - } - - // Resolve talkgroup by system DB ID + radio talkgroup ID. - talkgroup, err := h.queries.GetTalkgroupBySystemAndTGID(ctx, db.GetTalkgroupBySystemAndTGIDParams{ - SystemID: system.ID, - TalkgroupID: talkgroupIDRaw, - }) - if err != nil { - if !errors.Is(err, sql.ErrNoRows) { - slog.Error("failed to query talkgroup", "system_id", system.ID, "talkgroup_id", talkgroupIDRaw, "error", err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) - return - } - if system.AutoPopulateTalkgroups == 0 { - c.JSON(http.StatusBadRequest, gin.H{"error": "talkgroup not found"}) - return - } - var tgLabel, tgName sql.NullString - if talkgroupLabel != "" { - tgLabel = sql.NullString{String: talkgroupLabel, Valid: true} - } - if talkgroupName != "" { - tgName = sql.NullString{String: talkgroupName, Valid: true} - } - // Resolve group from talkgroupGroup (e.g. SDRTrunk sends this). - var groupID sql.NullInt64 - if talkgroupGroup != "" { - groupID = shared.ResolveGroupID(ctx, h.queries, talkgroupGroup) - } - // Resolve tag from talkgroupTag (e.g. 
"Law Dispatch", "Fire-Tac"). - var tagID sql.NullInt64 - if talkgroupTag != "" { - tagID = shared.ResolveTagID(ctx, h.queries, talkgroupTag) - } - newID, cerr := h.queries.CreateTalkgroup(ctx, db.CreateTalkgroupParams{ - SystemID: system.ID, - TalkgroupID: talkgroupIDRaw, - Label: tgLabel, - Name: tgName, - GroupID: groupID, - TagID: tagID, - }) - if cerr != nil { - slog.Error("failed to auto-create talkgroup", "talkgroup_id", talkgroupIDRaw, "error", cerr) - c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) - return - } - slog.Info("auto-populated talkgroup", "system_id", system.SystemID, "talkgroup_id", talkgroupIDRaw, "label", tgLabel.String, "db_id", newID) - talkgroup = db.Talkgroup{ID: newID, SystemID: system.ID, TalkgroupID: talkgroupIDRaw, Label: tgLabel, Name: tgName, GroupID: groupID, TagID: tagID} - h.hub.BroadcastCFG(ctx) - } else if needsBackfill(talkgroup, talkgroupLabel, talkgroupName, talkgroupTag, talkgroupGroup) { - // Existing talkgroup has empty fields — backfill from upload metadata. 
- if !talkgroup.Label.Valid && talkgroupLabel != "" { - talkgroup.Label = sql.NullString{String: talkgroupLabel, Valid: true} - } - if !talkgroup.Name.Valid && talkgroupName != "" { - talkgroup.Name = sql.NullString{String: talkgroupName, Valid: true} - } - if !talkgroup.GroupID.Valid && talkgroupGroup != "" { - talkgroup.GroupID = shared.ResolveGroupID(ctx, h.queries, talkgroupGroup) - } - if !talkgroup.TagID.Valid && talkgroupTag != "" { - talkgroup.TagID = shared.ResolveTagID(ctx, h.queries, talkgroupTag) - } - if uerr := h.queries.UpdateTalkgroup(ctx, db.UpdateTalkgroupParams{ - ID: talkgroup.ID, - TalkgroupID: talkgroup.TalkgroupID, - Label: talkgroup.Label, - Name: talkgroup.Name, - Frequency: talkgroup.Frequency, - Led: talkgroup.Led, - GroupID: talkgroup.GroupID, - TagID: talkgroup.TagID, - Order: talkgroup.Order, - }); uerr != nil { - slog.Warn("failed to backfill talkgroup from upload", - "talkgroup_id", talkgroup.TalkgroupID, "error", uerr) - } else { - slog.Info("backfilled talkgroup from upload", - "talkgroup_id", talkgroup.TalkgroupID) - h.hub.BroadcastCFG(ctx) - } - } - - // Duplicate detection (system.ID and talkgroup.ID are the FK values in calls). - if shared.GetSettingValue(c, h.queries, "disableDuplicateDetection") != "true" { - windowMs := int64(2000) - if v := shared.GetSettingValue(c, h.queries, "duplicateDetectionTimeFrame"); v != "" { - if wm, err := strconv.ParseInt(v, 10, 64); err == nil { - windowMs = wm - } - } - dup, derr := audio.IsDuplicate(ctx, h.queries, system.ID, talkgroup.ID, callTime, windowMs) - if derr != nil { - slog.Error("duplicate detection failed", "error", derr) - // Non-fatal: proceed with ingest. - } else if dup { - slog.Info("duplicate call rejected", "system_id", systemIDRaw, "talkgroup_id", talkgroupIDRaw) - c.JSON(http.StatusOK, gin.H{"message": "duplicate call rejected"}) - return - } - } - - // Get uploaded audio file. 
- fh, err := c.FormFile("audio") - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "audio file is required"}) - return - } - - // Resolve audio conversion mode from settings. - convMode := audio.ConversionDisabled - if mStr := shared.GetSettingValue(c, h.queries, "audioConversion"); mStr != "" { - if m, err := strconv.Atoi(mStr); err == nil { - convMode = audio.ConversionMode(m) - } - } - - // Resolve encoding preset from settings. - convPreset := audio.ParseEncodingPreset(shared.GetSettingValue(c, h.queries, "audioEncodingPreset")) - - // Store audio file (conversion handled inside Processor.Store). - relPath, err := h.processor.Store(ctx, fh, convMode, convPreset) - if err != nil { - slog.Error("failed to store audio file", - "system_id", systemIDRaw, - "talkgroup_id", talkgroupIDRaw, - "error", err, - ) - c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to store audio"}) - return - } - - slog.Debug("call-upload: audio stored", "path", relPath, "mode", convMode) - - // If the recorder didn't supply a duration, probe the stored file. - if !duration.Valid { - absPath := filepath.Join(h.processor.RecordingsDir(), relPath) - if d := audio.ProbeDuration(ctx, absPath); d > 0 { - duration = sql.NullInt64{Int64: d, Valid: true} - } - } - - // Determine audio MIME type. - // When conversion is enabled the output format depends on the encoding - // preset (M4A for AAC presets, MP3 for MP3 presets). - // Otherwise validate the client-supplied Content-Type against an allowlist - // to prevent attacker-controlled MIME types from reaching the database. 
- var audioType string - if convMode != audio.ConversionDisabled { - audioType = audio.OutputMIME(convPreset) - } else { - switch fh.Header.Get("Content-Type") { - case "audio/mpeg", "audio/mp3", "audio/wav", "audio/x-wav", - "audio/ogg", "audio/aac", "audio/m4a", "audio/mp4", - "audio/x-m4a", "audio/opus": - audioType = fh.Header.Get("Content-Type") - default: - audioType = "application/octet-stream" - } - } - - // Insert call record. - callID, err := h.queries.CreateCall(ctx, db.CreateCallParams{ - AudioPath: relPath, - AudioName: filepath.Base(relPath), - AudioType: audioType, - DateTime: dateTimeUnix, - Frequency: frequency, - Duration: duration, - Source: source, - SourcesJson: sourcesJSON, - FrequenciesJson: frequenciesJSON, - PatchesJson: patchesJSON, - SystemID: system.ID, - TalkgroupID: sql.NullInt64{Int64: talkgroup.ID, Valid: true}, - Site: siteCol, - Channel: channelCol, - Decoder: decoderCol, - ErrorCount: errorCount, - SpikeCount: spikeCount, - TalkerAlias: talkerAliasCol, - }) - if err != nil { - slog.Error("failed to insert call", "error", err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) - return - } - - slog.Debug("call-upload: db record inserted", - "call_id", callID, - "system_id", systemIDRaw, - "talkgroup_id", talkgroupIDRaw, - "audio_path", relPath, - ) - - // Extract unit tags from sources JSON and upsert into units table. - // Sources format: [{"pos":0,"src":12345,"tag":"Unit Name"}, ...] - if sourcesJSON.Valid { - upsertUnitsFromSources(ctx, h.queries, system.ID, sourcesJSON.String) - } - - // Map talkerAlias to the source unit as a label (e.g. P25 radios broadcasting a name). 
- if source.Valid && talkerAliasCol.Valid { - if err := h.queries.UpsertUnit(ctx, db.UpsertUnitParams{ - SystemID: system.ID, - UnitID: source.Int64, - Label: sql.NullString{String: talkerAliasCol.String, Valid: true}, - }); err != nil { - slog.Warn("failed to upsert unit from talkerAlias", - "unit_id", source.Int64, "talkerAlias", talkerAliasCol.String, "error", err) - } - } - - // Broadcast to WebSocket listeners. - if h.hub != nil { - // Read audio file for inline embedding in the CAL JSON frame. - // Use os.Root so the read is scoped to RecordingsDir and cannot - // follow a traversal sequence or symlink out of the directory, - // regardless of what relPath contains. - const maxBroadcastAudioBytes = 20 << 20 // 20 MiB - var audioBytes []byte - if root, rootErr := os.OpenRoot(h.processor.RecordingsDir()); rootErr != nil { - slog.Warn("failed to open recordings root for WS broadcast", "error", rootErr) - } else { - if fi, statErr := root.Stat(relPath); statErr != nil { - slog.Warn("failed to stat audio for WS broadcast", "path", relPath, "error", statErr) - } else if fi.Size() > maxBroadcastAudioBytes { - slog.Warn("audio file too large for inline WS broadcast, sending metadata only", - "path", relPath, "size_bytes", fi.Size(), "max_bytes", maxBroadcastAudioBytes) - } else if f, openErr := root.Open(relPath); openErr != nil { - slog.Warn("failed to open audio for WS broadcast", "path", relPath, "error", openErr) - } else { - readBytes, readErr := io.ReadAll(io.LimitReader(f, maxBroadcastAudioBytes)) - f.Close() - if readErr != nil { - slog.Warn("failed to read audio for WS broadcast", "path", relPath, "error", readErr) - } else { - audioBytes = readBytes - } - } - root.Close() - } - - calPayload := map[string]any{ - "id": callID, - "audioName": filepath.Base(relPath), - "audioType": audioType, - "dateTime": dateTimeUnix, - "systemId": system.SystemID, - "system": system.ID, - "talkgroupId": talkgroup.TalkgroupID, - "talkgroup": talkgroup.ID, - } - if 
frequency.Valid { - calPayload["frequency"] = frequency.Int64 - } - if duration.Valid { - calPayload["duration"] = duration.Int64 - } - if source.Valid { - calPayload["source"] = source.Int64 - } - if siteCol.Valid { - calPayload["site"] = siteCol.String - } - if channelCol.Valid { - calPayload["channel"] = channelCol.String - } - if decoderCol.Valid { - calPayload["decoder"] = decoderCol.String - } - if errorCount.Valid { - calPayload["errorCount"] = errorCount.Int64 - } - if spikeCount.Valid { - calPayload["spikeCount"] = spikeCount.Int64 - } - if talkerAliasCol.Valid { - calPayload["talkerAlias"] = talkerAliasCol.String - } - if sourcesJSON.Valid { - calPayload["sources"] = sourcesJSON.String - } - if frequenciesJSON.Valid { - calPayload["frequencies"] = frequenciesJSON.String - } - calMsg, err := ws.NewCALMessage(calPayload, audioBytes) - if err != nil { - slog.Error("failed to build CAL message", "error", err) - } else { - h.hub.BroadcastCAL(calMsg, func(cl *ws.Client) bool { - return cl.CanReceive(system.ID, talkgroup.ID) - }) - slog.Debug("call-upload: ws broadcast sent", "call_id", callID) - } - } - - logAttrs := []any{ - "call_id", callID, - "system_id", systemIDRaw, - "talkgroup_id", talkgroupIDRaw, - "audio_path", relPath, - "api_key_id", apiKeyID, - } - if duration.Valid { - logAttrs = append(logAttrs, "duration_ms", duration.Int64) - } - slog.Info("call-upload: complete", logAttrs...) - - c.JSON(http.StatusOK, gin.H{"id": callID, "message": "Call imported successfully."}) - - // Notify downstream pushers (non-blocking, after response is sent). - if h.dsNotifier != nil { - // Resolve labels for downstream consumers. 
- var groupLabel, tagLabel string - if talkgroup.GroupID.Valid { - if g, err := h.queries.GetGroup(ctx, talkgroup.GroupID.Int64); err == nil { - groupLabel = g.Label - } - } - if talkgroup.TagID.Valid { - if t, err := h.queries.GetTag(ctx, talkgroup.TagID.Int64); err == nil { - tagLabel = t.Label - } - } - - h.dsNotifier.Notify(downstream.CallEvent{ - CallID: callID, - AudioPath: relPath, - AudioName: filepath.Base(relPath), - AudioType: audioType, - DateTime: dateTimeUnix, - SystemID: system.SystemID, - System: system.ID, - TalkgroupID: talkgroup.TalkgroupID, - Talkgroup: talkgroup.ID, - Frequency: frequency.Int64, - Duration: duration.Int64, - Source: source.Int64, - Sources: sourcesJSON.String, - Frequencies: frequenciesJSON.String, - Patches: patchesJSON.String, - SystemLabel: system.Label, - TalkgroupLabel: talkgroup.Label.String, - TalkgroupName: talkgroup.Name.String, - TalkgroupGroup: groupLabel, - TalkgroupTag: tagLabel, - TalkerAlias: talkerAliasCol.String, - }) - slog.Debug("call-upload: downstream notify queued", "call_id", callID) - } - - // Enqueue transcription (non-blocking, after response is sent). - if h.transcriber != nil { - absPath := filepath.Join(h.processor.RecordingsDir(), relPath) - if err := h.transcriber.Submit(ctx, audio.TranscriptionJob{ - CallID: callID, - AudioPath: absPath, - }); err != nil { - slog.Warn("call-upload: failed to enqueue transcription", "call_id", callID, "error", err) - } - } -} - -// GetCallAudio handles GET /api/calls/:id/audio. -// -// @Summary Get call audio file -// @Description Stream the audio file for a specific call. Authentication is optional when the publicAccess setting is enabled; otherwise a valid JWT is required. 
-// @Tags Calls -// @Security BearerAuth -// @Produce application/octet-stream -// @Param id path int true "Call ID" -// @Success 200 {file} binary "Audio file" -// @Failure 400 {object} ErrorResponse "Invalid call ID" -// @Failure 401 {object} ErrorResponse "Authentication required" -// @Failure 404 {object} ErrorResponse "Call or audio not found" -// @Failure 500 {object} ErrorResponse "Internal server error" -// @Router /calls/{id}/audio [get] -func (h *Handler) GetCallAudio(c *gin.Context) { - ctx := c.Request.Context() - id, err := strconv.ParseInt(c.Param("id"), 10, 64) - if err != nil || id <= 0 { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid call id"}) - return - } - - // Require authentication or publicAccess for direct audio access. - // Anonymous users must use /api/shared/:token/audio for shared calls. - _, hasUser := c.Get("userID") - if !hasUser && shared.GetSettingValue(c, h.queries, "publicAccess") != "true" { - c.JSON(http.StatusUnauthorized, gin.H{"error": "authentication required"}) - return - } - - call, err := h.queries.GetCall(ctx, id) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - c.JSON(http.StatusNotFound, gin.H{"error": "call not found"}) - return - } - slog.Error("failed to get call audio metadata", "id", id, "error", err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) - return - } - - // Enforce per-user grants for non-admin listeners. 
- if grants := shared.LoadUserGrants(c, h.queries); !shared.IsGranted(grants, call.SystemID, call.TalkgroupID.Int64) { - c.JSON(http.StatusNotFound, gin.H{"error": "call not found"}) - return - } - - recordingsDir := h.processor.RecordingsDir() - relPath := filepath.Clean(call.AudioPath) - if strings.HasPrefix(relPath, "..") || filepath.IsAbs(relPath) { - slog.Warn("rejected unsafe audio path", "id", id, "path", call.AudioPath) - c.JSON(http.StatusNotFound, gin.H{"error": "audio not found"}) - return - } - - // Open the file scoped to recordingsDir via os.Root so traversal and - // symlink escapes are impossible regardless of what's in the DB row. - root, err := os.OpenRoot(recordingsDir) - if err != nil { - slog.Error("failed to open recordings root", "id", id, "error", err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) - return - } - defer root.Close() - - f, err := root.Open(relPath) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - c.JSON(http.StatusNotFound, gin.H{"error": "audio file not found"}) - return - } - slog.Error("failed to open call audio file", "id", id, "path", relPath, "error", err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) - return - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - slog.Error("failed to stat call audio file", "id", id, "path", relPath, "error", err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) - return - } - - contentType := call.AudioType - if contentType == "" { - contentType = "application/octet-stream" - } - filename := call.AudioName - if filename == "" { - filename = "call" - } - - c.Header("Content-Disposition", shared.ContentDisposition("inline", filename)) - c.Header("Content-Type", contentType) - http.ServeContent(c.Writer, c.Request, filename, fi.ModTime(), f) -} - -// GetCalls handles GET /api/calls — paginated call archive search. 
-// -// @Summary Search calls -// @Description Paginated search of the call archive with optional filters. Authentication is optional when the publicAccess setting is enabled; otherwise a valid JWT is required. -// @Tags Calls -// @Security BearerAuth -// @Produce json -// @Param system_ids query string false "CSV system DB IDs (e.g. 1,2,3)" -// @Param talkgroup_ids query string false "CSV talkgroup DB IDs (e.g. 10,11)" -// @Param groups query string false "CSV group labels (e.g. Police,Fire)" -// @Param tags query string false "CSV tag labels (e.g. Law,EMS)" -// @Param system_id query int false "Legacy single system DB ID" -// @Param talkgroup_id query int false "Legacy single talkgroup DB ID" -// @Param group query string false "Legacy single group label" -// @Param tag query string false "Legacy single tag label" -// @Param date_from query int false "Unix timestamp lower bound" -// @Param date_to query int false "Unix timestamp upper bound" -// @Param sort query string false "Sort order: asc or desc" Enums(asc, desc) default(desc) -// @Param page query int false "Page number (1-based)" default(1) -// @Param limit query int false "Results per page (max 100)" default(25) -// @Param bookmarked_only query bool false "Show only bookmarked calls" -// @Param transcript query string false "Filter by transcript text (partial match)" -// @Success 200 {object} CallSearchResponse "Paginated call results" -// @Failure 400 {object} ErrorResponse "Invalid query parameter" -// @Failure 500 {object} ErrorResponse "Internal server error" -// @Router /calls [get] -func (h *Handler) GetCalls(c *gin.Context) { - ctx := c.Request.Context() - - parseCSVInt64 := func(raw string) ([]int64, error) { - if strings.TrimSpace(raw) == "" { - return nil, nil - } - parts := strings.Split(raw, ",") - vals := make([]int64, 0, len(parts)) - seen := make(map[int64]struct{}) - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "" { - continue - } - n, err := 
strconv.ParseInt(part, 10, 64) - if err != nil { - return nil, err - } - if _, ok := seen[n]; ok { - continue - } - seen[n] = struct{}{} - vals = append(vals, n) - } - if len(vals) == 0 { - return nil, nil - } - return vals, nil - } - - parseCSVStrings := func(raw string) []string { - if strings.TrimSpace(raw) == "" { - return nil - } - parts := strings.Split(raw, ",") - vals := make([]string, 0, len(parts)) - seen := make(map[string]struct{}) - for _, part := range parts { - v := strings.TrimSpace(part) - if v == "" { - continue - } - if _, ok := seen[v]; ok { - continue - } - seen[v] = struct{}{} - vals = append(vals, v) - } - if len(vals) == 0 { - return nil - } - return vals - } - - toCSVFilter := func(vals []int64) interface{} { - if len(vals) == 0 { - return nil - } - parts := make([]string, 0, len(vals)) - for _, v := range vals { - parts = append(parts, strconv.FormatInt(v, 10)) - } - return strings.Join(parts, ",") - } - - // Parse multi-select IDs (new CSV params) with single-select fallback. - rawSystemIDs := c.Query("system_ids") - if rawSystemIDs == "" { - rawSystemIDs = c.Query("system_id") - } - rawTalkgroupIDs := c.Query("talkgroup_ids") - if rawTalkgroupIDs == "" { - rawTalkgroupIDs = c.Query("talkgroup_id") - } - - systemIDs, err := parseCSVInt64(rawSystemIDs) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid system_ids"}) - return - } - talkgroupIDs, err := parseCSVInt64(rawTalkgroupIDs) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid talkgroup_ids"}) - return - } - - // Parse multi-select labels (new CSV params) with single-select fallback. 
- rawGroups := c.Query("groups") - if rawGroups == "" { - rawGroups = c.Query("group") - } - rawTags := c.Query("tags") - if rawTags == "" { - rawTags = c.Query("tag") - } - groupLabels := parseCSVStrings(rawGroups) - tagLabels := parseCSVStrings(rawTags) - - groupIDs := make([]int64, 0, len(groupLabels)) - for _, label := range groupLabels { - g, err := h.queries.GetGroupByLabel(ctx, label) - if err == nil { - groupIDs = append(groupIDs, g.ID) - } - } - if len(groupLabels) > 0 && len(groupIDs) == 0 { - c.JSON(http.StatusOK, shared.CallSearchResponse{Calls: []shared.CallSearchResult{}, Total: 0}) - return - } - - tagIDs := make([]int64, 0, len(tagLabels)) - for _, label := range tagLabels { - t, err := h.queries.GetTagByLabel(ctx, label) - if err == nil { - tagIDs = append(tagIDs, t.ID) - } - } - if len(tagLabels) > 0 && len(tagIDs) == 0 { - c.JSON(http.StatusOK, shared.CallSearchResponse{Calls: []shared.CallSearchResult{}, Total: 0}) - return - } - - systemIDsCSV := toCSVFilter(systemIDs) - talkgroupIDsCSV := toCSVFilter(talkgroupIDs) - groupIDsCSV := toCSVFilter(groupIDs) - tagIDsCSV := toCSVFilter(tagIDs) - - var transcript interface{} - if v := c.Query("transcript"); v != "" { - transcript = v - } - - var dateFrom, dateTo interface{} - if v := c.Query("date_from"); v != "" { - n, err := strconv.ParseInt(v, 10, 64) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid date_from"}) - return - } - dateFrom = n - } - if v := c.Query("date_to"); v != "" { - n, err := strconv.ParseInt(v, 10, 64) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid date_to"}) - return - } - dateTo = n - } - - page := int64(1) - if v := c.Query("page"); v != "" { - n, err := strconv.ParseInt(v, 10, 64) - if err != nil || n < 1 { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid page"}) - return - } - page = n - } - - limit := int64(25) - if v := c.Query("limit"); v != "" { - n, err := strconv.ParseInt(v, 10, 64) - if err != nil || n < 1 { 
- c.JSON(http.StatusBadRequest, gin.H{"error": "invalid limit"}) - return - } - if n > 100 { - n = 100 - } - limit = n - } - - sortOrder := "desc" - if v := c.Query("sort"); v != "" { - v = strings.ToLower(v) - if v != "asc" && v != "desc" { - c.JSON(http.StatusBadRequest, gin.H{"error": "sort must be asc or desc"}) - return - } - sortOrder = v - } - - offset := (page - 1) * limit - - // Resolve bookmarked_only filter: requires authenticated user. - var bookmarkUserID interface{} - if c.Query("bookmarked_only") == "true" { - if userIDVal, exists := c.Get("userID"); exists { - if uid, ok := userIDVal.(int64); ok { - bookmarkUserID = uid - } - } - } - - // Count total matching calls. - total, err := h.queries.CountCallsFiltered(ctx, db.CountCallsFilteredParams{ - SystemIdsCsv: systemIDsCSV, - TalkgroupIdsCsv: talkgroupIDsCSV, - GroupIdsCsv: groupIDsCSV, - TagIdsCsv: tagIDsCSV, - DateFrom: dateFrom, - DateTo: dateTo, - BookmarkUserID: bookmarkUserID, - Transcript: transcript, - }) - if err != nil { - slog.Error("failed to count calls", "error", err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) - return - } - - // Fetch calls page. - var calls []db.Call - listParams := db.ListCallsParams{ - SystemIdsCsv: systemIDsCSV, - TalkgroupIdsCsv: talkgroupIDsCSV, - GroupIdsCsv: groupIDsCSV, - TagIdsCsv: tagIDsCSV, - DateFrom: dateFrom, - DateTo: dateTo, - BookmarkUserID: bookmarkUserID, - Transcript: transcript, - PageOffset: sql.NullInt64{Int64: offset, Valid: true}, - PageSize: sql.NullInt64{Int64: limit, Valid: true}, - } - if sortOrder == "asc" { - calls, err = h.queries.ListCallsAsc(ctx, db.ListCallsAscParams(listParams)) - } else { - calls, err = h.queries.ListCalls(ctx, listParams) - } - if err != nil { - slog.Error("failed to list calls", "error", err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) - return - } - - // Enforce per-user grants — filter out calls the listener is not - // authorised to see. 
Admins and unauthenticated public-access users - // have nil grants (allow-all). - grants := shared.LoadUserGrants(c, h.queries) - if grants != nil { - allowed := calls[:0] - for _, call := range calls { - if shared.IsGranted(grants, call.SystemID, call.TalkgroupID.Int64) { - allowed = append(allowed, call) - } - } - calls = allowed - // Adjust total to reflect grant-scoped count. The SQL count does not - // know about grants, so cap it to the filtered result set size when - // the filter actually removed rows. This is an approximation; an - // exact count would require SQL-level grant filtering. - if int64(len(calls)) < limit { - total = offset + int64(len(calls)) - } - } - - // Build set of bookmarked call IDs for authenticated users. - bookmarkedIDs := make(map[int64]bool) - if userIDVal, exists := c.Get("userID"); exists { - if uid, ok := userIDVal.(int64); ok { - bookmarks, berr := h.queries.ListBookmarksByUser(ctx, sql.NullInt64{Int64: uid, Valid: true}) - if berr == nil { - for _, bm := range bookmarks { - bookmarkedIDs[bm.CallID] = true - } - } - } - } - - // Pre-cache lookups to avoid N+1 queries. - systemCache := make(map[int64]db.System) - tgCache := make(map[int64]db.Talkgroup) - groupCache := make(map[int64]string) - tagCache := make(map[int64]string) - - // Build response with joined labels and transcripts. 
- results := make([]shared.CallSearchResult, 0, len(calls)) - for _, call := range calls { - r := shared.CallSearchResult{ - ID: call.ID, - AudioName: call.AudioName, - AudioType: call.AudioType, - DateTime: call.DateTime, - SystemID: call.SystemID, - } - - if call.Frequency.Valid { - r.Frequency = &call.Frequency.Int64 - } - if call.Duration.Valid { - r.Duration = &call.Duration.Int64 - } - if call.Source.Valid { - r.Source = &call.Source.Int64 - } - if call.ErrorCount.Valid { - r.ErrorCount = &call.ErrorCount.Int64 - } - if call.SpikeCount.Valid { - r.SpikeCount = &call.SpikeCount.Int64 - } - if call.Site.Valid { - r.Site = call.Site.String - } - if call.Channel.Valid { - r.Channel = call.Channel.String - } - if call.Decoder.Valid { - r.Decoder = call.Decoder.String - } - if call.TalkerAlias.Valid { - r.TalkerAlias = call.TalkerAlias.String - } - - // Join system label (cached). - sys, ok := systemCache[call.SystemID] - if !ok { - var serr error - sys, serr = h.queries.GetSystem(ctx, call.SystemID) - if serr == nil { - systemCache[call.SystemID] = sys - } - } - if ok || systemCache[call.SystemID].ID != 0 { - r.SystemID = sys.SystemID - r.SystemLabel = sys.Label - } - - // Join talkgroup details (cached). - if call.TalkgroupID.Valid { - tg, ok := tgCache[call.TalkgroupID.Int64] - if !ok { - var terr error - tg, terr = h.queries.GetTalkgroup(ctx, call.TalkgroupID.Int64) - if terr == nil { - tgCache[call.TalkgroupID.Int64] = tg - } - } - if ok || tgCache[call.TalkgroupID.Int64].ID != 0 { - r.TalkgroupID = tg.TalkgroupID - if tg.Label.Valid { - r.TalkgroupLabel = tg.Label.String - } - if tg.Name.Valid { - r.TalkgroupName = tg.Name.String - } - if tg.Led.Valid { - r.TalkgroupLed = tg.Led.String - } - // Resolve group label (cached). 
- if tg.GroupID.Valid { - grpLabel, ok := groupCache[tg.GroupID.Int64] - if !ok { - grp, gerr := h.queries.GetGroup(ctx, tg.GroupID.Int64) - if gerr == nil { - groupCache[tg.GroupID.Int64] = grp.Label - grpLabel = grp.Label - } - } - if ok || grpLabel != "" { - r.TalkgroupGroup = grpLabel - } - } - // Resolve tag label (cached). - if tg.TagID.Valid { - tagLabel, ok := tagCache[tg.TagID.Int64] - if !ok { - tag, tgerr := h.queries.GetTag(ctx, tg.TagID.Int64) - if tgerr == nil { - tagCache[tg.TagID.Int64] = tag.Label - tagLabel = tag.Label - } - } - if ok || tagLabel != "" { - r.TalkgroupTag = tagLabel - } - } - } - } - - // Join transcript. - trn, terr := h.queries.GetTranscriptionByCallID(ctx, call.ID) - if terr == nil { - r.Transcript = trn.Text - } - - // Bookmark status. - r.Bookmarked = bookmarkedIDs[call.ID] - - results = append(results, r) - } - - c.JSON(http.StatusOK, shared.CallSearchResponse{ - Calls: results, - Total: total, - }) -} - -// transcriptResponse is the JSON shape returned by GetCallTranscript. -type transcriptResponse struct { - Text string `json:"text"` - Segments []audio.TranscriptionSegment `json:"segments"` - Language string `json:"language"` - Model string `json:"model"` -} // @name TranscriptResponse - -// GetCallTranscript handles GET /api/calls/:id/transcript. -// Returns the transcription for a call if one exists. -// -// @Summary Get call transcript -// @Description Returns the transcription text, segments, language and model for a call. Authentication is optional when the publicAccess setting is enabled; otherwise a valid JWT is required. 
-// @Tags Calls -// @Produce json -// @Security BearerAuth -// @Param id path int true "Call ID" -// @Success 200 {object} transcriptResponse -// @Failure 400 {object} ErrorResponse -// @Failure 404 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse -// @Router /calls/{id}/transcript [get] -func (h *Handler) GetCallTranscript(c *gin.Context) { - ctx := c.Request.Context() - id, err := strconv.ParseInt(c.Param("id"), 10, 64) - if err != nil || id <= 0 { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid call id"}) - return - } - - // Require authentication or publicAccess. - _, hasUser := c.Get("userID") - if !hasUser && shared.GetSettingValue(c, h.queries, "publicAccess") != "true" { - c.JSON(http.StatusUnauthorized, gin.H{"error": "authentication required"}) - return - } - - trx, err := h.queries.GetTranscriptionByCallID(ctx, id) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - c.JSON(http.StatusNotFound, gin.H{"error": "transcript not found"}) - return - } - slog.Error("failed to get transcript", "call_id", id, "error", err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) - return - } - - var segments []audio.TranscriptionSegment - if trx.Segments.Valid && trx.Segments.String != "" { - if err := json.Unmarshal([]byte(trx.Segments.String), &segments); err != nil { - slog.Warn("failed to parse transcript segments", "call_id", id, "error", err) - } - } - if segments == nil { - segments = []audio.TranscriptionSegment{} - } - - c.JSON(http.StatusOK, transcriptResponse{ - Text: trx.Text, - Segments: segments, - Language: trx.Language.String, - Model: trx.Model.String, - }) -} - -// --- helpers --- - -// needsBackfill returns true if at least one talkgroup field is empty and a -// corresponding value was provided in the upload metadata. 
-func needsBackfill(tg db.Talkgroup, label, name, tag, group string) bool { - if !tg.Label.Valid && label != "" { - return true - } - if !tg.Name.Valid && name != "" { - return true - } - if !tg.TagID.Valid && tag != "" { - return true - } - if !tg.GroupID.Valid && group != "" { - return true - } - return false -} - -// isBlacklistedTG checks whether a talkgroup ID appears in a system's blacklist. -// The blacklist is a JSON array of integers stored in blacklists_json. -func isBlacklistedTG(blacklistsJSON sql.NullString, talkgroupID int64) bool { - if !blacklistsJSON.Valid || strings.TrimSpace(blacklistsJSON.String) == "" { - return false - } - var ids []int64 - if err := json.Unmarshal([]byte(blacklistsJSON.String), &ids); err != nil { - slog.Warn("failed to parse blacklists_json", "error", err) - return false - } - for _, id := range ids { - if id == talkgroupID { - return true - } - } - return false -} - -// upsertUnitsFromSources parses the sources JSON array and upserts any units -// that include a "tag" (label) into the units table. -// Sources format: [{"pos":0,"src":12345,"tag":"Unit Name"}, ...] -// Entries without "src" or "tag" are silently skipped. 
-func upsertUnitsFromSources(ctx context.Context, q *db.Queries, systemDBID int64, raw string) { - var sources []map[string]any - if err := json.Unmarshal([]byte(raw), &sources); err != nil { - return - } - for _, entry := range sources { - srcVal, ok := entry["src"] - if !ok { - continue - } - srcFloat, ok := srcVal.(float64) - if !ok || srcFloat <= 0 { - continue - } - tagVal, ok := entry["tag"] - if !ok { - continue - } - tag, ok := tagVal.(string) - if !ok || tag == "" { - continue - } - if err := q.UpsertUnit(ctx, db.UpsertUnitParams{ - SystemID: systemDBID, - UnitID: int64(srcFloat), - Label: sql.NullString{String: tag, Valid: true}, - }); err != nil { - slog.Warn("failed to upsert unit from sources", - "unit_id", int64(srcFloat), "tag", tag, "error", err) - } - } -} - -// extractPrimarySource returns the "src" value from the first entry in a -// sources JSON array. Trunk-recorder sends unit IDs only inside this array -// (e.g. [{"pos":0,"src":12345}, ...]) and does not set a top-level "source". -func extractPrimarySource(raw string) sql.NullInt64 { - var sources []map[string]any - if err := json.Unmarshal([]byte(raw), &sources); err != nil || len(sources) == 0 { - return sql.NullInt64{} - } - srcVal, ok := sources[0]["src"] - if !ok { - return sql.NullInt64{} - } - srcFloat, ok := srcVal.(float64) - if !ok || srcFloat <= 0 { - return sql.NullInt64{} - } - return sql.NullInt64{Int64: int64(srcFloat), Valid: true} -} - -// extractPrimarySourceTag returns the "tag" value from the first source entry -// that has a non-empty tag. Trunk-recorder sends OTA aliases (talker alias) -// inside the sources JSON rather than as a top-level "talkerAlias" field. 
-func extractPrimarySourceTag(raw string) sql.NullString { - var sources []map[string]any - if err := json.Unmarshal([]byte(raw), &sources); err != nil { - return sql.NullString{} - } - for _, entry := range sources { - tagVal, ok := entry["tag"] - if !ok { - continue - } - tag, ok := tagVal.(string) - if !ok || tag == "" { - continue - } - return sql.NullString{String: tag, Valid: true} - } - return sql.NullString{} -} - -// aggregateErrorSpikeCounts sums errorCount and spikeCount from all entries -// in a frequencies JSON array. Trunk-recorder sends per-segment values inside -// this array (e.g. [{"errorCount":2,"spikeCount":0}, ...]) rather than -// providing aggregate top-level fields. -func aggregateErrorSpikeCounts(raw string) (sql.NullInt64, sql.NullInt64) { - var freqs []map[string]any - if err := json.Unmarshal([]byte(raw), &freqs); err != nil || len(freqs) == 0 { - return sql.NullInt64{}, sql.NullInt64{} - } - var totalErrors, totalSpikes int64 - var found bool - for _, entry := range freqs { - if v, ok := entry["errorCount"]; ok { - if f, ok := v.(float64); ok { - totalErrors += int64(f) - found = true - } - } - // trunk-recorder also uses "error_count" in its call JSON. 
- if v, ok := entry["error_count"]; ok { - if f, ok := v.(float64); ok { - totalErrors += int64(f) - found = true - } - } - if v, ok := entry["spikeCount"]; ok { - if f, ok := v.(float64); ok { - totalSpikes += int64(f) - found = true - } - } - if v, ok := entry["spike_count"]; ok { - if f, ok := v.(float64); ok { - totalSpikes += int64(f) - found = true - } - } - } - if !found { - return sql.NullInt64{}, sql.NullInt64{} - } - return sql.NullInt64{Int64: totalErrors, Valid: true}, - sql.NullInt64{Int64: totalSpikes, Valid: true} -} diff --git a/backend/internal/handler/calls/search.go b/backend/internal/handler/calls/search.go new file mode 100644 index 0000000..56a3c5f --- /dev/null +++ b/backend/internal/handler/calls/search.go @@ -0,0 +1,440 @@ +package calls + +import ( + "database/sql" + "log/slog" + "net/http" + "strconv" + "strings" + + "github.com/gin-gonic/gin" + "github.com/openscanner/openscanner/internal/db" + "github.com/openscanner/openscanner/internal/handler/shared" +) + +// GetCalls handles GET /api/calls — paginated call archive search. +// +// @Summary Search calls +// @Description Paginated search of the call archive with optional filters. Authentication is optional when the publicAccess setting is enabled; otherwise a valid JWT is required. +// @Tags Calls +// @Security BearerAuth +// @Produce json +// @Param system_ids query string false "CSV system DB IDs (e.g. 1,2,3)" +// @Param talkgroup_ids query string false "CSV talkgroup DB IDs (e.g. 10,11)" +// @Param groups query string false "CSV group labels (e.g. Police,Fire)" +// @Param tags query string false "CSV tag labels (e.g. 
Law,EMS)" +// @Param system_id query int false "Legacy single system DB ID" +// @Param talkgroup_id query int false "Legacy single talkgroup DB ID" +// @Param group query string false "Legacy single group label" +// @Param tag query string false "Legacy single tag label" +// @Param date_from query int false "Unix timestamp lower bound" +// @Param date_to query int false "Unix timestamp upper bound" +// @Param sort query string false "Sort order: asc or desc" Enums(asc, desc) default(desc) +// @Param page query int false "Page number (1-based)" default(1) +// @Param limit query int false "Results per page (max 100)" default(25) +// @Param bookmarked_only query bool false "Show only bookmarked calls" +// @Param transcript query string false "Filter by transcript text (partial match)" +// @Success 200 {object} CallSearchResponse "Paginated call results" +// @Failure 400 {object} ErrorResponse "Invalid query parameter" +// @Failure 500 {object} ErrorResponse "Internal server error" +// @Router /calls [get] +func (h *Handler) GetCalls(c *gin.Context) { + ctx := c.Request.Context() + + parseCSVInt64 := func(raw string) ([]int64, error) { + if strings.TrimSpace(raw) == "" { + return nil, nil + } + parts := strings.Split(raw, ",") + vals := make([]int64, 0, len(parts)) + seen := make(map[int64]struct{}) + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "" { + continue + } + n, err := strconv.ParseInt(part, 10, 64) + if err != nil { + return nil, err + } + if _, ok := seen[n]; ok { + continue + } + seen[n] = struct{}{} + vals = append(vals, n) + } + if len(vals) == 0 { + return nil, nil + } + return vals, nil + } + + parseCSVStrings := func(raw string) []string { + if strings.TrimSpace(raw) == "" { + return nil + } + parts := strings.Split(raw, ",") + vals := make([]string, 0, len(parts)) + seen := make(map[string]struct{}) + for _, part := range parts { + v := strings.TrimSpace(part) + if v == "" { + continue + } + if _, ok := seen[v]; ok { + 
continue + } + seen[v] = struct{}{} + vals = append(vals, v) + } + if len(vals) == 0 { + return nil + } + return vals + } + + toCSVFilter := func(vals []int64) interface{} { + if len(vals) == 0 { + return nil + } + parts := make([]string, 0, len(vals)) + for _, v := range vals { + parts = append(parts, strconv.FormatInt(v, 10)) + } + return strings.Join(parts, ",") + } + + // Parse multi-select IDs (new CSV params) with single-select fallback. + rawSystemIDs := c.Query("system_ids") + if rawSystemIDs == "" { + rawSystemIDs = c.Query("system_id") + } + rawTalkgroupIDs := c.Query("talkgroup_ids") + if rawTalkgroupIDs == "" { + rawTalkgroupIDs = c.Query("talkgroup_id") + } + + systemIDs, err := parseCSVInt64(rawSystemIDs) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid system_ids"}) + return + } + talkgroupIDs, err := parseCSVInt64(rawTalkgroupIDs) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid talkgroup_ids"}) + return + } + + // Parse multi-select labels (new CSV params) with single-select fallback. 
+ rawGroups := c.Query("groups") + if rawGroups == "" { + rawGroups = c.Query("group") + } + rawTags := c.Query("tags") + if rawTags == "" { + rawTags = c.Query("tag") + } + groupLabels := parseCSVStrings(rawGroups) + tagLabels := parseCSVStrings(rawTags) + + groupIDs := make([]int64, 0, len(groupLabels)) + for _, label := range groupLabels { + g, err := h.queries.GetGroupByLabel(ctx, label) + if err == nil { + groupIDs = append(groupIDs, g.ID) + } + } + if len(groupLabels) > 0 && len(groupIDs) == 0 { + c.JSON(http.StatusOK, shared.CallSearchResponse{Calls: []shared.CallSearchResult{}, Total: 0}) + return + } + + tagIDs := make([]int64, 0, len(tagLabels)) + for _, label := range tagLabels { + t, err := h.queries.GetTagByLabel(ctx, label) + if err == nil { + tagIDs = append(tagIDs, t.ID) + } + } + if len(tagLabels) > 0 && len(tagIDs) == 0 { + c.JSON(http.StatusOK, shared.CallSearchResponse{Calls: []shared.CallSearchResult{}, Total: 0}) + return + } + + systemIDsCSV := toCSVFilter(systemIDs) + talkgroupIDsCSV := toCSVFilter(talkgroupIDs) + groupIDsCSV := toCSVFilter(groupIDs) + tagIDsCSV := toCSVFilter(tagIDs) + + var transcript interface{} + if v := c.Query("transcript"); v != "" { + transcript = v + } + + var dateFrom, dateTo interface{} + if v := c.Query("date_from"); v != "" { + n, err := strconv.ParseInt(v, 10, 64) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid date_from"}) + return + } + dateFrom = n + } + if v := c.Query("date_to"); v != "" { + n, err := strconv.ParseInt(v, 10, 64) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid date_to"}) + return + } + dateTo = n + } + + page := int64(1) + if v := c.Query("page"); v != "" { + n, err := strconv.ParseInt(v, 10, 64) + if err != nil || n < 1 { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid page"}) + return + } + page = n + } + + limit := int64(25) + if v := c.Query("limit"); v != "" { + n, err := strconv.ParseInt(v, 10, 64) + if err != nil || n < 1 { 
+ c.JSON(http.StatusBadRequest, gin.H{"error": "invalid limit"}) + return + } + if n > 100 { + n = 100 + } + limit = n + } + + sortOrder := "desc" + if v := c.Query("sort"); v != "" { + v = strings.ToLower(v) + if v != "asc" && v != "desc" { + c.JSON(http.StatusBadRequest, gin.H{"error": "sort must be asc or desc"}) + return + } + sortOrder = v + } + + offset := (page - 1) * limit + + // Resolve bookmarked_only filter: requires authenticated user. + var bookmarkUserID interface{} + if c.Query("bookmarked_only") == "true" { + if userIDVal, exists := c.Get("userID"); exists { + if uid, ok := userIDVal.(int64); ok { + bookmarkUserID = uid + } + } + } + + // Count total matching calls. + total, err := h.queries.CountCallsFiltered(ctx, db.CountCallsFilteredParams{ + SystemIdsCsv: systemIDsCSV, + TalkgroupIdsCsv: talkgroupIDsCSV, + GroupIdsCsv: groupIDsCSV, + TagIdsCsv: tagIDsCSV, + DateFrom: dateFrom, + DateTo: dateTo, + BookmarkUserID: bookmarkUserID, + Transcript: transcript, + }) + if err != nil { + slog.Error("failed to count calls", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) + return + } + + // Fetch calls page. + var calls []db.Call + listParams := db.ListCallsParams{ + SystemIdsCsv: systemIDsCSV, + TalkgroupIdsCsv: talkgroupIDsCSV, + GroupIdsCsv: groupIDsCSV, + TagIdsCsv: tagIDsCSV, + DateFrom: dateFrom, + DateTo: dateTo, + BookmarkUserID: bookmarkUserID, + Transcript: transcript, + PageOffset: sql.NullInt64{Int64: offset, Valid: true}, + PageSize: sql.NullInt64{Int64: limit, Valid: true}, + } + if sortOrder == "asc" { + calls, err = h.queries.ListCallsAsc(ctx, db.ListCallsAscParams(listParams)) + } else { + calls, err = h.queries.ListCalls(ctx, listParams) + } + if err != nil { + slog.Error("failed to list calls", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) + return + } + + // Enforce per-user grants — filter out calls the listener is not + // authorised to see. 
Admins and unauthenticated public-access users + // have nil grants (allow-all). + grants := shared.LoadUserGrants(c, h.queries) + if grants != nil { + allowed := calls[:0] + for _, call := range calls { + if shared.IsGranted(grants, call.SystemID, call.TalkgroupID.Int64) { + allowed = append(allowed, call) + } + } + calls = allowed + // Adjust total to reflect grant-scoped count. The SQL count does not + // know about grants, so cap it to the filtered result set size when + // the filter actually removed rows. This is an approximation; an + // exact count would require SQL-level grant filtering. + if int64(len(calls)) < limit { + total = offset + int64(len(calls)) + } + } + + // Build set of bookmarked call IDs for authenticated users. + bookmarkedIDs := make(map[int64]bool) + if userIDVal, exists := c.Get("userID"); exists { + if uid, ok := userIDVal.(int64); ok { + bookmarks, berr := h.queries.ListBookmarksByUser(ctx, sql.NullInt64{Int64: uid, Valid: true}) + if berr == nil { + for _, bm := range bookmarks { + bookmarkedIDs[bm.CallID] = true + } + } + } + } + + // Pre-cache lookups to avoid N+1 queries. + systemCache := make(map[int64]db.System) + tgCache := make(map[int64]db.Talkgroup) + groupCache := make(map[int64]string) + tagCache := make(map[int64]string) + + // Build response with joined labels and transcripts. 
+ results := make([]shared.CallSearchResult, 0, len(calls)) + for _, call := range calls { + r := shared.CallSearchResult{ + ID: call.ID, + AudioName: call.AudioName, + AudioType: call.AudioType, + DateTime: call.DateTime, + SystemID: call.SystemID, + } + + if call.Frequency.Valid { + r.Frequency = &call.Frequency.Int64 + } + if call.Duration.Valid { + r.Duration = &call.Duration.Int64 + } + if call.Source.Valid { + r.Source = &call.Source.Int64 + } + if call.ErrorCount.Valid { + r.ErrorCount = &call.ErrorCount.Int64 + } + if call.SpikeCount.Valid { + r.SpikeCount = &call.SpikeCount.Int64 + } + if call.Site.Valid { + r.Site = call.Site.String + } + if call.Channel.Valid { + r.Channel = call.Channel.String + } + if call.Decoder.Valid { + r.Decoder = call.Decoder.String + } + if call.TalkerAlias.Valid { + r.TalkerAlias = call.TalkerAlias.String + } + + // Join system label (cached). + sys, ok := systemCache[call.SystemID] + if !ok { + var serr error + sys, serr = h.queries.GetSystem(ctx, call.SystemID) + if serr == nil { + systemCache[call.SystemID] = sys + } + } + if ok || systemCache[call.SystemID].ID != 0 { + r.SystemID = sys.SystemID + r.SystemLabel = sys.Label + } + + // Join talkgroup details (cached). + if call.TalkgroupID.Valid { + tg, ok := tgCache[call.TalkgroupID.Int64] + if !ok { + var terr error + tg, terr = h.queries.GetTalkgroup(ctx, call.TalkgroupID.Int64) + if terr == nil { + tgCache[call.TalkgroupID.Int64] = tg + } + } + if ok || tgCache[call.TalkgroupID.Int64].ID != 0 { + r.TalkgroupID = tg.TalkgroupID + if tg.Label.Valid { + r.TalkgroupLabel = tg.Label.String + } + if tg.Name.Valid { + r.TalkgroupName = tg.Name.String + } + if tg.Led.Valid { + r.TalkgroupLed = tg.Led.String + } + // Resolve group label (cached). 
+				if tg.GroupID.Valid {
+					grpLabel, ok := groupCache[tg.GroupID.Int64]
+					if !ok {
+						grp, gerr := h.queries.GetGroup(ctx, tg.GroupID.Int64)
+						if gerr == nil {
+							groupCache[tg.GroupID.Int64] = grp.Label
+							grpLabel = grp.Label
+						}
+					}
+					if ok || grpLabel != "" { // lookup failures leave the group unset (non-fatal)
+						r.TalkgroupGroup = grpLabel
+					}
+				}
+				// Resolve tag label (cached); same non-fatal pattern as the group lookup above.
+				if tg.TagID.Valid {
+					tagLabel, ok := tagCache[tg.TagID.Int64]
+					if !ok {
+						tag, tgerr := h.queries.GetTag(ctx, tg.TagID.Int64)
+						if tgerr == nil {
+							tagCache[tg.TagID.Int64] = tag.Label
+							tagLabel = tag.Label
+						}
+					}
+					if ok || tagLabel != "" {
+						r.TalkgroupTag = tagLabel
+					}
+				}
+			}
+		}
+
+		// Join transcript text when one exists; a lookup error is treated as "no transcript".
+		trn, terr := h.queries.GetTranscriptionByCallID(ctx, call.ID)
+		if terr == nil {
+			r.Transcript = trn.Text
+		}
+
+		// Bookmark status from the per-user set built above.
+		r.Bookmarked = bookmarkedIDs[call.ID]
+
+		results = append(results, r)
+	}
+
+	c.JSON(http.StatusOK, shared.CallSearchResponse{
+		Calls: results,
+		Total: total,
+	})
+}
diff --git a/backend/internal/handler/calls/transcript.go b/backend/internal/handler/calls/transcript.go
new file mode 100644
index 0000000..2ee1063
--- /dev/null
+++ b/backend/internal/handler/calls/transcript.go
@@ -0,0 +1,80 @@
+package calls
+
+import (
+	"database/sql"
+	"encoding/json"
+	"errors"
+	"log/slog"
+	"net/http"
+	"strconv"
+
+	"github.com/gin-gonic/gin"
+	"github.com/openscanner/openscanner/internal/audio"
+	"github.com/openscanner/openscanner/internal/handler/shared"
+)
+
+// transcriptResponse is the JSON shape returned by GetCallTranscript.
+type transcriptResponse struct {
+	Text     string                       `json:"text"`
+	Segments []audio.TranscriptionSegment `json:"segments"`
+	Language string                       `json:"language"`
+	Model    string                       `json:"model"`
+} // @name TranscriptResponse
+
+// GetCallTranscript handles GET /api/calls/:id/transcript.
+// Returns the transcription for a call if one exists.
+//
+// @Summary Get call transcript
+// @Description Returns the transcription text, segments, language and model for a call. Authentication is optional when the publicAccess setting is enabled; otherwise a valid JWT is required.
+// @Tags Calls
+// @Produce json
+// @Security BearerAuth
+// @Param id path int true "Call ID"
+// @Success 200 {object} transcriptResponse
+// @Failure 400 {object} ErrorResponse
+// @Failure 404 {object} ErrorResponse
+// @Failure 500 {object} ErrorResponse
+// @Router /calls/{id}/transcript [get]
+func (h *Handler) GetCallTranscript(c *gin.Context) {
+	ctx := c.Request.Context()
+	id, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil || id <= 0 {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid call id"})
+		return
+	}
+
+	// Require authentication or publicAccess.
+	_, hasUser := c.Get("userID") // set by auth middleware when a valid JWT was presented
+	if !hasUser && shared.GetSettingValue(c, h.queries, "publicAccess") != "true" {
+		c.JSON(http.StatusUnauthorized, gin.H{"error": "authentication required"})
+		return
+	}
+
+	trx, err := h.queries.GetTranscriptionByCallID(ctx, id)
+	if err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			c.JSON(http.StatusNotFound, gin.H{"error": "transcript not found"})
+			return
+		}
+		slog.Error("failed to get transcript", "call_id", id, "error", err)
+		c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"})
+		return
+	}
+
+	var segments []audio.TranscriptionSegment
+	if trx.Segments.Valid && trx.Segments.String != "" {
+		if err := json.Unmarshal([]byte(trx.Segments.String), &segments); err != nil {
+			slog.Warn("failed to parse transcript segments", "call_id", id, "error", err) // non-fatal: text is still returned
+		}
+	}
+	if segments == nil {
+		segments = []audio.TranscriptionSegment{} // encode as [] rather than null in JSON
+	}
+
+	c.JSON(http.StatusOK, transcriptResponse{
+		Text:     trx.Text,
+		Segments: segments,
+		Language: trx.Language.String,
+		Model:    trx.Model.String,
+	})
+}
diff --git a/backend/internal/handler/calls/upload.go b/backend/internal/handler/calls/upload.go
new file mode 100644
index 0000000..ba47212
--- /dev/null
+++ b/backend/internal/handler/calls/upload.go
@@ -0,0 +1,833 @@
+package calls
+
+import 
( + "context" + "database/sql" + "encoding/json" + "errors" + "io" + "log/slog" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/gin-gonic/gin" + "github.com/openscanner/openscanner/internal/audio" + "github.com/openscanner/openscanner/internal/db" + "github.com/openscanner/openscanner/internal/downstream" + "github.com/openscanner/openscanner/internal/handler/shared" + "github.com/openscanner/openscanner/internal/ws" +) + +// PostCallUpload handles POST /api/call-upload and /api/trunk-recorder-call-upload. +// +// @Summary Upload a call recording +// @Description Ingest a radio call with audio and metadata. Requires a valid API key. +// @Tags Upload +// @Accept multipart/form-data +// @Produce json +// @Security APIKeyAuth +// @Param audio formData file true "Audio file" +// @Param dateTime formData int true "Unix timestamp of the call" +// @Param systemId formData int true "Radio system ID" +// @Param talkgroupId formData int true "Talkgroup ID" +// @Param source formData int false "Source unit ID" +// @Param frequency formData int false "Frequency in Hz" +// @Param duration formData number false "Call duration in seconds" +// @Param talkgroupLabel formData string false "Talkgroup label for auto-populate" +// @Param talkgroupTag formData string false "Talkgroup tag name" +// @Param talkgroupGroup formData string false "Talkgroup group name" +// @Param talkgroupName formData string false "Talkgroup display name" +// @Param systemLabel formData string false "System label" +// @Param patches formData string false "JSON array of patched talkgroup IDs" +// @Param audioName formData string false "Original audio file name" +// @Param audioType formData string false "Audio MIME type" +// @Param site formData string false "Site identifier" +// @Param channel formData string false "Channel identifier" +// @Param decoder formData string false "Decoder software name" +// @Param errorCount formData int false "Decoding error count" +// 
@Param spikeCount formData int false "Signal spike count" +// @Success 200 {object} object{id=int64} "Call ingested successfully" +// @Failure 400 {object} ErrorResponse "Bad request" +// @Failure 401 {object} ErrorResponse "API key required" +// @Failure 429 {object} ErrorResponse "Rate limit exceeded" +// @Failure 500 {object} ErrorResponse "Internal server error" +// @Router /call-upload [post] +// @Router /trunk-recorder-call-upload [post] +func (h *Handler) PostCallUpload(c *gin.Context) { + slog.Debug("call-upload: request received", "ip", c.ClientIP()) + // Retrieve API key ID injected by APIKeyAuth middleware. + apiKeyIDVal, exists := c.Get("apiKeyID") + if !exists { + c.JSON(http.StatusUnauthorized, gin.H{"error": "API key required"}) + return + } + apiKeyID, ok := apiKeyIDVal.(int64) + if !ok { + c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) + return + } + + // Per-API-key rate limiting. + rateLimit := defaultCallRatePerMin + apiKeyRateOverride := false + if apiKeyRateVal, ok := c.Get("apiKeyCallRate"); ok { + if apiKeyRate, ok := apiKeyRateVal.(int64); ok && apiKeyRate > 0 { + rateLimit = int(apiKeyRate) + apiKeyRateOverride = true + } + } + if rStr := shared.GetSettingValue(c, h.queries, "apiKeyCallRate"); rStr != "" { + if r, err := strconv.Atoi(rStr); err == nil && r > 0 && !apiKeyRateOverride { + rateLimit = r + } + } + if rateLimit > maxCallRatePerMin { + rateLimit = maxCallRatePerMin + } + if !h.getLimiter(apiKeyID, rateLimit).allow() { + slog.Warn("call upload rate limit exceeded", "api_key_id", apiKeyID) + c.JSON(http.StatusTooManyRequests, gin.H{"error": "rate limit exceeded"}) + return + } + + slog.Debug("call-upload: rate limit passed", "api_key_id", apiKeyID) + + // SDRTrunk and other rdio-scanner-compatible clients may send a POST with + // partial data to verify the API key. rdio-scanner responds with plain-text + // "Incomplete call data: " (status 417) which SDRTrunk treats as a + // successful connection test. 
We replicate that behavior: parse all fields + // first, then return the same message format for missing required fields. + dateTimeStr := c.PostForm("dateTime") + systemIDStr := c.PostForm("systemId") + if systemIDStr == "" { + systemIDStr = c.PostForm("system") + } + talkgroupIDStr := c.PostForm("talkgroupId") + if talkgroupIDStr == "" { + talkgroupIDStr = c.PostForm("talkgroup") + } + _, audioErr := c.FormFile("audio") + + // Check for test=1 explicitly (Trunk Recorder). + if c.PostForm("test") == "1" { + c.String(http.StatusOK, "Incomplete call data: no talkgroup\n") + return + } + + // rdio-scanner's IsValid() checks all fields WITHOUT early returns and + // overwrites the error each time, so the LAST failing check wins. + // SDRTrunk sends system= but no audio/dateTime/talkgroup, so the last + // error is always "no talkgroup" — which SDRTrunk explicitly checks for. + // We replicate this behavior: collect the last error, then return it. + var incompleteReason string + if audioErr != nil { + incompleteReason = "no audio" + } + if dateTimeStr == "" { + incompleteReason = "no datetime" + } + if systemIDStr == "" { + incompleteReason = "no system" + } + if talkgroupIDStr == "" { + incompleteReason = "no talkgroup" + } + if incompleteReason != "" { + slog.Warn("call-upload: incomplete data", + "reason", incompleteReason, + "api_key_id", apiKeyID, + ) + c.String(http.StatusExpectationFailed, "Incomplete call data: %s\n", incompleteReason) + return + } + + // Parse dateTime. + // Try unix timestamp first (Trunk Recorder, SDRTrunk), then ISO 8601 (voxcall). 
+ var dateTimeUnix int64 + if n, err := strconv.ParseInt(dateTimeStr, 10, 64); err == nil { + dateTimeUnix = n + } else if t, err := time.Parse(time.RFC3339Nano, dateTimeStr); err == nil { + dateTimeUnix = t.Unix() + } else if t, err := time.Parse(time.RFC3339, dateTimeStr); err == nil { + dateTimeUnix = t.Unix() + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid dateTime: expected unix timestamp or ISO 8601"}) + return + } + callTime := time.Unix(dateTimeUnix, 0) + + // Trunk Recorder's rdioscanner_uploader plugin sends "system" and + // "talkgroup" while our canonical field names are "systemId" and + // "talkgroupId". Accept both for backward compatibility. + // (Already parsed above for the connectivity check.) + systemIDRaw, err := strconv.ParseInt(systemIDStr, 10, 64) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid systemId"}) + return + } + + talkgroupIDRaw, err := strconv.ParseInt(talkgroupIDStr, 10, 64) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid talkgroupId"}) + return + } + + // Parse optional fields. 
+ var frequency, duration, source sql.NullInt64 + if v := c.PostForm("frequency"); v != "" { + if n, err := strconv.ParseInt(v, 10, 64); err == nil { + frequency = sql.NullInt64{Int64: n, Valid: true} + } + } + if v := c.PostForm("duration"); v != "" { + if n, err := strconv.ParseInt(v, 10, 64); err == nil { + duration = sql.NullInt64{Int64: n, Valid: true} + } + } + if v := c.PostForm("source"); v != "" { + if n, err := strconv.ParseInt(v, 10, 64); err == nil { + source = sql.NullInt64{Int64: n, Valid: true} + } + } + + var errorCount, spikeCount sql.NullInt64 + if v := c.PostForm("errorCount"); v != "" { + if n, err := strconv.ParseInt(v, 10, 64); err == nil { + errorCount = sql.NullInt64{Int64: n, Valid: true} + } + } + if v := c.PostForm("spikeCount"); v != "" { + if n, err := strconv.ParseInt(v, 10, 64); err == nil { + spikeCount = sql.NullInt64{Int64: n, Valid: true} + } + } + + var sourcesJSON, frequenciesJSON, patchesJSON sql.NullString + if v := c.PostForm("sources"); v != "" { + sourcesJSON = sql.NullString{String: v, Valid: true} + } + if v := c.PostForm("frequencies"); v != "" { + frequenciesJSON = sql.NullString{String: v, Valid: true} + } + if v := c.PostForm("patches"); v != "" { + patchesJSON = sql.NullString{String: v, Valid: true} + } + + // Trunk-recorder's rdio-scanner uploader embeds unit IDs inside the + // "sources" JSON array rather than sending a top-level "source" field. + // Extract the first source unit ID when not explicitly provided. + if !source.Valid && sourcesJSON.Valid { + source = extractPrimarySource(sourcesJSON.String) + } + + // Similarly, error and spike counts are per-segment inside the + // "frequencies" JSON array. Aggregate them when no top-level values + // were provided. + if !errorCount.Valid && !spikeCount.Valid && frequenciesJSON.Valid { + errorCount, spikeCount = aggregateErrorSpikeCounts(frequenciesJSON.String) + } + + // Optional call metadata fields. 
+ var siteCol, channelCol, decoderCol sql.NullString + if v := c.PostForm("site"); v != "" { + siteCol = sql.NullString{String: v, Valid: true} + } + if v := c.PostForm("channel"); v != "" { + channelCol = sql.NullString{String: v, Valid: true} + } + if v := c.PostForm("decoder"); v != "" { + decoderCol = sql.NullString{String: v, Valid: true} + } + + // Optional talkgroup metadata for auto-populate / backfill. + talkgroupLabel := c.PostForm("talkgroupLabel") + talkgroupTag := c.PostForm("talkgroupTag") + talkgroupGroup := c.PostForm("talkgroupGroup") + talkgroupName := c.PostForm("talkgroupName") + + var talkerAliasCol sql.NullString + if v := c.PostForm("talkerAlias"); v != "" { + talkerAliasCol = sql.NullString{String: v, Valid: true} + } + + // Trunk-recorder embeds OTA aliases in the sources JSON "tag" field + // rather than sending a top-level "talkerAlias". Extract from the + // first source entry when not explicitly provided. + if !talkerAliasCol.Valid && sourcesJSON.Valid { + talkerAliasCol = extractPrimarySourceTag(sourcesJSON.String) + } + + ctx := c.Request.Context() + autoPopulateSystems := shared.GetSettingValue(c, h.queries, "autoPopulateSystems") == "true" + + slog.Debug("call-upload: resolving system and talkgroup", + "system_id", systemIDRaw, "talkgroup_id", talkgroupIDRaw) + + // Resolve system by its radio system_id. + system, err := h.queries.GetSystemBySystemID(ctx, systemIDRaw) + if err != nil { + if !errors.Is(err, sql.ErrNoRows) { + slog.Error("failed to query system", "system_id", systemIDRaw, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) + return + } + if !autoPopulateSystems { + c.JSON(http.StatusBadRequest, gin.H{"error": "system not found"}) + return + } + label := strconv.FormatInt(systemIDRaw, 10) + // SDRTrunk and other uploaders send systemLabel with a human-readable name. 
+ if sl := c.PostForm("systemLabel"); sl != "" { + label = sl + } + newID, cerr := h.queries.CreateSystem(ctx, db.CreateSystemParams{ + SystemID: systemIDRaw, + Label: label, + AutoPopulateTalkgroups: 1, + }) + if cerr != nil { + slog.Error("failed to auto-create system", "system_id", systemIDRaw, "error", cerr) + c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) + return + } + slog.Info("auto-populated system", "system_id", systemIDRaw, "label", label, "db_id", newID) + system = db.System{ID: newID, SystemID: systemIDRaw, Label: label, AutoPopulateTalkgroups: 1} + h.hub.BroadcastCFG(ctx) + } + + // Blacklist check: reject calls to blacklisted talkgroups. + if isBlacklistedTG(system.BlacklistsJson, talkgroupIDRaw) { + slog.Info("call upload: talkgroup is blacklisted", + "system_id", systemIDRaw, "talkgroup_id", talkgroupIDRaw) + c.JSON(http.StatusOK, gin.H{"message": "blacklisted"}) + return + } + + // Resolve talkgroup by system DB ID + radio talkgroup ID. + talkgroup, err := h.queries.GetTalkgroupBySystemAndTGID(ctx, db.GetTalkgroupBySystemAndTGIDParams{ + SystemID: system.ID, + TalkgroupID: talkgroupIDRaw, + }) + if err != nil { + if !errors.Is(err, sql.ErrNoRows) { + slog.Error("failed to query talkgroup", "system_id", system.ID, "talkgroup_id", talkgroupIDRaw, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) + return + } + if system.AutoPopulateTalkgroups == 0 { + c.JSON(http.StatusBadRequest, gin.H{"error": "talkgroup not found"}) + return + } + var tgLabel, tgName sql.NullString + if talkgroupLabel != "" { + tgLabel = sql.NullString{String: talkgroupLabel, Valid: true} + } + if talkgroupName != "" { + tgName = sql.NullString{String: talkgroupName, Valid: true} + } + // Resolve group from talkgroupGroup (e.g. SDRTrunk sends this). + var groupID sql.NullInt64 + if talkgroupGroup != "" { + groupID = shared.ResolveGroupID(ctx, h.queries, talkgroupGroup) + } + // Resolve tag from talkgroupTag (e.g. 
"Law Dispatch", "Fire-Tac"). + var tagID sql.NullInt64 + if talkgroupTag != "" { + tagID = shared.ResolveTagID(ctx, h.queries, talkgroupTag) + } + newID, cerr := h.queries.CreateTalkgroup(ctx, db.CreateTalkgroupParams{ + SystemID: system.ID, + TalkgroupID: talkgroupIDRaw, + Label: tgLabel, + Name: tgName, + GroupID: groupID, + TagID: tagID, + }) + if cerr != nil { + slog.Error("failed to auto-create talkgroup", "talkgroup_id", talkgroupIDRaw, "error", cerr) + c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) + return + } + slog.Info("auto-populated talkgroup", "system_id", system.SystemID, "talkgroup_id", talkgroupIDRaw, "label", tgLabel.String, "db_id", newID) + talkgroup = db.Talkgroup{ID: newID, SystemID: system.ID, TalkgroupID: talkgroupIDRaw, Label: tgLabel, Name: tgName, GroupID: groupID, TagID: tagID} + h.hub.BroadcastCFG(ctx) + } else if needsBackfill(talkgroup, talkgroupLabel, talkgroupName, talkgroupTag, talkgroupGroup) { + // Existing talkgroup has empty fields — backfill from upload metadata. 
+ if !talkgroup.Label.Valid && talkgroupLabel != "" { + talkgroup.Label = sql.NullString{String: talkgroupLabel, Valid: true} + } + if !talkgroup.Name.Valid && talkgroupName != "" { + talkgroup.Name = sql.NullString{String: talkgroupName, Valid: true} + } + if !talkgroup.GroupID.Valid && talkgroupGroup != "" { + talkgroup.GroupID = shared.ResolveGroupID(ctx, h.queries, talkgroupGroup) + } + if !talkgroup.TagID.Valid && talkgroupTag != "" { + talkgroup.TagID = shared.ResolveTagID(ctx, h.queries, talkgroupTag) + } + if uerr := h.queries.UpdateTalkgroup(ctx, db.UpdateTalkgroupParams{ + ID: talkgroup.ID, + TalkgroupID: talkgroup.TalkgroupID, + Label: talkgroup.Label, + Name: talkgroup.Name, + Frequency: talkgroup.Frequency, + Led: talkgroup.Led, + GroupID: talkgroup.GroupID, + TagID: talkgroup.TagID, + Order: talkgroup.Order, + }); uerr != nil { + slog.Warn("failed to backfill talkgroup from upload", + "talkgroup_id", talkgroup.TalkgroupID, "error", uerr) + } else { + slog.Info("backfilled talkgroup from upload", + "talkgroup_id", talkgroup.TalkgroupID) + h.hub.BroadcastCFG(ctx) + } + } + + // Duplicate detection (system.ID and talkgroup.ID are the FK values in calls). + if shared.GetSettingValue(c, h.queries, "disableDuplicateDetection") != "true" { + windowMs := int64(2000) + if v := shared.GetSettingValue(c, h.queries, "duplicateDetectionTimeFrame"); v != "" { + if wm, err := strconv.ParseInt(v, 10, 64); err == nil { + windowMs = wm + } + } + dup, derr := audio.IsDuplicate(ctx, h.queries, system.ID, talkgroup.ID, callTime, windowMs) + if derr != nil { + slog.Error("duplicate detection failed", "error", derr) + // Non-fatal: proceed with ingest. + } else if dup { + slog.Info("duplicate call rejected", "system_id", systemIDRaw, "talkgroup_id", talkgroupIDRaw) + c.JSON(http.StatusOK, gin.H{"message": "duplicate call rejected"}) + return + } + } + + // Get uploaded audio file. 
+ fh, err := c.FormFile("audio") + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "audio file is required"}) + return + } + + // Resolve audio conversion mode from settings. + convMode := audio.ConversionDisabled + if mStr := shared.GetSettingValue(c, h.queries, "audioConversion"); mStr != "" { + if m, err := strconv.Atoi(mStr); err == nil { + convMode = audio.ConversionMode(m) + } + } + + // Resolve encoding preset from settings. + convPreset := audio.ParseEncodingPreset(shared.GetSettingValue(c, h.queries, "audioEncodingPreset")) + + // Store audio file (conversion handled inside Processor.Store). + relPath, err := h.processor.Store(ctx, fh, convMode, convPreset) + if err != nil { + slog.Error("failed to store audio file", + "system_id", systemIDRaw, + "talkgroup_id", talkgroupIDRaw, + "error", err, + ) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to store audio"}) + return + } + + slog.Debug("call-upload: audio stored", "path", relPath, "mode", convMode) + + // If the recorder didn't supply a duration, probe the stored file. + if !duration.Valid { + absPath := filepath.Join(h.processor.RecordingsDir(), relPath) + if d := audio.ProbeDuration(ctx, absPath); d > 0 { + duration = sql.NullInt64{Int64: d, Valid: true} + } + } + + // Determine audio MIME type. + // When conversion is enabled the output format depends on the encoding + // preset (M4A for AAC presets, MP3 for MP3 presets). + // Otherwise validate the client-supplied Content-Type against an allowlist + // to prevent attacker-controlled MIME types from reaching the database. 
+ var audioType string + if convMode != audio.ConversionDisabled { + audioType = audio.OutputMIME(convPreset) + } else { + switch fh.Header.Get("Content-Type") { + case "audio/mpeg", "audio/mp3", "audio/wav", "audio/x-wav", + "audio/ogg", "audio/aac", "audio/m4a", "audio/mp4", + "audio/x-m4a", "audio/opus": + audioType = fh.Header.Get("Content-Type") + default: + audioType = "application/octet-stream" + } + } + + // Insert call record. + callID, err := h.queries.CreateCall(ctx, db.CreateCallParams{ + AudioPath: relPath, + AudioName: filepath.Base(relPath), + AudioType: audioType, + DateTime: dateTimeUnix, + Frequency: frequency, + Duration: duration, + Source: source, + SourcesJson: sourcesJSON, + FrequenciesJson: frequenciesJSON, + PatchesJson: patchesJSON, + SystemID: system.ID, + TalkgroupID: sql.NullInt64{Int64: talkgroup.ID, Valid: true}, + Site: siteCol, + Channel: channelCol, + Decoder: decoderCol, + ErrorCount: errorCount, + SpikeCount: spikeCount, + TalkerAlias: talkerAliasCol, + }) + if err != nil { + slog.Error("failed to insert call", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"}) + return + } + + slog.Debug("call-upload: db record inserted", + "call_id", callID, + "system_id", systemIDRaw, + "talkgroup_id", talkgroupIDRaw, + "audio_path", relPath, + ) + + // Extract unit tags from sources JSON and upsert into units table. + // Sources format: [{"pos":0,"src":12345,"tag":"Unit Name"}, ...] + if sourcesJSON.Valid { + upsertUnitsFromSources(ctx, h.queries, system.ID, sourcesJSON.String) + } + + // Map talkerAlias to the source unit as a label (e.g. P25 radios broadcasting a name). 
+ if source.Valid && talkerAliasCol.Valid { + if err := h.queries.UpsertUnit(ctx, db.UpsertUnitParams{ + SystemID: system.ID, + UnitID: source.Int64, + Label: sql.NullString{String: talkerAliasCol.String, Valid: true}, + }); err != nil { + slog.Warn("failed to upsert unit from talkerAlias", + "unit_id", source.Int64, "talkerAlias", talkerAliasCol.String, "error", err) + } + } + + // Broadcast to WebSocket listeners. + if h.hub != nil { + // Read audio file for inline embedding in the CAL JSON frame. + // Use os.Root so the read is scoped to RecordingsDir and cannot + // follow a traversal sequence or symlink out of the directory, + // regardless of what relPath contains. + const maxBroadcastAudioBytes = 20 << 20 // 20 MiB + var audioBytes []byte + if root, rootErr := os.OpenRoot(h.processor.RecordingsDir()); rootErr != nil { + slog.Warn("failed to open recordings root for WS broadcast", "error", rootErr) + } else { + if fi, statErr := root.Stat(relPath); statErr != nil { + slog.Warn("failed to stat audio for WS broadcast", "path", relPath, "error", statErr) + } else if fi.Size() > maxBroadcastAudioBytes { + slog.Warn("audio file too large for inline WS broadcast, sending metadata only", + "path", relPath, "size_bytes", fi.Size(), "max_bytes", maxBroadcastAudioBytes) + } else if f, openErr := root.Open(relPath); openErr != nil { + slog.Warn("failed to open audio for WS broadcast", "path", relPath, "error", openErr) + } else { + readBytes, readErr := io.ReadAll(io.LimitReader(f, maxBroadcastAudioBytes)) + f.Close() + if readErr != nil { + slog.Warn("failed to read audio for WS broadcast", "path", relPath, "error", readErr) + } else { + audioBytes = readBytes + } + } + root.Close() + } + + calPayload := map[string]any{ + "id": callID, + "audioName": filepath.Base(relPath), + "audioType": audioType, + "dateTime": dateTimeUnix, + "systemId": system.SystemID, + "system": system.ID, + "talkgroupId": talkgroup.TalkgroupID, + "talkgroup": talkgroup.ID, + } + if 
frequency.Valid { + calPayload["frequency"] = frequency.Int64 + } + if duration.Valid { + calPayload["duration"] = duration.Int64 + } + if source.Valid { + calPayload["source"] = source.Int64 + } + if siteCol.Valid { + calPayload["site"] = siteCol.String + } + if channelCol.Valid { + calPayload["channel"] = channelCol.String + } + if decoderCol.Valid { + calPayload["decoder"] = decoderCol.String + } + if errorCount.Valid { + calPayload["errorCount"] = errorCount.Int64 + } + if spikeCount.Valid { + calPayload["spikeCount"] = spikeCount.Int64 + } + if talkerAliasCol.Valid { + calPayload["talkerAlias"] = talkerAliasCol.String + } + if sourcesJSON.Valid { + calPayload["sources"] = sourcesJSON.String + } + if frequenciesJSON.Valid { + calPayload["frequencies"] = frequenciesJSON.String + } + calMsg, err := ws.NewCALMessage(calPayload, audioBytes) + if err != nil { + slog.Error("failed to build CAL message", "error", err) + } else { + h.hub.BroadcastCAL(calMsg, func(cl *ws.Client) bool { + return cl.CanReceive(system.ID, talkgroup.ID) + }) + slog.Debug("call-upload: ws broadcast sent", "call_id", callID) + } + } + + logAttrs := []any{ + "call_id", callID, + "system_id", systemIDRaw, + "talkgroup_id", talkgroupIDRaw, + "audio_path", relPath, + "api_key_id", apiKeyID, + } + if duration.Valid { + logAttrs = append(logAttrs, "duration_ms", duration.Int64) + } + slog.Info("call-upload: complete", logAttrs...) + + c.JSON(http.StatusOK, gin.H{"id": callID, "message": "Call imported successfully."}) + + // Notify downstream pushers (non-blocking, after response is sent). + if h.dsNotifier != nil { + // Resolve labels for downstream consumers. 
+ var groupLabel, tagLabel string + if talkgroup.GroupID.Valid { + if g, err := h.queries.GetGroup(ctx, talkgroup.GroupID.Int64); err == nil { + groupLabel = g.Label + } + } + if talkgroup.TagID.Valid { + if t, err := h.queries.GetTag(ctx, talkgroup.TagID.Int64); err == nil { + tagLabel = t.Label + } + } + + h.dsNotifier.Notify(downstream.CallEvent{ + CallID: callID, + AudioPath: relPath, + AudioName: filepath.Base(relPath), + AudioType: audioType, + DateTime: dateTimeUnix, + SystemID: system.SystemID, + System: system.ID, + TalkgroupID: talkgroup.TalkgroupID, + Talkgroup: talkgroup.ID, + Frequency: frequency.Int64, + Duration: duration.Int64, + Source: source.Int64, + Sources: sourcesJSON.String, + Frequencies: frequenciesJSON.String, + Patches: patchesJSON.String, + SystemLabel: system.Label, + TalkgroupLabel: talkgroup.Label.String, + TalkgroupName: talkgroup.Name.String, + TalkgroupGroup: groupLabel, + TalkgroupTag: tagLabel, + TalkerAlias: talkerAliasCol.String, + }) + slog.Debug("call-upload: downstream notify queued", "call_id", callID) + } + + // Enqueue transcription (non-blocking, after response is sent). + if h.transcriber != nil { + absPath := filepath.Join(h.processor.RecordingsDir(), relPath) + if err := h.transcriber.Submit(ctx, audio.TranscriptionJob{ + CallID: callID, + AudioPath: absPath, + }); err != nil { + slog.Warn("call-upload: failed to enqueue transcription", "call_id", callID, "error", err) + } + } +} + +// --- helpers --- + +// needsBackfill returns true if at least one talkgroup field is empty and a +// corresponding value was provided in the upload metadata. 
+func needsBackfill(tg db.Talkgroup, label, name, tag, group string) bool {
+	if !tg.Label.Valid && label != "" { // backfill only fills gaps: a provided value never overwrites an existing one
+		return true
+	}
+	if !tg.Name.Valid && name != "" {
+		return true
+	}
+	if !tg.TagID.Valid && tag != "" {
+		return true
+	}
+	if !tg.GroupID.Valid && group != "" {
+		return true
+	}
+	return false
+}
+
+// isBlacklistedTG checks whether a talkgroup ID appears in a system's blacklist.
+// The blacklist is a JSON array of integers stored in blacklists_json.
+func isBlacklistedTG(blacklistsJSON sql.NullString, talkgroupID int64) bool {
+	if !blacklistsJSON.Valid || strings.TrimSpace(blacklistsJSON.String) == "" {
+		return false
+	}
+	var ids []int64
+	if err := json.Unmarshal([]byte(blacklistsJSON.String), &ids); err != nil {
+		slog.Warn("failed to parse blacklists_json", "error", err) // fail open: a malformed blacklist must not drop calls
+		return false
+	}
+	for _, id := range ids {
+		if id == talkgroupID {
+			return true
+		}
+	}
+	return false
+}
+
+// upsertUnitsFromSources parses the sources JSON array and upserts any units
+// that include a "tag" (label) into the units table.
+// Sources format: [{"pos":0,"src":12345,"tag":"Unit Name"}, ...]
+// Entries without "src" or "tag" are silently skipped.
+func upsertUnitsFromSources(ctx context.Context, q *db.Queries, systemDBID int64, raw string) {
+	var sources []map[string]any
+	if err := json.Unmarshal([]byte(raw), &sources); err != nil {
+		return // best-effort: malformed sources JSON is ignored
+	}
+	for _, entry := range sources {
+		srcVal, ok := entry["src"]
+		if !ok {
+			continue
+		}
+		srcFloat, ok := srcVal.(float64) // JSON numbers decode as float64 under map[string]any
+		if !ok || srcFloat <= 0 {
+			continue
+		}
+		tagVal, ok := entry["tag"]
+		if !ok {
+			continue
+		}
+		tag, ok := tagVal.(string)
+		if !ok || tag == "" {
+			continue
+		}
+		if err := q.UpsertUnit(ctx, db.UpsertUnitParams{
+			SystemID: systemDBID,
+			UnitID:   int64(srcFloat),
+			Label:    sql.NullString{String: tag, Valid: true},
+		}); err != nil {
+			slog.Warn("failed to upsert unit from sources",
+				"unit_id", int64(srcFloat), "tag", tag, "error", err) // non-fatal: one bad entry does not abort the rest
+		}
+	}
+}
+
+// extractPrimarySource returns the "src" value from the first entry in a
+// sources JSON array. Trunk-recorder sends unit IDs only inside this array
+// (e.g. [{"pos":0,"src":12345}, ...]) and does not set a top-level "source".
+func extractPrimarySource(raw string) sql.NullInt64 {
+	var sources []map[string]any
+	if err := json.Unmarshal([]byte(raw), &sources); err != nil || len(sources) == 0 {
+		return sql.NullInt64{} // invalid/empty input yields the null (Valid=false) value
+	}
+	srcVal, ok := sources[0]["src"]
+	if !ok {
+		return sql.NullInt64{}
+	}
+	srcFloat, ok := srcVal.(float64) // JSON numbers decode as float64 under map[string]any
+	if !ok || srcFloat <= 0 {
+		return sql.NullInt64{}
+	}
+	return sql.NullInt64{Int64: int64(srcFloat), Valid: true}
+}
+
+// extractPrimarySourceTag returns the "tag" value from the first source entry
+// that has a non-empty tag. Trunk-recorder sends OTA aliases (talker alias)
+// inside the sources JSON rather than as a top-level "talkerAlias" field.
+func extractPrimarySourceTag(raw string) sql.NullString {
+	var sources []map[string]any
+	if err := json.Unmarshal([]byte(raw), &sources); err != nil {
+		return sql.NullString{}
+	}
+	for _, entry := range sources {
+		tagVal, ok := entry["tag"]
+		if !ok {
+			continue
+		}
+		tag, ok := tagVal.(string)
+		if !ok || tag == "" {
+			continue
+		}
+		return sql.NullString{String: tag, Valid: true} // first non-empty tag wins
+	}
+	return sql.NullString{}
+}
+
+// aggregateErrorSpikeCounts sums errorCount and spikeCount from all entries
+// in a frequencies JSON array. Trunk-recorder sends per-segment values inside
+// this array (e.g. [{"errorCount":2,"spikeCount":0}, ...]) rather than
+// providing aggregate top-level fields.
+func aggregateErrorSpikeCounts(raw string) (sql.NullInt64, sql.NullInt64) {
+	var freqs []map[string]any
+	if err := json.Unmarshal([]byte(raw), &freqs); err != nil || len(freqs) == 0 {
+		return sql.NullInt64{}, sql.NullInt64{}
+	}
+	var totalErrors, totalSpikes int64
+	var found bool
+	for _, entry := range freqs {
+		if v, ok := entry["errorCount"]; ok {
+			if f, ok := v.(float64); ok {
+				totalErrors += int64(f)
+				found = true
+			}
+		}
+		// trunk-recorder also uses "error_count" in its call JSON. 
+ if v, ok := entry["error_count"]; ok { + if f, ok := v.(float64); ok { + totalErrors += int64(f) + found = true + } + } + if v, ok := entry["spikeCount"]; ok { + if f, ok := v.(float64); ok { + totalSpikes += int64(f) + found = true + } + } + if v, ok := entry["spike_count"]; ok { + if f, ok := v.(float64); ok { + totalSpikes += int64(f) + found = true + } + } + } + if !found { + return sql.NullInt64{}, sql.NullInt64{} + } + return sql.NullInt64{Int64: totalErrors, Valid: true}, + sql.NullInt64{Int64: totalSpikes, Valid: true} +} diff --git a/backend/internal/middleware/auth.go b/backend/internal/middleware/auth.go new file mode 100644 index 0000000..882c0ae --- /dev/null +++ b/backend/internal/middleware/auth.go @@ -0,0 +1,192 @@ +package middleware + +import ( + "log/slog" + "net/http" + "strings" + "time" + + "github.com/gin-gonic/gin" + "github.com/openscanner/openscanner/internal/auth" + "github.com/openscanner/openscanner/internal/db" +) + +// JWTAuth validates a Bearer JWT and stores userID, username, and role in the +// Gin context. Aborts with 401 if the token is missing or invalid. +func JWTAuth() gin.HandlerFunc { + return func(c *gin.Context) { + header := c.GetHeader("Authorization") + if !strings.HasPrefix(header, "Bearer ") { + slog.Debug("middleware: jwt auth failed, no bearer header", "path", c.Request.URL.Path) + c.AbortWithStatusJSON(401, gin.H{"error": "authorization header required"}) + return + } + + tokenStr := strings.TrimPrefix(header, "Bearer ") + claims, err := auth.ParseToken(tokenStr) + if err != nil { + c.AbortWithStatusJSON(401, gin.H{"error": "invalid or expired token"}) + return + } + + if auth.Tokens.IsRevoked(claims.ID) { + c.AbortWithStatusJSON(401, gin.H{"error": "token has been revoked"}) + return + } + + // Check account expiration embedded in JWT claims (OWASP A01). 
+ if claims.AccountExp > 0 && time.Now().Unix() > claims.AccountExp { + c.AbortWithStatusJSON(401, gin.H{"error": "account expired"}) + return + } + + slog.Debug("middleware: jwt auth success", "user_id", claims.UserID, "role", claims.Role) + c.Set("userID", claims.UserID) + c.Set("username", claims.Username) + c.Set("role", claims.Role) + c.Set("jti", claims.ID) + c.Next() + } +} + +// OptionalJWTAuth extracts user info from a Bearer JWT if present, but does not +// abort the request when the token is missing or invalid. Useful for endpoints +// that are publicly accessible but provide extra data to authenticated users. +func OptionalJWTAuth() gin.HandlerFunc { + return func(c *gin.Context) { + header := c.GetHeader("Authorization") + if !strings.HasPrefix(header, "Bearer ") { + c.Next() + return + } + + tokenStr := strings.TrimPrefix(header, "Bearer ") + claims, err := auth.ParseToken(tokenStr) + if err != nil { + c.Next() + return + } + + if auth.Tokens.IsRevoked(claims.ID) { + c.Next() + return + } + + // Check account expiration embedded in JWT claims (OWASP A01). + if claims.AccountExp > 0 && time.Now().Unix() > claims.AccountExp { + c.Next() + return + } + + c.Set("userID", claims.UserID) + c.Set("username", claims.Username) + c.Set("role", claims.Role) + c.Set("jti", claims.ID) + c.Next() + } +} + +// RequireAdmin checks that the authenticated user has the admin role. +// Must be chained after JWTAuth. Aborts with 403 if the role is not admin. 
+func RequireAdmin() gin.HandlerFunc { + return func(c *gin.Context) { + role, _ := c.Get("role") + roleStr, _ := role.(string) + if roleStr != auth.RoleAdmin { + slog.Debug("middleware: admin check failed", "role", roleStr) + c.AbortWithStatusJSON(403, gin.H{"error": "admin access required"}) + return + } + slog.Debug("middleware: admin check passed") + c.Next() + } +} + +// APIKeyAuth reads the API key from the X-API-Key header, ?key= query param, +// or (for Trunk Recorder compatibility) a multipart "key" form field — in that +// order. It looks up the key in the database and sets "apiKeyID" in the Gin +// context. Aborts with 401 if the key is missing, not found, or disabled. +func APIKeyAuth(queries *db.Queries) gin.HandlerFunc { + return func(c *gin.Context) { + requestID, _ := c.Get("requestID") + + // Prefer header, then query string. Only fall back to PostForm + // (which parses the entire multipart body) when both are empty. + key := c.GetHeader("X-API-Key") + if key == "" { + key = c.Query("key") + } + if key == "" { + // Trunk Recorder's rdioscanner_uploader plugin sends the API key + // as a multipart form field named "key" rather than a header. + key = c.PostForm("key") + } + if key == "" { + slog.Warn("api key auth: missing X-API-Key header", + "request_id", requestID, + "ip", c.ClientIP(), + "path", c.Request.URL.Path, + ) + c.AbortWithStatusJSON(401, gin.H{"error": "API key required"}) + return + } + + // Reject implausibly long keys before hashing (defense-in-depth; real + // keys are 64 hex chars). Prevents CPU waste on attacker-controlled input. 
+ if len(key) > 128 { + slog.Warn("api key auth: oversized key rejected", + "request_id", requestID, + "ip", c.ClientIP(), + "path", c.Request.URL.Path, + "length", len(key), + ) + c.AbortWithStatusJSON(401, gin.H{"error": "invalid API key"}) + return + } + + hashed := auth.HashAPIKey(key) + apiKey, err := queries.GetAPIKeyByKey(c.Request.Context(), hashed) + if err != nil { + slog.Warn("api key auth: invalid key", + "request_id", requestID, + "ip", c.ClientIP(), + "path", c.Request.URL.Path, + ) + c.AbortWithStatusJSON(401, gin.H{"error": "invalid API key"}) + return + } + if apiKey.Disabled != 0 { + slog.Warn("api key auth: disabled key used", + "request_id", requestID, + "ip", c.ClientIP(), + "path", c.Request.URL.Path, + "api_key_id", apiKey.ID, + ) + c.AbortWithStatusJSON(401, gin.H{"error": "API key is disabled"}) + return + } + + c.Set("apiKeyID", apiKey.ID) + if apiKey.CallRateLimit.Valid { + c.Set("apiKeyCallRate", apiKey.CallRateLimit.Int64) + } + slog.Debug("middleware: api key auth success", + "api_key_id", apiKey.ID, + "ident", apiKey.Ident.String, + "path", c.Request.URL.Path, + ) + c.Next() + } +} + +// SwaggerCookieAuth validates the short-lived docs session cookie. +func SwaggerCookieAuth() gin.HandlerFunc { + return func(c *gin.Context) { + value, err := c.Cookie(auth.SwaggerCookieName) + if err != nil || !auth.ValidateSwaggerCookie(value) { + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "swagger session required"}) + return + } + c.Next() + } +} diff --git a/backend/internal/middleware/cors.go b/backend/internal/middleware/cors.go new file mode 100644 index 0000000..6b1afbe --- /dev/null +++ b/backend/internal/middleware/cors.go @@ -0,0 +1,69 @@ +package middleware + +import ( + "net/http" + "net/url" + "strings" + + "github.com/gin-gonic/gin" +) + +// CORS handles Cross-Origin Resource Sharing. +// In production the frontend is served from the same origin so cross-origin +// requests are rejected. 
The allowed origin is derived from the request's own +// Host header (same-origin only). Preflight requests are handled with 204. +func CORS() gin.HandlerFunc { + return func(c *gin.Context) { + origin := c.GetHeader("Origin") + if origin == "" { + c.Next() + return + } + + // Default to same-origin: the Origin must match the Host. + host := c.Request.Host + // Build the expected origin from the request scheme + host. + scheme := "http" + if c.Request.TLS != nil || c.GetHeader("X-Forwarded-Proto") == "https" { + scheme = "https" + } + expected := scheme + "://" + host + allowed := origin == expected + + isLocalhost := func(h string) bool { + h = strings.ToLower(h) + return h == "localhost" || h == "127.0.0.1" + } + + // Dev/local exception: allow localhost frontend origins on different ports + // when backend is also running on localhost. Only active when Gin is in + // debug mode; release builds enforce strict same-origin. + if !allowed && gin.Mode() == gin.DebugMode { + if u, err := url.Parse(origin); err == nil { + if reqHost, reqErr := url.Parse(scheme + "://" + host); reqErr == nil { + if isLocalhost(u.Hostname()) && isLocalhost(reqHost.Hostname()) { + allowed = true + } + } + } + } + + if !allowed { + c.AbortWithStatus(http.StatusForbidden) + return + } + + c.Header("Access-Control-Allow-Origin", origin) + c.Header("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS") + c.Header("Access-Control-Allow-Headers", "Authorization, Content-Type, X-API-Key") + c.Header("Access-Control-Max-Age", "86400") + c.Header("Vary", "Origin") + + if c.Request.Method == http.MethodOptions { + c.AbortWithStatus(http.StatusNoContent) + return + } + + c.Next() + } +} diff --git a/backend/internal/middleware/limits.go b/backend/internal/middleware/limits.go new file mode 100644 index 0000000..03ac14e --- /dev/null +++ b/backend/internal/middleware/limits.go @@ -0,0 +1,83 @@ +package middleware + +import ( + "net/http" + "sync" + "time" + + "github.com/gin-gonic/gin" + 
"github.com/openscanner/openscanner/internal/auth" +) + +// RateLimit returns middleware that rejects requests with 429 if the client IP +// is locked out by the given rate limiter. +func RateLimit(rl *auth.RateLimiter) gin.HandlerFunc { + return func(c *gin.Context) { + if rl.IsLockedOut(c.ClientIP()) { + c.AbortWithStatusJSON(429, gin.H{"error": "too many failed attempts, try again later"}) + return + } + c.Next() + } +} + +// MaxBodySize limits the size of request bodies to prevent memory exhaustion. +// Applies to non-multipart requests only (multipart is limited by +// router.MaxMultipartMemory). +func MaxBodySize(maxBytes int64) gin.HandlerFunc { + return func(c *gin.Context) { + if c.Request.Body != nil { + c.Request.Body = http.MaxBytesReader(c.Writer, c.Request.Body, maxBytes) + } + c.Next() + } +} + +// ipBucket is a per-IP sliding-window counter. +type ipBucket struct { + windowStart time.Time + count int +} + +// RateLimitByIP returns middleware that limits requests per IP per minute. +// Designed for unauthenticated, public endpoints (e.g. shared call access). +func RateLimitByIP(rpm int) gin.HandlerFunc { + var mu sync.Mutex + buckets := make(map[string]*ipBucket) + window := time.Minute + + return func(c *gin.Context) { + ip := c.ClientIP() + now := time.Now() + + mu.Lock() + + // Periodic cleanup: remove stale entries to bound memory. 
+ if len(buckets) > 1000 { + for k, b := range buckets { + if now.Sub(b.windowStart) >= 2*window { + delete(buckets, k) + } + } + } + + b, ok := buckets[ip] + if !ok { + b = &ipBucket{windowStart: now} + buckets[ip] = b + } + if now.Sub(b.windowStart) >= window { + b.windowStart = now + b.count = 0 + } + if b.count >= rpm { + mu.Unlock() + c.AbortWithStatusJSON(http.StatusTooManyRequests, gin.H{"error": "rate limit exceeded"}) + return + } + b.count++ + mu.Unlock() + + c.Next() + } +} diff --git a/backend/internal/middleware/logging.go b/backend/internal/middleware/logging.go new file mode 100644 index 0000000..0a096d1 --- /dev/null +++ b/backend/internal/middleware/logging.go @@ -0,0 +1,73 @@ +package middleware + +import ( + "log/slog" + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/google/uuid" +) + +func requestLogLevel(status int) string { + switch { + case status >= http.StatusInternalServerError: + return "error" + case status >= http.StatusBadRequest: + return "warn" + default: + return "info" + } +} + +// RequestID adds a UUID v4 X-Request-ID response header and stores it in the +// Gin context under the key "requestID". +func RequestID() gin.HandlerFunc { + return func(c *gin.Context) { + requestID := uuid.New().String() + c.Set("requestID", requestID) + c.Header("X-Request-ID", requestID) + c.Next() + } +} + +// Logger emits a structured slog line for every request including method, path, +// status code, latency, request ID, and client IP. +// Health check probes and CORS preflight requests are logged at Debug only so +// they don't drown out real traffic in normal operation. 
+func Logger() gin.HandlerFunc { + return func(c *gin.Context) { + start := time.Now() + c.Next() + latency := time.Since(start) + status := c.Writer.Status() + level := requestLogLevel(status) + + var slogLevel slog.Level + switch level { + case "error": + slogLevel = slog.LevelError + case "warn": + slogLevel = slog.LevelWarn + default: + slogLevel = slog.LevelInfo + } + + // Demote noisy low-signal endpoints to Debug when they succeed. + path := c.Request.URL.Path + if slogLevel == slog.LevelInfo && + (path == "/api/health" || c.Request.Method == http.MethodOptions) { + slogLevel = slog.LevelDebug + } + + requestID, _ := c.Get("requestID") + slog.Log(c.Request.Context(), slogLevel, "request", + "method", c.Request.Method, + "path", path, + "status", status, + "latency_ms", latency.Milliseconds(), + "request_id", requestID, + "ip", c.ClientIP(), + ) + } +} diff --git a/backend/internal/middleware/middleware.go b/backend/internal/middleware/middleware.go index 3be99b5..6e41c58 100644 --- a/backend/internal/middleware/middleware.go +++ b/backend/internal/middleware/middleware.go @@ -1,392 +1,2 @@ // Package middleware contains Gin middleware: JWT auth, API key auth, rate limiting, request ID (UUID v4), logging, CORS. package middleware - -import ( - "log/slog" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/gin-gonic/gin" - "github.com/google/uuid" - "github.com/openscanner/openscanner/internal/auth" - "github.com/openscanner/openscanner/internal/db" -) - -func requestLogLevel(status int) string { - switch { - case status >= http.StatusInternalServerError: - return "error" - case status >= http.StatusBadRequest: - return "warn" - default: - return "info" - } -} - -// RequestID adds a UUID v4 X-Request-ID response header and stores it in the -// Gin context under the key "requestID". 
-func RequestID() gin.HandlerFunc { - return func(c *gin.Context) { - requestID := uuid.New().String() - c.Set("requestID", requestID) - c.Header("X-Request-ID", requestID) - c.Next() - } -} - -// CORS handles Cross-Origin Resource Sharing. -// In production the frontend is served from the same origin so cross-origin -// requests are rejected. The allowed origin is derived from the request's own -// Host header (same-origin only). Preflight requests are handled with 204. -func CORS() gin.HandlerFunc { - return func(c *gin.Context) { - origin := c.GetHeader("Origin") - if origin == "" { - c.Next() - return - } - - // Default to same-origin: the Origin must match the Host. - host := c.Request.Host - // Build the expected origin from the request scheme + host. - scheme := "http" - if c.Request.TLS != nil || c.GetHeader("X-Forwarded-Proto") == "https" { - scheme = "https" - } - expected := scheme + "://" + host - allowed := origin == expected - - isLocalhost := func(h string) bool { - h = strings.ToLower(h) - return h == "localhost" || h == "127.0.0.1" - } - - // Dev/local exception: allow localhost frontend origins on different ports - // when backend is also running on localhost. Only active when Gin is in - // debug mode; release builds enforce strict same-origin. 
- if !allowed && gin.Mode() == gin.DebugMode { - if u, err := url.Parse(origin); err == nil { - if reqHost, reqErr := url.Parse(scheme + "://" + host); reqErr == nil { - if isLocalhost(u.Hostname()) && isLocalhost(reqHost.Hostname()) { - allowed = true - } - } - } - } - - if !allowed { - c.AbortWithStatus(http.StatusForbidden) - return - } - - c.Header("Access-Control-Allow-Origin", origin) - c.Header("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS") - c.Header("Access-Control-Allow-Headers", "Authorization, Content-Type, X-API-Key") - c.Header("Access-Control-Max-Age", "86400") - c.Header("Vary", "Origin") - - if c.Request.Method == http.MethodOptions { - c.AbortWithStatus(http.StatusNoContent) - return - } - - c.Next() - } -} - -// Logger emits a structured slog line for every request including method, path, -// status code, latency, request ID, and client IP. -// Health check probes and CORS preflight requests are logged at Debug only so -// they don't drown out real traffic in normal operation. -func Logger() gin.HandlerFunc { - return func(c *gin.Context) { - start := time.Now() - c.Next() - latency := time.Since(start) - status := c.Writer.Status() - level := requestLogLevel(status) - - var slogLevel slog.Level - switch level { - case "error": - slogLevel = slog.LevelError - case "warn": - slogLevel = slog.LevelWarn - default: - slogLevel = slog.LevelInfo - } - - // Demote noisy low-signal endpoints to Debug when they succeed. 
- path := c.Request.URL.Path - if slogLevel == slog.LevelInfo && - (path == "/api/health" || c.Request.Method == http.MethodOptions) { - slogLevel = slog.LevelDebug - } - - requestID, _ := c.Get("requestID") - slog.Log(c.Request.Context(), slogLevel, "request", - "method", c.Request.Method, - "path", path, - "status", status, - "latency_ms", latency.Milliseconds(), - "request_id", requestID, - "ip", c.ClientIP(), - ) - } -} - -// JWTAuth validates a Bearer JWT and stores userID, username, and role in the -// Gin context. Aborts with 401 if the token is missing or invalid. -func JWTAuth() gin.HandlerFunc { - return func(c *gin.Context) { - header := c.GetHeader("Authorization") - if !strings.HasPrefix(header, "Bearer ") { - slog.Debug("middleware: jwt auth failed, no bearer header", "path", c.Request.URL.Path) - c.AbortWithStatusJSON(401, gin.H{"error": "authorization header required"}) - return - } - - tokenStr := strings.TrimPrefix(header, "Bearer ") - claims, err := auth.ParseToken(tokenStr) - if err != nil { - c.AbortWithStatusJSON(401, gin.H{"error": "invalid or expired token"}) - return - } - - if auth.Tokens.IsRevoked(claims.ID) { - c.AbortWithStatusJSON(401, gin.H{"error": "token has been revoked"}) - return - } - - // Check account expiration embedded in JWT claims (OWASP A01). - if claims.AccountExp > 0 && time.Now().Unix() > claims.AccountExp { - c.AbortWithStatusJSON(401, gin.H{"error": "account expired"}) - return - } - - slog.Debug("middleware: jwt auth success", "user_id", claims.UserID, "role", claims.Role) - c.Set("userID", claims.UserID) - c.Set("username", claims.Username) - c.Set("role", claims.Role) - c.Set("jti", claims.ID) - c.Next() - } -} - -// OptionalJWTAuth extracts user info from a Bearer JWT if present, but does not -// abort the request when the token is missing or invalid. Useful for endpoints -// that are publicly accessible but provide extra data to authenticated users. 
-func OptionalJWTAuth() gin.HandlerFunc { - return func(c *gin.Context) { - header := c.GetHeader("Authorization") - if !strings.HasPrefix(header, "Bearer ") { - c.Next() - return - } - - tokenStr := strings.TrimPrefix(header, "Bearer ") - claims, err := auth.ParseToken(tokenStr) - if err != nil { - c.Next() - return - } - - if auth.Tokens.IsRevoked(claims.ID) { - c.Next() - return - } - - // Check account expiration embedded in JWT claims (OWASP A01). - if claims.AccountExp > 0 && time.Now().Unix() > claims.AccountExp { - c.Next() - return - } - - c.Set("userID", claims.UserID) - c.Set("username", claims.Username) - c.Set("role", claims.Role) - c.Set("jti", claims.ID) - c.Next() - } -} - -// RequireAdmin checks that the authenticated user has the admin role. -// Must be chained after JWTAuth. Aborts with 403 if the role is not admin. -func RequireAdmin() gin.HandlerFunc { - return func(c *gin.Context) { - role, _ := c.Get("role") - roleStr, _ := role.(string) - if roleStr != auth.RoleAdmin { - slog.Debug("middleware: admin check failed", "role", roleStr) - c.AbortWithStatusJSON(403, gin.H{"error": "admin access required"}) - return - } - slog.Debug("middleware: admin check passed") - c.Next() - } -} - -// APIKeyAuth reads the API key from the X-API-Key header, ?key= query param, -// or (for Trunk Recorder compatibility) a multipart "key" form field — in that -// order. It looks up the key in the database and sets "apiKeyID" in the Gin -// context. Aborts with 401 if the key is missing, not found, or disabled. -func APIKeyAuth(queries *db.Queries) gin.HandlerFunc { - return func(c *gin.Context) { - requestID, _ := c.Get("requestID") - - // Prefer header, then query string. Only fall back to PostForm - // (which parses the entire multipart body) when both are empty. 
- key := c.GetHeader("X-API-Key") - if key == "" { - key = c.Query("key") - } - if key == "" { - // Trunk Recorder's rdioscanner_uploader plugin sends the API key - // as a multipart form field named "key" rather than a header. - key = c.PostForm("key") - } - if key == "" { - slog.Warn("api key auth: missing X-API-Key header", - "request_id", requestID, - "ip", c.ClientIP(), - "path", c.Request.URL.Path, - ) - c.AbortWithStatusJSON(401, gin.H{"error": "API key required"}) - return - } - - // Reject implausibly long keys before hashing (defense-in-depth; real - // keys are 64 hex chars). Prevents CPU waste on attacker-controlled input. - if len(key) > 128 { - slog.Warn("api key auth: oversized key rejected", - "request_id", requestID, - "ip", c.ClientIP(), - "path", c.Request.URL.Path, - "length", len(key), - ) - c.AbortWithStatusJSON(401, gin.H{"error": "invalid API key"}) - return - } - - hashed := auth.HashAPIKey(key) - apiKey, err := queries.GetAPIKeyByKey(c.Request.Context(), hashed) - if err != nil { - slog.Warn("api key auth: invalid key", - "request_id", requestID, - "ip", c.ClientIP(), - "path", c.Request.URL.Path, - ) - c.AbortWithStatusJSON(401, gin.H{"error": "invalid API key"}) - return - } - if apiKey.Disabled != 0 { - slog.Warn("api key auth: disabled key used", - "request_id", requestID, - "ip", c.ClientIP(), - "path", c.Request.URL.Path, - "api_key_id", apiKey.ID, - ) - c.AbortWithStatusJSON(401, gin.H{"error": "API key is disabled"}) - return - } - - c.Set("apiKeyID", apiKey.ID) - if apiKey.CallRateLimit.Valid { - c.Set("apiKeyCallRate", apiKey.CallRateLimit.Int64) - } - slog.Debug("middleware: api key auth success", - "api_key_id", apiKey.ID, - "ident", apiKey.Ident.String, - "path", c.Request.URL.Path, - ) - c.Next() - } -} - -// RateLimit returns middleware that rejects requests with 429 if the client IP -// is locked out by the given rate limiter. 
-func RateLimit(rl *auth.RateLimiter) gin.HandlerFunc { - return func(c *gin.Context) { - if rl.IsLockedOut(c.ClientIP()) { - c.AbortWithStatusJSON(429, gin.H{"error": "too many failed attempts, try again later"}) - return - } - c.Next() - } -} - -// MaxBodySize limits the size of request bodies to prevent memory exhaustion. -// Applies to non-multipart requests only (multipart is limited by -// router.MaxMultipartMemory). -func MaxBodySize(maxBytes int64) gin.HandlerFunc { - return func(c *gin.Context) { - if c.Request.Body != nil { - c.Request.Body = http.MaxBytesReader(c.Writer, c.Request.Body, maxBytes) - } - c.Next() - } -} - -// SwaggerCookieAuth validates the short-lived docs session cookie. -func SwaggerCookieAuth() gin.HandlerFunc { - return func(c *gin.Context) { - value, err := c.Cookie(auth.SwaggerCookieName) - if err != nil || !auth.ValidateSwaggerCookie(value) { - c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "swagger session required"}) - return - } - c.Next() - } -} - -// ipBucket is a per-IP sliding-window counter. -type ipBucket struct { - windowStart time.Time - count int -} - -// RateLimitByIP returns middleware that limits requests per IP per minute. -// Designed for unauthenticated, public endpoints (e.g. shared call access). -func RateLimitByIP(rpm int) gin.HandlerFunc { - var mu sync.Mutex - buckets := make(map[string]*ipBucket) - window := time.Minute - - return func(c *gin.Context) { - ip := c.ClientIP() - now := time.Now() - - mu.Lock() - - // Periodic cleanup: remove stale entries to bound memory. 
- if len(buckets) > 1000 { - for k, b := range buckets { - if now.Sub(b.windowStart) >= 2*window { - delete(buckets, k) - } - } - } - - b, ok := buckets[ip] - if !ok { - b = &ipBucket{windowStart: now} - buckets[ip] = b - } - if now.Sub(b.windowStart) >= window { - b.windowStart = now - b.count = 0 - } - if b.count >= rpm { - mu.Unlock() - c.AbortWithStatusJSON(http.StatusTooManyRequests, gin.H{"error": "rate limit exceeded"}) - return - } - b.count++ - mu.Unlock() - - c.Next() - } -} From d1ab97178172ba011a98be26f65dffa9062ccbbe Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Sat, 25 Apr 2026 18:59:05 +0000 Subject: [PATCH 16/27] chore: mark docs/plans/ as local-only in agent instructions - copilot-instructions.md: add 'Local-only planning docs' section forbidding references to docs/plans/* in CHANGELOG, committed docs, commit messages, PR descriptions, or code comments - docs-expert.agent.md: rewrite design-docs section as LOCAL ONLY; remove stale file table listing now-ignored plan files - react-expert.agent.md: soften plan reference to 'local-only design notes' - CHANGELOG.md: remove two pre-existing references to plan documents ('audio-http-migration-plan.md' and 'native-API design plan') --- .github/agents/docs-expert.agent.md | 36 ++++++++++++---------------- .github/agents/react-expert.agent.md | 2 +- .github/copilot-instructions.md | 16 ++++++++++++- CHANGELOG.md | 3 +-- 4 files changed, 32 insertions(+), 25 deletions(-) diff --git a/.github/agents/docs-expert.agent.md b/.github/agents/docs-expert.agent.md index 92c86e0..73569de 100644 --- a/.github/agents/docs-expert.agent.md +++ b/.github/agents/docs-expert.agent.md @@ -36,9 +36,16 @@ These are **instructional**, written for operators and end users — not for con - Screenshots are fine to reference by filename but are not required; text must stand alone. - Accuracy is non-negotiable. Paths, flag names, env var names, URLs, and ports must match the code exactly. 
If the code says `--listen`, the doc says `--listen`, not `--address`. -### Design docs and specs (`docs/plans/*.md`) +### Design docs and specs (`docs/plans/*.md`) — LOCAL ONLY -These are for contributors and maintainers. Use technical language, reference code paths, include Mermaid diagrams, cite file paths with line numbers. The audience rules above do **not** apply here. +The `docs/plans/` directory is **gitignored**. Files there are local-only working notes used while implementing a feature; they are never committed and never visible to anyone other than the author. + +Rules: + +- Place new design docs / specs / phase plans under `docs/plans/` only when the user explicitly asks for a plan. Do not stage or commit them. +- **Never reference `docs/plans/*` paths from tracked files** (CHANGELOG, committed docs, commit messages, PR descriptions, code comments). The link would 404 for everyone else. +- If you encounter an existing tracked file that links into `docs/plans/`, treat that link as a bug and remove it. +- Inside a plan doc itself, technical language, code paths, Mermaid diagrams, and line-number citations are fine — the audience is just you and the user. ### Quick check before submitting a user guide @@ -57,25 +64,12 @@ These are for contributors and maintainers. Use technical language, reference co ## Doc Files -| File | Purpose | -| -------------------------- | -------------------------------------------------------------------- | -| `docs/admin-guide.md` | UI walkthrough for the admin dashboard | -| `docs/deployment-guide.md` | Bare metal, Docker, reverse proxy, Let's Encrypt, secrets encryption | -| `docs/recorder-guide.md` | Per-recorder setup instructions | -| `docs/plans/` | Design plans and specs (architecture, API, etc.) 
| - -### Plans Directory (`docs/plans/`) - -| File | Purpose | -| --------------------------------------- | ------------------------------------------------- | -| `docs/plans/plan.md` | Master project plan and UI design spec | -| `docs/plans/architecture.md` | System diagram, component descriptions, data flow | -| `docs/plans/api.md` | Full API endpoint reference | -| `docs/plans/recorder-integration.md` | Recorder integration design | -| `docs/plans/transcription.md` | Transcription feature design (go-whisper) | -| `docs/plans/refresh-token-auth-plan.md` | Refresh token auth flow design | -| `docs/plans/security-hardening-plan.md` | Security hardening roadmap | -| Other plan files | Feature-specific implementation plans | +| File | Purpose | +| -------------------------- | ----------------------------------------------------------------------------- | +| `docs/admin-guide.md` | UI walkthrough for the admin dashboard | +| `docs/deployment-guide.md` | Bare metal, Docker, reverse proxy, Let's Encrypt, secrets encryption | +| `docs/recorder-guide.md` | Per-recorder setup instructions | +| `docs/plans/` | **LOCAL ONLY** — gitignored working notes; never reference from tracked files | ## Key Diagrams to Maintain diff --git a/.github/agents/react-expert.agent.md b/.github/agents/react-expert.agent.md index cd11236..7d212c4 100644 --- a/.github/agents/react-expert.agent.md +++ b/.github/agents/react-expert.agent.md @@ -160,7 +160,7 @@ frontend/ ## UI Design Principles -Full visual specification with ASCII wireframes, color palette, component mapping, responsive breakpoints, and animations is in `docs/plans/plan.md` § "Web UI Design". Key points: +Local-only design notes (in the gitignored `docs/plans/` working directory) may contain extended ASCII wireframes and palette spec. The canonical, in-repo summary follows. 
Key points: - **Dark-first** — custom DaisyUI `openscanner` theme; `base-100` (#121212), `base-200` (#1e1e1e), `base-300` (#2d2d2d), `primary` (#00e676 green), `secondary` (#ff9100 orange), `error` (#ff1744 red) - **Scanner page** — vertically-stacked single column, max-width 640px, 24px padding: diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index bda64de..4aaaa1b 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -117,13 +117,27 @@ Detailed conventions live in the individual agent files. The non-negotiables for - Do not commit or push unless the user explicitly asks - Do not delete files as a shortcut; if a file looks unfamiliar, read it first +## Local-only planning docs (`docs/plans/`) + +- The entire `docs/plans/` directory is **gitignored** (see [.gitignore](../.gitignore)). Files there are local working notes only. +- **Never** mention plan files, plan filenames, or contents of `docs/plans/*` in: + - `CHANGELOG.md` + - committed docs (`docs/*.md` outside `docs/plans/`) + - commit messages + - PR titles or PR descriptions + - code comments + - any other tracked file +- Treat plan docs the way you'd treat a personal scratchpad: useful while working, invisible to anyone reading the repository. +- When asked to write a plan, place it under `docs/plans/` and do not stage or commit it. Do not link to it from tracked files (the link would 404 for everyone else). +- If you find an existing tracked file that links into `docs/plans/`, treat that link as a bug and remove the reference. 
+ ## Changelog - User-visible changes (new features, fixes, config/schema changes, security patches) **must** add a bullet under the `[Unreleased]` section of `CHANGELOG.md` in the same PR - Group bullets under `### Added`, `### Changed`, `### Fixed`, `### Security`, `### Removed`, or `### Deprecated` (Keep a Changelog format) - Pure internal refactors, CI-only tweaks, and typo fixes can skip the CHANGELOG — the PR should be labeled `skip-changelog` - The `changelog` CI job blocks merges into `main` when `CHANGELOG.md` wasn't touched and the label isn't applied -- Full release process: [docs/plans/release-guide.md](../docs/plans/release-guide.md) +- CHANGELOG bullets describe **what changed in the product**, never **what plan was followed**. Don't reference plan filenames or phase numbers from `docs/plans/`. ## Releases diff --git a/CHANGELOG.md b/CHANGELOG.md index 61d2715..4249266 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,8 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - Canonical `GET /api/ws` listener WebSocket route. The existing `GET /ws` - remains as a compatibility alias that delegates to the same handler; - retirement of the alias is tracked in the native-API design plan. The + remains as a compatibility alias that delegates to the same handler. The frontend now connects to `/api/ws`, and the Vite dev proxy covers both paths. 
From 6f49a8ae7d0f1dd35d0a4e21bd0cb2799ec38542 Mon Sep 17 00:00:00 2001 From: Randy Hammond Date: Sat, 25 Apr 2026 15:31:01 -0400 Subject: [PATCH 17/27] feat(auth): add os_session cookie + dual-auth on /api/calls/:id/audio (#25) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - New auth.SetSessionCookie / ClearSessionCookie helpers (HttpOnly, Secure when HTTPS, SameSite=Strict, Path=/api) - POST /api/auth/login and /api/auth/refresh issue os_session alongside the existing access JWT response; POST /api/auth/logout clears it - New middleware.OptionalJWTOrSessionAuth resolves identity from, in priority order: bearer header, os_session cookie (guarded by Sec-Fetch-Site), anonymous - GET /api/calls/:id/audio swapped to the new middleware; every other route is unchanged. Bearer flow continues to work everywhere. - Routes.RegisterRoutes now promotes deps.Hub into the WSDisconnecter interface only when the concrete pointer is non-nil, fixing a pre-existing typed-nil interface footgun on the logout path. Tests: - backend/internal/auth/cookie_test.go: SetSessionCookie / ClearSessionCookie flag matrix - backend/internal/handler/routes/auth_test.go: login/refresh/logout cookie issuance and rotation - backend/internal/handler/routes/audio_test.go (new): full dual-auth matrix on the audio route — bearer, cookie+same-origin, cookie+missing Sec-Fetch-Site, cookie+cross-site (publicAccess on/off), stale cookie fallthrough (publicAccess on/off), anonymous (publicAccess on/off) go vet, go build, go test ./... all clean. Backwards compatible — no frontend changes. 
--- CHANGELOG.md | 9 + backend/internal/auth/cookie.go | 31 ++ backend/internal/auth/cookie_test.go | 65 ++++ backend/internal/handler/auth/auth.go | 11 + backend/internal/handler/routes/audio_test.go | 325 ++++++++++++++++++ backend/internal/handler/routes/auth_test.go | 112 ++++++ backend/internal/handler/routes/routes.go | 12 +- backend/internal/middleware/auth.go | 80 +++++ 8 files changed, 643 insertions(+), 2 deletions(-) create mode 100644 backend/internal/handler/routes/audio_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 4249266..3d64283 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added +- Session cookie (`os_session`) issued on login and refresh, cleared on + logout. The `GET /api/calls/:id/audio` route now accepts authentication + via either the existing `Authorization: Bearer` header or the new + cookie, so `