From acb22abc76f4fdf93e3a097bb9cd07d2a795fee3 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Tue, 28 Apr 2026 16:06:58 +0200 Subject: [PATCH 01/42] Fix toktrack version drift --- README.md | 4 +- bun.lock | 8 +- docs/review/README.md | 67 ---- docs/review/architecture-review.md | 70 ---- docs/review/code-review.md | 54 --- docs/review/dashboard-review.md | 65 --- docs/review/fixed-findings.md | 586 ---------------------------- docs/review/performance-review.md | 67 ---- docs/review/security-review.md | 63 --- docs/review/server-review.md | 55 --- docs/review/test-review.md | 94 ----- shared/toktrack-version.d.ts | 4 +- shared/toktrack-version.js | 13 +- tests/unit/toktrack-version.test.ts | 110 +++++- 14 files changed, 128 insertions(+), 1132 deletions(-) delete mode 100644 docs/review/README.md delete mode 100644 docs/review/architecture-review.md delete mode 100644 docs/review/code-review.md delete mode 100644 docs/review/dashboard-review.md delete mode 100644 docs/review/fixed-findings.md delete mode 100644 docs/review/performance-review.md delete mode 100644 docs/review/security-review.md delete mode 100644 docs/review/server-review.md delete mode 100644 docs/review/test-review.md diff --git a/README.md b/README.md index b647618..c19f2b3 100644 --- a/README.md +++ b/README.md @@ -98,8 +98,8 @@ Then either: The auto-import path prefers: 1. local `toktrack` -2. `bunx toktrack@2.5.0` -3. `npx --yes toktrack@2.5.0` +2. `bunx toktrack@2.6.0` +3. 
`npx --yes toktrack@2.6.0` ## Common Commands diff --git a/bun.lock b/bun.lock index 55d5f4b..7d75ec6 100644 --- a/bun.lock +++ b/bun.lock @@ -9,7 +9,7 @@ "i18next": "^26.0.6", "react-i18next": "^17.0.4", "react-is": "^19.2.5", - "toktrack": "2.5.0", + "toktrack": "2.6.0", }, "devDependencies": { "@eslint/js": "^9.39.4", @@ -49,7 +49,7 @@ "jsdom": "^29.0.2", "lucide-react": "^1.8.0", "prettier": "^3.8.3", - "prettier-plugin-tailwindcss": "^0.7.2", + "prettier-plugin-tailwindcss": "^0.8.0", "react": "^19.2.5", "react-dom": "^19.2.5", "recharts": "^3.8.1", @@ -1063,7 +1063,7 @@ "prettier": ["prettier@3.8.3", "", { "bin": { "prettier": "bin/prettier.cjs" } }, "sha512-7igPTM53cGHMW8xWuVTydi2KO233VFiTNyF5hLJqpilHfmn8C8gPf+PS7dUT64YcXFbiMGZxS9pCSxL/Dxm/Jw=="], - "prettier-plugin-tailwindcss": ["prettier-plugin-tailwindcss@0.7.2", "", { "peerDependencies": { "@ianvs/prettier-plugin-sort-imports": "*", "@prettier/plugin-hermes": "*", "@prettier/plugin-oxc": "*", "@prettier/plugin-pug": "*", "@shopify/prettier-plugin-liquid": "*", "@trivago/prettier-plugin-sort-imports": "*", "@zackad/prettier-plugin-twig": "*", "prettier": "^3.0", "prettier-plugin-astro": "*", "prettier-plugin-css-order": "*", "prettier-plugin-jsdoc": "*", "prettier-plugin-marko": "*", "prettier-plugin-multiline-arrays": "*", "prettier-plugin-organize-attributes": "*", "prettier-plugin-organize-imports": "*", "prettier-plugin-sort-imports": "*", "prettier-plugin-svelte": "*" }, "optionalPeers": ["@ianvs/prettier-plugin-sort-imports", "@prettier/plugin-hermes", "@prettier/plugin-oxc", "@prettier/plugin-pug", "@shopify/prettier-plugin-liquid", "@trivago/prettier-plugin-sort-imports", "@zackad/prettier-plugin-twig", "prettier-plugin-astro", "prettier-plugin-css-order", "prettier-plugin-jsdoc", "prettier-plugin-marko", "prettier-plugin-multiline-arrays", "prettier-plugin-organize-attributes", "prettier-plugin-organize-imports", "prettier-plugin-sort-imports", "prettier-plugin-svelte"] }, 
"sha512-LkphyK3Fw+q2HdMOoiEHWf93fNtYJwfamoKPl7UwtjFQdei/iIBoX11G6j706FzN3ymX9mPVi97qIY8328vdnA=="], + "prettier-plugin-tailwindcss": ["prettier-plugin-tailwindcss@0.8.0", "", { "peerDependencies": { "@ianvs/prettier-plugin-sort-imports": "*", "@prettier/plugin-hermes": "*", "@prettier/plugin-oxc": "*", "@prettier/plugin-pug": "*", "@shopify/prettier-plugin-liquid": "*", "@trivago/prettier-plugin-sort-imports": "*", "@zackad/prettier-plugin-twig": "*", "prettier": "^3.0", "prettier-plugin-astro": "*", "prettier-plugin-css-order": "*", "prettier-plugin-jsdoc": "*", "prettier-plugin-marko": "*", "prettier-plugin-multiline-arrays": "*", "prettier-plugin-organize-attributes": "*", "prettier-plugin-organize-imports": "*", "prettier-plugin-sort-imports": "*", "prettier-plugin-svelte": "*" }, "optionalPeers": ["@ianvs/prettier-plugin-sort-imports", "@prettier/plugin-hermes", "@prettier/plugin-oxc", "@prettier/plugin-pug", "@shopify/prettier-plugin-liquid", "@trivago/prettier-plugin-sort-imports", "@zackad/prettier-plugin-twig", "prettier-plugin-astro", "prettier-plugin-css-order", "prettier-plugin-jsdoc", "prettier-plugin-marko", "prettier-plugin-multiline-arrays", "prettier-plugin-organize-attributes", "prettier-plugin-organize-imports", "prettier-plugin-sort-imports", "prettier-plugin-svelte"] }, "sha512-V8ITGH87yuBDF6JpEZTOVlUz/saAwqb8f3HRgUj8Lh+tGCcrmorhsLpYqzygwFwK0PE2Ib6Mv3M7T/uE2tZV1g=="], "pretty-format": ["pretty-format@27.5.1", "", { "dependencies": { "ansi-regex": "^5.0.1", "ansi-styles": "^5.0.0", "react-is": "^17.0.1" } }, "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ=="], @@ -1255,7 +1255,7 @@ "to-valid-identifier": ["to-valid-identifier@1.0.0", "", { "dependencies": { "@sindresorhus/base62": "^1.0.0", "reserved-identifiers": "^1.0.0" } }, "sha512-41wJyvKep3yT2tyPqX/4blcfybknGB4D+oETKLs7Q76UiPqRpUJK3hr1nxelyYO0PHKVzJwlu0aCeEAsGI6rpw=="], - "toktrack": ["toktrack@2.5.0", "", { "os": [ "linux", "win32", "darwin", 
], "cpu": [ "x64", "arm64", ], "bin": { "toktrack": "bin/run.js" } }, "sha512-WJYj86vF6uQ2NA3eBQZ7bzN9Idrn12i3u2vEqT7FkjuTAz6zaRXX1/Z1JjIyurc7OR8xfzE78PiONhCTl0T3jw=="], + "toktrack": ["toktrack@2.6.0", "", { "os": [ "linux", "win32", "darwin", ], "cpu": [ "x64", "arm64", ], "bin": { "toktrack": "bin/run.js" } }, "sha512-Gq6Q7hsMc8i3Ld4gG7pCgG63D4QADPDjquWiQ8lRsmio1+WXSK+7E85MLNAiHthb1Qdwv2Tsk9uVa90YPVbAxQ=="], "tough-cookie": ["tough-cookie@6.0.1", "", { "dependencies": { "tldts": "^7.0.5" } }, "sha512-LktZQb3IeoUWB9lqR5EWTHgW/VTITCXg4D21M+lvybRVdylLrRMnqaIONLVb5mav8vM19m44HIcGq4qASeu2Qw=="], diff --git a/docs/review/README.md b/docs/review/README.md deleted file mode 100644 index c838caa..0000000 --- a/docs/review/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# TTDash Ultra-Deep Review - -## Kurzfazit - -Der Codebase-Stand ist insgesamt solide: die wichtigsten Qualitaetsgates laufen gruens, die Testlandschaft ist deutlich ueber Durchschnitt, und mehrere fruehere Security-Luecken sind inzwischen sauber abgesichert. Die groessten aktuellen Risiken liegen nicht in akuten Produktionsfehlern, sondern in Struktur, Wartbarkeit, Testsignalen und der wachsenden Dichte der Dashboard-Oberflaeche. - -## Durchgefuehrte Evidenzarbeit - -- Statische Review von `src/**`, `server.js`, `server/**`, `shared/**`, `tests/**`, `docs/architecture.md`, `docs/testing.md`, `vite.config.ts`, `vitest.config.ts`, `.dependency-cruiser.cjs` -- Ausgefuehrte Gates: - - `npm run check` -> bestanden - - `npm run test:unit` -> bestanden (`99` Dateien, `369` Tests bestanden, `1` uebersprungen) - - `npm run test:unit:coverage` -> bestanden - - `npm run test:timings` -> bestanden - - `npm run build:app` -> bestanden -- Architektur-Spezialfall: - - `npm run test:architecture` schlug einmal fehl, weil `tests/architecture/frontend-layers.test.ts` im Gesamtlauf in den `5000ms` Timeout lief - - isolierter Re-Run derselben Datei bestand, aber der langsamste Fall lag bei ca. 
`4950ms` - -## Wichtigste Querschnittsbefunde - -1. Die groessten Wartbarkeitsrisiken sitzen inzwischen weniger im Server-Entrypoint und staerker in wenigen Frontend-Mega-Modulen: `use-dashboard-controller.ts`, `SettingsModal.tsx`, `DashboardSections.tsx`, `FilterBar.tsx`. -2. Die Architektur-Grenzen sind auf Repo-Ebene gut abgesichert, aber die Anwendungslogik ist intern noch zu stark zentralisiert und ueber breite Props- und Return-Surfaces gekoppelt. -3. Die Security-Hardening-Basis ist fuer den Default-Loopback-Betrieb gut; lokale Read-APIs sind per per-start Session-Token geschuetzt, `TTDASH_ALLOW_REMOTE=1` bleibt ein separater token-gesicherter Betriebsmodus, und die Style-CSP kommt ohne `unsafe-inline` aus. -4. Die Testbasis ist breit, aber die gemeldete Coverage unterschaetzt nicht nur Luecken, sondern blendet ganze produktive Runtime-Bereiche aus. -5. Die Dashboard-Oberflaeche ist funktional stark und accessibility-bewusst, wirkt aber an mehreren Stellen ueberladen und pflegt zu viele Interaktionsmuster in zu wenigen Komponenten. 
- -## Wichtige Messpunkte - -- Urspruengliche Coverage-Summary aus `npm run test:unit:coverage`: - - Statements `76.27%` - - Branches `65.71%` - - Functions `76.43%` - - Lines `78.61%` -- Aktueller Stand zu `test-review.md / H-02`: - - `npm run test:unit:coverage` zaehlt inzwischen `src/**/*.{ts,tsx}`, `server.js`, `server/**/*.js`, `shared/**/*.js` und `usage-normalizer.js` - - Die neue Produkt-Runtime-Baseline liegt bei Statements `72.85%`, Branches `63.01%`, Functions `74.97%`, Lines `73.88%` - - Server-Entrypoints, Child-Process-Pfade und lazy Dashboard-Sektionen bleiben dadurch sichtbar, auch wenn sie nicht alle direkt im Vitest-Hauptprozess hohe Line-Coverage erzeugen -- Langsamste Test-Suites aus `npm run test:timings`: - - `tests/integration/server-background.test.ts` -> `5.785s` - - `tests/integration/server-auto-import.test.ts` -> `4.521s` - - `tests/unit/server-helpers-runner-process.test.ts` -> `2.778s` - - `tests/frontend/settings-modal-language.test.tsx` -> `2.443s` - - `tests/frontend/drill-down-modal-motion.test.tsx` -> `2.013s` -- Build-Signal aus `npm run build:app`: - - `charts-vendor` -> `422.02 kB` raw / `119.59 kB` gzip - - `index` -> `212.32 kB` raw / `57.55 kB` gzip - - `react-vendor` -> `200.38 kB` raw / `64.43 kB` gzip - - `motion-vendor` -> `125.85 kB` raw / `41.08 kB` gzip - - `i18n` -> `119.68 kB` raw / `36.70 kB` gzip - -## Berichte - -- [code-review.md](./code-review.md) -- [architecture-review.md](./architecture-review.md) -- [security-review.md](./security-review.md) -- [performance-review.md](./performance-review.md) -- [dashboard-review.md](./dashboard-review.md) -- [server-review.md](./server-review.md) -- [test-review.md](./test-review.md) -- [fixed-findings.md](./fixed-findings.md) - -## Bewertungslogik - -- `Hoch`: strukturelles oder betriebliches Risiko mit klarer Folgewirkung auf Aenderungssicherheit, Security oder Teamtempo -- `Mittel`: echtes Problem mit begrenztem oder lokalem Blast Radius -- `Niedrig`: saubere 
Verbesserung mit niedrigerem Risiko, aber gutem Langzeitnutzen diff --git a/docs/review/architecture-review.md b/docs/review/architecture-review.md deleted file mode 100644 index 4d47d92..0000000 --- a/docs/review/architecture-review.md +++ /dev/null @@ -1,70 +0,0 @@ -# Architecture Review - -## Kurzfazit - -Die Repo-weiten Guardrails sind gut gedacht und weitgehend wirksam. Das groesste Architekturproblem ist nicht fehlende Regelung, sondern zu viel funktionale Konzentration in wenigen Modulen, waehrend andere Teile bereits sauber extrahiert wurden. - -## Was bereits gut ist - -- `dependency-cruiser`, `eslint-plugin-boundaries` und `archunit` decken unterschiedliche Strukturfragen sinnvoll ab -- `shared/dashboard-domain.js` wird bereits sowohl server- als auch frontendseitig genutzt -- Die Runtime-Trennung `src` vs `server` vs `shared` ist dokumentiert und im Check-Setup sichtbar verankert - -## Findings - -### H-01 - `server.js` ist die dominante Architektur-Engstelle - -**Referenzen:** `server.js` insgesamt, besonders `75-92`, `444-542`, `1747-1774`, `2208-2451`, `2482-2975` - -`server.js` umfasst `2992` Zeilen und rund `125` Funktionsdefinitionen. Darin liegen gleichzeitig: - -- CLI-Argumente -- Port- und Runtime-Setup -- Datei- und Prozess-Locks -- Background-Registry -- Settings- und Usage-Persistenz -- HTTP-Guards und Routing -- Auto-Import samt Runner-Erkennung -- PDF-Reporting -- Static Serving -- Shutdown-Pfade - -Das erzeugt hohe Aenderungskopplung. Ein Bugfix an Import- oder Background-Logik verlaengert unmittelbar die Review-Flaeche fuer HTTP-, CLI- und Persistenzcode. - -**Empfehlung:** das Runtime-Modul weiter schneiden, z. B. `server/settings-runtime`, `server/data-runtime`, `server/background-runtime`, `server/auto-import-runtime`, `server/http-router`. 
- -### H-02 - Der Settings-Vertrag ist client- und serverseitig dupliziert - -**Referenzen:** `server.js:75-92`, `server.js:1403-1485`, `src/lib/app-settings.ts:20-91`, `src/lib/dashboard-preferences.ts:124-220` - -Defaults und Normalisierung fuer Settings leben mehrfach: - -- Server: `DEFAULT_SETTINGS`, `normalizeProviderLimits`, `normalizeDefaultFilters`, `normalizeSectionVisibility`, `normalizeSectionOrder`, `normalizeSettings` -- Frontend: `DEFAULT_APP_SETTINGS`, `normalizeAppSettings`, `normalizeDashboardDefaultFilters`, `normalizeDashboardSectionVisibility`, `normalizeDashboardSectionOrder` - -Die Logik ist semantisch aehnlich, aber nicht aus einer gemeinsamen Quelle abgeleitet. Das ist ein Drift-Risiko: ein neuer Settings-Key, ein neues Default oder eine veraenderte Normalisierungsregel kann unbemerkt asymmetrisch werden. - -**Empfehlung:** einen gemeinsamen Settings-Schema- und Normalisierungs-Layer nach `shared/**` ziehen. - -### M-01 - Die Dashboard-Orchestrierung haengt an sehr breiten Props- und Return-Surfaces - -**Referenzen:** `src/hooks/use-dashboard-controller.ts:665-760`, `src/components/dashboard/DashboardSections.tsx:179-219`, `src/components/Dashboard.tsx:301-456` - -Der Controller liefert `95` Rueckgabefelder. `DashboardSectionsProps` umfasst `35` Inputs. `Dashboard.tsx` agiert dadurch eher als Durchreicher denn als klarer Kompositionspunkt. - -Das ist architektonisch teuer: - -- grosse Merge-Flaechen -- hohe Aenderungskosten fuer kleine Features -- breite Re-Render- und Testflaechen -- schwache lokale Ownership der Subsysteme - -**Empfehlung:** Sections ueber bewusstere View-Model-Bundles versorgen, z. B. `filtersViewModel`, `forecastViewModel`, `tableViewModel`, `actions`. 
- -### M-02 - Die gute Shared-Domain-Extraktion wurde noch nicht bis zu den Settings und UI-Regeln durchgezogen - -**Referenzen:** `shared/dashboard-domain.js`, `src/lib/data-transforms.ts:8-16`, `src/lib/calculations.ts:6-16` - -Bei Metrics und Transformationsregeln ist die Richtung bereits richtig: Frontend und Server ziehen gemeinsame Logik aus `shared/dashboard-domain.js`. Bei Settings, Presets und Teilen der UI-Regeln fehlt dieselbe Konsequenz noch. - -**Empfehlung:** dieselbe Shared-Strategie selektiv auf Settings-Schema, Preset-Definitionen und andere rein fachliche Regeln ausweiten. diff --git a/docs/review/code-review.md b/docs/review/code-review.md deleted file mode 100644 index 18a36a9..0000000 --- a/docs/review/code-review.md +++ /dev/null @@ -1,54 +0,0 @@ -# Code Review - -## Kurzfazit - -Die Codequalitaet ist im Mittel gut: Typsicherheit, Namensgebung, Testtiefe und defensive API-Fehlerbehandlung sind ueberwiegend stark. Die Hauptschwaechen sind zentrale God-Objects, Dopplungen in der UI-Logik und etwas liegengebliebener Dead Code. - -## Was bereits gut ist - -- Die produktiven Grenzschichten sind bewusst organisiert (`src`, `server`, `shared`) -- Gemeinsame Domainlogik fuer Datenfilterung und Kennzahlen wurde bereits nach `shared/dashboard-domain.js` gezogen -- Fehlertexte und i18n-Pfade sind grossenteils konsistent -- Upload-, Import- und Export-Flows haben klare Fehlerrueckmeldungen und sichtbare Busy-States - -## Findings - -### H-01 - `use-dashboard-controller.ts` ist ein God-Hook mit sehr breiter API - -**Referenzen:** `src/hooks/use-dashboard-controller.ts:102-760`, besonders `69-89`, `393-657`, `665-760` - -Der Hook vereint Bootstrap, React Query Orchestrierung, Theme-Anwendung, i18n-Synchronisierung, Toasts, Datei-Import/Export, PDF-Download, Scroll-Navigation, Error-State-Bildung und UI-Dialogsteuerung in einem Modul mit `763` Zeilen. Der finale Return-Block exportiert `95` Felder. 
- -Das ist der groesste Clean-Code-Befund im Frontend: die Datei hat mehrere Gruende, sich gleichzeitig zu aendern, und entkoppelt weder Browser-I/O noch Produktlogik sauber. Die Tests sind deshalb gezwungen, grosse Zustandsflaechen zu kennen, statt kleine Einheiten gezielt zu pruefen. - -**Empfehlung:** in kleinere Hooks oder Controller-Slices zerlegen, z. B. Bootstrap/Load-State, Settings-Orchestrierung, Data-Transfer, Report/Download, UI-Only Actions. Browser-I/O wie Blob-Downloads aus dem Hook in kleine Helper oder View-Layer verschieben. - -### M-01 - Preset-Logik ist doppelt implementiert - -**Referenzen:** `src/components/layout/FilterBar.tsx:71-117`, `src/hooks/use-dashboard-filters.ts:17-45` - -`resolveActivePreset(...)` und `resolvePresetRange(...)` codieren dieselben Datumspresets (`7d`, `30d`, `month`, `year`, `all`) getrennt. Eine Aenderung an den Preset-Regeln kann deshalb dazu fuehren, dass der angewendete Filter korrekt ist, die aktive UI-Markierung aber etwas anderes zeigt. - -Das ist ein klassischer Konsistenz- und Duplikationsbefund: fachliche Regeln sollten fuer Anwenden und Anzeigen aus derselben Quelle kommen. - -**Empfehlung:** Preset-Definitionen in ein gemeinsames Modul ueberfuehren und sowohl Hook als auch FilterBar nur noch daraus ableiten lassen. - -### M-02 - `SettingsModal.tsx` mischt zu viele Verantwortlichkeiten - -**Referenzen:** `src/components/features/settings/SettingsModal.tsx:1-1026`, besonders `53-83`, `219-356` - -Die Settings-Komponente vereint Sprachwahl, Motion-Einstellungen, Provider-Limits, Default-Filter, Section-Visibility und -Order, Backup-Import/Export sowie einen live geladenen Toktrack-Versionsstatus in einer einzigen Datei mit `1026` Zeilen. - -Das ist nicht nur ein Architekturthema, sondern auch ein Code-Consistency-Problem: je groesser die Datei wird, desto wahrscheinlicher werden inkonsistente Patterns fuer State, Reset, Busy-Zustaende und Fehlerbehandlung. 
- -**Empfehlung:** in klar getrennte Subviews oder Dialog-Sektionen zerlegen, z. B. `General`, `Defaults`, `Sections`, `Data`, `Versions`. - -### N-01 - Es gibt ungenutzte Hooks mit `0%` Coverage - -**Referenzen:** `src/hooks/use-theme.ts:1-21`, `src/hooks/use-provider-limits.ts:1-17` - -Beide Hooks sind im Repo vorhanden, aber weder produktiv importiert noch testseitig abgedeckt. Gleichzeitig melden die Coverage-Daten fuer beide `0%`. - -Das ist kleiner als die God-Hook-Befunde, aber trotzdem wichtig: unbenutzter Code erzeugt Pflegekosten, verlaengert Suchraeume bei Refactors und verwirrt Guardrails, wenn er trotz `no-orphans-src` Regel nicht als Problem auftaucht. - -**Empfehlung:** entfernen oder bewusst wieder integrieren und dann gezielt testen. diff --git a/docs/review/dashboard-review.md b/docs/review/dashboard-review.md deleted file mode 100644 index 414eaba..0000000 --- a/docs/review/dashboard-review.md +++ /dev/null @@ -1,65 +0,0 @@ -# Dashboard Review - -> Historical note: Findings M-01, M-02, N-01, and N-02 are resolved historical -> review items. See `docs/review/fixed-findings.md` for the implementation -> status and guardrails. - -## Kurzfazit - -Das Dashboard ist funktional stark und sichtbar mit Fokus auf Accessibility, Internationalisierung und lehrreiche Analytik gebaut. Die Kehrseite ist eine hohe Oberflaechen- und Interaktionsdichte, die sowohl Nutzer als auch Maintainer belastet. 
- -## Was bereits gut ist - -- Leere-, Fehler- und Erfolgszustaende sind bewusst modelliert -- Filter, Motion und Dialoge sind durch viele gezielte Frontend-Tests abgesichert -- Deutsche und englische Terminologie werden aktiv gepflegt -- Forecast-, Drilldown- und Tabellenoberflaechen sind keine bloessen "nice to have", sondern tief integriert - -## Findings - -### M-01 - Der Settings-Dialog ist fuer eine einzelne Oberflaeche ueberladen - -**Referenzen:** `src/components/features/settings/SettingsModal.tsx:53-83`, `219-356`, gesamte Datei mit `1026` Zeilen - -Nutzer bekommen in einem einzigen Modal gleichzeitig: - -- Sprachumschaltung -- Motion-Einstellungen -- Default-Filter -- Provider-Limits -- Sichtbarkeit und Reihenfolge der Sektionen -- Settings- und Daten-Import/Export -- Versionsstatus von Toktrack -- Data-Status und Load-Quelle - -Das ist fuer Power-User noch beherrschbar, fuer neue oder seltene Nutzer aber schnell ein "scroll and search" Workflow statt einer gefuehrten Konfiguration. - -**Empfehlung:** in klar getrennte Bereiche oder Tabs schneiden und "day-to-day" Einstellungen von "advanced / admin" Funktionen trennen. - -### M-02 - Die Filterleiste kombiniert zu viele Interaktionsmuster in einer Komponente - -**Referenzen:** `src/components/layout/FilterBar.tsx` insgesamt, besonders `27-47`, `71-117`, `119-317` - -Die FilterBar mischt ViewMode-Wechsel, Monatsauswahl, Provider- und Modellchips, aktive Preset-Erkennung und einen komplett eigenen Date-Picker inklusive Focus- und Keyboard-Management. Das zeigt hohe Ambition, macht die Komponente aber schwer ueberschaubar und schwer aenderbar. - -Die Accessibility-Tests belegen, dass der aktuelle Zustand funktioniert. Gleichzeitig zeigt die Menge an Focus- und Overlay-Logik, wie fragil dieser Bereich werden kann. - -**Empfehlung:** Date-Picker und Chip-Filter in klar getrennte, kleinere Subkomponenten auslagern. 
- -### N-01 - Die Action-Landschaft ist reich, aber verstreut - -**Referenzen:** `src/components/Dashboard.tsx:219-456` - -Upload, Auto-Import, Export, Settings, Help, Report, Filter, Command Palette und section-based Navigation sind ueber Header, Empty State, Settings, Dialoge und Command Palette verteilt. Das erhoeht die Entdeckbarkeit, aber auch den kognitiven Overhead. - -Vor allem fuer seltene Aufgaben wie Backups, Reset oder Toktrack-Versionsstatus ist nicht immer klar, ob sie "daily use" oder "advanced maintenance" sind. - -**Empfehlung:** primaere Alltagsaktionen deutlicher von Wartungs- und Diagnoseaktionen trennen. - -### N-02 - Die Dashboard-Struktur ist intern deutlich breiter als von aussen sichtbar - -**Referenzen:** `src/components/dashboard/DashboardSections.tsx:179-219`, `src/components/Dashboard.tsx:301-390` - -`DashboardSectionsProps` umfasst `35` Inputs. Das ist weniger ein direkter UX-Bug als ein Hinweis darauf, dass das Dashboard intern bereits sehr viele Konzepte gleichzeitig aufspannt. Solche Breite endet oft spaeter als inkonsistente UX. - -**Empfehlung:** pro Dashboard-Sektion bewusstere View-Model-Grenzen definieren, damit Fach- und UI-Entscheidungen wieder lokaler werden. diff --git a/docs/review/fixed-findings.md b/docs/review/fixed-findings.md deleted file mode 100644 index 7803c94..0000000 --- a/docs/review/fixed-findings.md +++ /dev/null @@ -1,586 +0,0 @@ -# Fixed Findings - -## 2026-04-27 - -### test-review.md / H-01 - -- Status: fixed -- Scope: simple architecture rules no longer depend on repeated ArchUnit project scans. `tests/architecture/source-graph.ts` now owns one cached `src/**` file scan, TypeScript-based import/export parsing, alias resolution for `@/...`, relative import resolution, extension resolution, and `index` module resolution. The frontend layer, unused-hook, hook-naming, and shared-UI-placement tests use this shared graph while the feature-slice diagram remains on ArchUnit where its diagram model adds value. 
-- Guardrails: `tests/architecture/frontend-layers.test.ts` still blocks hooks from importing components, lib core from importing hooks/components, lib React modules from reaching back into hooks/components, and type modules from depending on components/hooks/lib. `tests/architecture/unused-hooks.test.ts`, `hook-naming.test.ts`, and `shared-ui-placement.test.ts` now reuse the same source graph so future test-layer changes do not reintroduce separate slow scans. `tests/architecture/source-graph.test.ts` covers static imports, re-exports, side-effect imports, and dynamic imports with options. -- Follow-up quality fixes during implementation: - - Dynamic `import(...)` calls are parsed alongside static imports and re-exports, so lazy edges stay visible to the architecture guardrails instead of only top-level declarations being checked. - - The formerly near-timeout `hooks must not depend on components` rule dropped from the historical `~4950ms` flake boundary to well below `100ms` in the targeted architecture run, without increasing global or local timeouts. - - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. -- Validation: - - `npm run test:architecture -- --reporter=verbose` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run check:deps` - - `git diff --check` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 1 minor status-line suggestion already satisfied by the current `- Status: fixed` text, round 3: 1 dynamic-import options issue fixed, final round: 0 issues - -### test-review.md / H-02 - -- Status: fixed -- Scope: Vitest coverage now reports against the product runtime instead of the former narrow frontend slice. 
`vitest.config.ts` includes `src/**/*.{ts,tsx}`, `server.js`, `server/**/*.js`, `shared/**/*.js`, and `usage-normalizer.js`, while excluding only TypeScript declarations, test files, and locale JSON assets from the configured runtime denominator. -- Guardrails: `tests/unit/vitest-coverage-config.test.ts` locks the coverage includes, excludes, and broad-denominator global thresholds. The thresholds ratchet the new signal at Statements `70`, Branches `60`, Functions `70`, and Lines `70`, below the measured broad baseline so CI fails on real regressions without making the first honest denominator brittle. -- Follow-up quality fixes during implementation: - - The current `npm run test:unit:coverage` baseline is Statements `72.85%`, Branches `63.01%`, Functions `74.97%`, Lines `73.88%` across the broader runtime scope. - - Server entrypoints, local server modules, shared runtime contracts, App/main entry files, and lazy dashboard sections are now visible in the coverage report instead of being hidden outside the denominator. - - `docs/testing.md` documents the broader denominator and clarifies that subprocess-spawned CLI/server paths remain behaviorally covered by integration, background, and Playwright tests even when V8 main-process line attribution stays lower. - - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. -- Validation: - - `npx vitest run --project unit tests/unit/vitest-coverage-config.test.ts --reporter=verbose` - - `npm run test:unit:coverage` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run check:deps` - - `npx vitest run --project architecture --reporter=verbose` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues - -### test-review.md / M-01 - -- Status: fixed -- Scope: dead hook visibility now has an explicit architecture guardrail. 
The two originally cited unused hook files, `src/hooks/use-theme.ts` and `src/hooks/use-provider-limits.ts`, had already been removed during the earlier `code-review.md / N-01` cleanup; this phase strengthens the test-review guardrail so the same class of dead runtime hook cannot silently return. -- Guardrails: `tests/architecture/unused-hooks.test.ts` now treats a hook as live only when it is reachable from `src/main.tsx`, rather than merely imported by any production file. A focused synthetic regression covers a dead hook cluster where one unused hook imports another, so future dead-code islands cannot satisfy the guardrail by importing each other. -- Follow-up quality fixes during implementation: - - `docs/architecture.md` and `docs/testing.md` now describe the hook rule as app-entrypoint reachability, matching the enforced behavior. - - `dependency-cruiser` remains responsible for dependency boundaries and cycle/orphan visibility, while `npm run test:architecture` owns the precise unused-hook signal that the original finding needed. - - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. -- Validation: - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `git diff --check` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues - -### test-review.md / M-02 - -- Status: fixed -- Scope: the highest-cost and hang-prone test paths were split, made deterministic, and given bounded cleanup without changing production behavior. The auto-import singleton integration test now coordinates the fake Toktrack runner through start/release sentinel files instead of a fixed 2-second delay, and the previous background catch-all suite was split into focused background prefix, selection, concurrency, registry, and startup CLI files. 
-- Guardrails: the `integration-background` Vitest project now allows file-level scheduling with a capped `maxWorkers: 2`, so independent background process tests can overlap without turning the test run into an unbounded process fan-out. `docs/testing.md` now documents deterministic subprocess coordination and small focused background files as the expected pattern. -- Follow-up quality fixes during implementation: - - The final `npm run test:timings` run now reports the auto-import singleton test at `1.371s` instead of the historical `3.326s`, and `tests/integration/server-auto-import.test.ts` at `2.669s` instead of the historical `4.521s`. - - The old `tests/integration/server-background.test.ts` monolith no longer dominates as one `5.785s` suite; its formerly mixed cases now appear as focused files, with the slowest background slice at `3.208s` in the final coverage/timing run. - - Test hangs were addressed at the harness level: server readiness/shutdown probes now have abort timeouts, CLI subprocess helpers have bounded timeouts and SIGKILL fallback, failed standalone server startup cleans up the spawned process, background cleanup force-stops leftover registry PIDs owned by the test root, and shared integration servers now wait for process shutdown in `afterAll`. - - `tests/unit/server-helpers-runner-process.test.ts` remains intentionally subprocess-backed where shell, `PATH`, timeout, and runner fallback behavior are the thing under test. - - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. 
-- Validation: - - `npx vitest run --project integration tests/integration/server-auto-import.test.ts tests/integration/server-startup-cli.test.ts --project integration-background tests/integration/server-background.test.ts tests/integration/server-background-selection.test.ts tests/integration/server-background-concurrency.test.ts tests/integration/server-background-registry.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-auto-import.test.ts tests/integration/server-startup-cli.test.ts tests/integration/server-local-auth.test.ts --project integration-background tests/integration/server-background.test.ts tests/integration/server-background-selection.test.ts tests/integration/server-background-concurrency.test.ts tests/integration/server-background-registry.test.ts --reporter=verbose` - - `npx vitest run --project integration-background tests/integration/server-background-registry.test.ts --reporter=verbose` - - `npx vitest run --project integration-background tests/integration/server-background-selection.test.ts --reporter=verbose` - - `tsc --noEmit` - - `npx vitest run --project integration --project integration-background --reporter=verbose` - - `npm run verify:full` -> final run passed, including Playwright `15 passed` - - `npm run test:timings` -> completed without hanging after the cleanup hardening - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> multiple rounds: 0 issues initially, 4 minor test-harness/test-clarity issues fixed, latest round 0 issues - -### test-review.md / M-03 - -- Status: fixed -- Scope: the Playwright dashboard monolith was split by user journey without changing dashboard runtime behavior. The former `tests/e2e/dashboard.spec.ts` coverage now lives in focused load/upload, forecast/filter, settings/backups, and reporting specs, while command-palette coverage remains separate. 
-- Guardrails: shared E2E auth, state reset, usage seeding, file upload, mocked auto-import/report, download-recording, and dashboard test-hook access now live in `tests/e2e/helpers.ts`. `docs/testing.md` documents journey-based Playwright files so future browser coverage does not grow back into a catch-all suite. -- Follow-up quality fixes during implementation: - - The CSP/browser-error smoke coverage moved to `tests/e2e/dashboard-load-upload.spec.ts`, replacing stale references to the removed dashboard monolith. - - The report-language test now resets both usage and settings before switching locale, making it independent from earlier Playwright file order. - - The largest dashboard-specific E2E files are now focused around settings/backups and command-palette behavior instead of one mixed dashboard file. - - The recurring silent coverage/timing hang was traced to Vitest runs that emitted only the JUnit reporter in this non-interactive gate. `test:unit:coverage` and `test:timings` now keep the JUnit artifact and also emit the `dot` reporter, so long coverage runs show progress and finish cleanly instead of depending on a silent reporter path. - - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. -- Validation: - - `npx vitest run --project unit tests/unit/vitest-coverage-config.test.ts --reporter=verbose` -> passed, including the guardrail for explicit `dot` plus `junit` reporters on coverage-heavy scripts. - - `npm run test:unit:coverage` -> passed with `135` files, `496` tests passed, `1` skipped, and no silent hang after the reporter fix. - - `npm run test:timings` -> passed and printed timing diagnostics; the slowest suites remained existing server integration subprocess paths, while the split E2E work is outside this Vitest-only timing gate. - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` -> passed with `15` Playwright tests after the journey split. 
- - `npm run verify:full` -> final run passed, including format, lint, docstring lint, dependency-cruiser, `tsc --noEmit`, architecture tests, coverage, package verification, production build, and Playwright `15 passed`. - - Targeted Playwright follow-ups passed for `tests/e2e/dashboard-load-upload.spec.ts` and `tests/e2e/dashboard-settings-backups.spec.ts` after CodeRabbit requested locale-resilient label assertions. - - `git diff --check` -> passed after this validation update. - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> rounds 1 and 3 reported minor test/doc clarity issues that were fixed; rounds 2, 4, and 5 reported 0 issues; round 6 reported this missing validation detail and was fixed by this entry; the follow-up review after the validation update reported 0 issues. - -## 2026-04-26 - -### server-review.md / H-01 - -- Status: fixed -- Scope: `server.js` was reduced to dependency/runtime wiring during this phase, then further reduced by `server-review.md / M-01` to an executable CLI/Bin shim. CLI parsing/help moved to `server/cli.js`, startup summaries/browser opening/local auth-session metadata moved to `server/startup-runtime.js`, shared process helpers moved to `server/process-utils.js`, and HTTP server lifecycle, CLI routing, startup sequencing, client errors, and shutdown cleanup moved to `server/server-lifecycle.js`. -- Guardrails: `tests/architecture/server-entrypoint-contract.test.ts` blocks local helper function definitions, `__test__` exports, and direct `http.createServer(...)` calls from returning to `server.js`. `tests/unit/server-cli.test.ts`, `tests/unit/startup-runtime.test.ts`, and `tests/unit/server-lifecycle.test.ts` cover the extracted behavior directly. Existing server helper tests now instantiate `server/data-runtime.js` and `server/auto-import-runtime.js` directly instead of importing `server.js`. 
-- Follow-up quality fixes during implementation: - - The productive `server.js.__test__` helper surface was removed as part of the Entrypoint split; tests now target the owning runtime modules. - - The cross-process file-lock test now loads `server/data-runtime.js` directly in its child process, so it still validates real lock behavior without loading the CLI entrypoint. - - The startup data summary now pluralizes `1 day` versus `N days` correctly without changing the existing cost or token formatting. - - Background-child shutdown now logs unregister failures, suppresses unhandled promise rejections, exits through a finally-style path, and prevents duplicate shutdown completion when graceful close and forced timeout race. - - CLI help now documents the supported `-bg` legacy background alias alongside `-b` and `--background`, so displayed usage matches parser behavior. - - Startup behavior remains intentionally unchanged: auth bootstrap URL output, remote warnings, browser opening, background registration, auto-load logging, package startup, and API routing keep the same runtime contracts. 
-- Validation: - - `npx vitest run --project unit tests/unit/server-cli.test.ts tests/unit/startup-runtime.test.ts tests/unit/server-lifecycle.test.ts tests/unit/server-helpers-network.test.ts tests/unit/server-helpers-runner-core.test.ts tests/unit/server-helpers-runner-process.test.ts tests/unit/server-helpers-file-locks.test.ts --reporter=verbose` - - `npx vitest run --project architecture tests/architecture/server-entrypoint-contract.test.ts --reporter=verbose` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> rounds 1-2: 0 issues; round 3: 2 minor issues fixed; round 4: 1 minor issue fixed - -### server-review.md / M-01 - -- Status: fixed -- Scope: `server.js` is now only the executable CLI/Bin shim. Runtime composition moved to `server/app-runtime.js`, which builds the injected server lifecycle and keeps the background process entrypoint pointed at package-root `server.js`. The undocumented `require('./server.js').bootstrapCli/runCli` import surface was removed; in-process test/server starts now use `createAppRuntime(...)` explicitly. -- Guardrails: `tests/architecture/server-entrypoint-contract.test.ts` now blocks `module.exports`, `exports.*`, local helper functions, `__test__`, direct `http.createServer(...)`, and any `server.js` require target other than `./server/app-runtime`. Playwright's `scripts/start-test-server.js` uses the explicit app-runtime composer so E2E startup still exercises the same server lifecycle without importing the executable shim as a helper module. -- Follow-up quality fixes during implementation: - - Environment-derived CLI/server configuration now lives inside `createAppRuntime(...)`, so test harnesses can set isolated storage, host, and port environment variables before composing the runtime. 
- - `server/app-runtime.js` passes an explicit root `server.js` path to the background runtime, preserving foreground/background CLI behavior after the composition move. - - `docs/architecture.md` documents the new split between the executable shim and the app-runtime composition root. - - Dashboard UI, content, animation, API route behavior, local auth, remote auth, background CLI, packaging, and E2E startup behavior remain unchanged. -- Validation: - - `node -c server.js` - - `node -c server/app-runtime.js` - - `npx vitest run --project architecture tests/architecture/server-entrypoint-contract.test.ts --reporter=verbose` - - `npx vitest run --project unit tests/unit/server-lifecycle.test.ts tests/unit/server-cli.test.ts tests/unit/startup-runtime.test.ts --project integration tests/integration/server-local-auth.test.ts tests/integration/server-remote-auth.test.ts --reporter=verbose` - - `npx vitest run --project integration-background tests/integration/server-background.test.ts --reporter=verbose` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> multiple rounds: 0 issues - -### server-review.md / M-02 - -- Status: fixed -- Scope: mutable server runtime state is now encapsulated in per-runtime services. `server/runtime-state.js` owns the runtime snapshot, startup auto-load flag, singleton runtime lease, and expiring async cache primitives; `server/app-runtime.js`, `server/server-lifecycle.js`, and `server/startup-runtime.js` use the runtime-state service instead of ad hoc mutable flags; `server/auto-import-runtime.js` owns the Auto-Import lease and Toktrack latest-version cache through those services. -- Guardrails: `tests/unit/runtime-state.test.ts` covers runtime snapshots, startup flag isolation, singleton lease behavior, in-flight lookup deduplication, and TTL/reset behavior. 
`tests/architecture/server-runtime-state-contract.test.ts` blocks route-local `autoImportStreamRunning` state and the old free Auto-Import/Toktrack cache variables from returning. -- Follow-up quality fixes during implementation: - - `server/http-router.js` no longer owns an Auto-Import stream flag. It acquires a lease before sending SSE headers, so concurrent starts still return the existing HTTP `409` response without turning into streamed errors. - - `server/auto-import-runtime.js` remains the owner of Auto-Import execution, but the singleton state is now an explicit lease with idempotent release for normal completion, failures, and aborted streams. - - Toktrack latest-version lookups still share one in-flight request and keep separate success/failure TTL behavior, but the cache/promise state is hidden behind `createExpiringAsyncCache(...)`. - - Dashboard UI, content, animation, API paths, response shapes, local auth, remote auth, background startup, and E2E startup behavior remain unchanged. 
-- Validation: - - `node -c server/runtime-state.js` - - `node -c server/app-runtime.js` - - `node -c server/auto-import-runtime.js` - - `node -c server/http-router.js` - - `npx vitest run --project unit tests/unit/runtime-state.test.ts tests/unit/server-lifecycle.test.ts tests/unit/startup-runtime.test.ts tests/unit/server-helpers-runner-process.test.ts --project architecture tests/architecture/server-entrypoint-contract.test.ts tests/architecture/server-runtime-state-contract.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-auto-import.test.ts tests/integration/server-api-routing-runtime.test.ts tests/integration/server-local-auth.test.ts tests/integration/server-remote-auth.test.ts --project integration-background tests/integration/server-background.test.ts --reporter=verbose` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> multiple rounds: 0 issues - -### server-review.md / N-01 - -- Status: fixed -- Scope: Host, Origin, Sec-Fetch-Site, and JSON content-type request policy moved from the broader HTTP utility module into `server/http-request-guards.js`. `server/http-utils.js` remains the compatible facade for router consumers and still owns request body parsing, JSON responses, buffer responses, and API-prefix resolution. -- Guardrails: `tests/unit/http-request-guards.test.ts` covers loopback, wildcard, non-loopback, IPv6, origin, cross-site, and content-type behavior directly. `tests/unit/http-utils.test.ts` now focuses on the HTTP facade and body/response behavior. `tests/architecture/server-http-boundaries.test.ts` keeps request guard policy out of the router and out of generic HTTP utility internals. -- Follow-up quality fixes during implementation: - - The request guard code now tolerates missing `req.headers` defensively while preserving the same rejection path for malformed or missing Host/Origin input. 
- - Wildcard binds now apply the intended socket-local host check before exact bind-host matching, so `Host: 0.0.0.0` is not treated as a trusted client-facing host. - - Body-size and response-header behavior gained focused unit coverage, so the split did not trade broad utility tests for narrower policy-only tests. - - Dashboard UI, content, animation, API paths, response shapes, local auth, remote auth, background startup, and E2E startup behavior remain unchanged. -- Validation: - - `node -c server/http-request-guards.js` - - `node -c server/http-utils.js` - - `npx vitest run --project unit tests/unit/http-request-guards.test.ts tests/unit/http-utils.test.ts --project architecture tests/architecture/server-http-boundaries.test.ts tests/architecture/server-entrypoint-contract.test.ts tests/architecture/server-runtime-state-contract.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-api-guards.test.ts tests/integration/server-remote-auth.test.ts --reporter=verbose` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> multiple rounds: 0 issues - -## 2026-04-25 - -### security-review.md / H-01 - -- Status: fixed -- Scope: explicit non-loopback binding now requires `TTDASH_REMOTE_TOKEN` in addition to `TTDASH_ALLOW_REMOTE=1`. Remote API requests are authenticated centrally before route handling through Bearer auth, `X-TTDash-Remote-Token`, or an HttpOnly same-site cookie; the later `security-review.md / M-01` fix extends the same auth boundary to default loopback sessions. -- Guardrails: `tests/unit/remote-auth.test.ts` covers remote-mode configuration, accepted credential forms, generic rejection paths, timing-safe length-independent comparison behavior, and the browser bootstrap redirect/cookie contract. 
`tests/integration/server-remote-auth.test.ts` covers failed startup without a token, protected remote API reads, cookie bootstrap, and preserved Origin mutation guards. Existing server guard tests continue to cover host validation, malformed paths, oversized payloads, and cross-site rejection. -- Follow-up quality fixes during implementation: - - Remote authentication lives in `server/remote-auth.js` as a focused server boundary, while `server/http-router.js` only applies the injected gate before API routing and static bootstrap handling. - - Remote browser access is supported without frontend changes by visiting `?ttdash_token=<token>` once; the token is moved to an HttpOnly cookie and removed from the redirected URL. - - Background runtime identity checks now send the remote Bearer header when the parent process is running in authenticated remote mode, so remote background management keeps working. - - Startup help and remote warnings now mention the additional token requirement without logging the token value. 
-- Validation: - - `npx vitest run --project unit tests/unit/remote-auth.test.ts tests/unit/http-utils.test.ts tests/unit/background-runtime.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-remote-auth.test.ts tests/integration/server-api-guards.test.ts --reporter=verbose` - - `npx vitest run --project integration-background tests/integration/server-background.test.ts --reporter=verbose` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues, round 4: 0 issues - -### security-review.md / M-01 - -- Status: fixed -- Scope: default loopback servers now generate a per-start local session token and require authentication for every API endpoint, including `/api/usage`, `/api/settings`, `/api/runtime`, and `/api/toktrack/version-status`. Normal dashboard startup stays frictionless because the auto-open URL carries the one-time bootstrap token, which is exchanged for an HttpOnly/SameSite cookie and stripped from the redirected URL. -- Guardrails: `tests/unit/remote-auth.test.ts` covers default local session auth, generated local tokens, explicit test opt-out, shared credential parsing, bootstrap redirects, and generic rejection paths. `tests/integration/server-local-auth.test.ts` covers protected loopback reads, Bearer and cookie access, preserved mutation Origin guards, and restrictive auth-session file permissions. Existing remote, background, persistence, recovery, routing, and E2E tests were updated to authenticate through the same boundary. -- Follow-up quality fixes during implementation: - - The existing `server/remote-auth.js` boundary now owns both local session auth and remote auth so API route handlers stay unaware of the source of the credential. 
- - `server.js` writes the local session metadata to restrictive user config state and prints a `Local Auth URL` only when browser auto-open is disabled. - - Background instance registry entries now carry per-instance auth headers and bootstrap URLs so `ttdash stop`, registry pruning, and no-open background starts remain usable. - - Playwright and integration test helpers bootstrap through the same local session file instead of bypassing the production auth path. -- Residual risk: - - This protects the local HTTP API from unauthenticated loopback access, browser/DNS-rebinding-style access, and other OS users. It does not fully isolate against malware already running as the same OS user, which can target user config files, terminal output, or browser state. -- Validation: - - `npx vitest run --project unit tests/unit/remote-auth.test.ts tests/unit/background-runtime.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-local-auth.test.ts tests/integration/server-api-guards.test.ts tests/integration/server-api-persistence.test.ts tests/integration/server-api-routing-runtime.test.ts tests/integration/server-api-recovery.test.ts tests/integration/server-remote-auth.test.ts --reporter=verbose` - - `npx vitest run --project integration-background tests/integration/server-background.test.ts --reporter=verbose` - - `PLAYWRIGHT_TEST_PORT=3020 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:package` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 2 minor documentation issues fixed, round 4: 0 issues - -### security-review.md / N-01 - -- Status: fixed -- Scope: the server CSP no longer allows `unsafe-inline` styles. 
Shared security headers now live in `server/security-headers.js`, HTML responses get a per-response CSP nonce plus a matching `ttdash-csp-nonce` meta tag, `style-src-elem` is limited to `self` and the nonce, and `style-src-attr 'none'` blocks literal inline style attributes. -- Guardrails: `tests/unit/security-headers.test.ts` covers CSP construction, nonce shape, nonce meta injection, HTML response preparation, and non-HTML header behavior. `tests/integration/server-api-guards.test.ts` checks the strict CSP on authenticated API responses. `tests/e2e/dashboard-load-upload.spec.ts` verifies that the loaded dashboard HTML carries the nonce-backed CSP and that the browser reports no CSP errors while the main dashboard journey runs. -- Follow-up quality fixes during implementation: - - CSP generation moved out of `server.js`, so future header changes have a focused unit-testable boundary. - - `server/http-router.js` now treats HTML static responses separately from other assets, allowing nonce-specific headers without weakening API, JSON, CSS, or JS asset responses. - - React/Recharts/Motion JS-driven style property updates remain allowed because the browser-enforced risk path for this finding is literal inline style attributes or inline style elements; preserving those runtime style properties avoids UI and animation regressions without reintroducing `unsafe-inline`. 
-- Validation: - - `npx vitest run --project unit tests/unit/security-headers.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-api-guards.test.ts --reporter=verbose` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 1 minor grammar issue in `docs/application-stack-reference.md` fixed, round 3: 0 issues - -### performance-review.md / H-01 - -- Status: fixed -- Scope: secondary cost-analysis charts now leave the initial dashboard bundle and load through the section warmup path. Dashboard sections also get an adaptive, deduplicated preload scheduler that starts visible lazy section chunks shortly after the first render and keeps IntersectionObserver preloading far enough ahead of the viewport to avoid visible lazy-loading gaps while scrolling. -- Guardrails: `tests/frontend/dashboard-motion.test.tsx` covers idle/fallback scheduling, deduplication, cancellation, early preloading, and inert hidden content. `tests/unit/dashboard-section-preloading.test.ts` covers visible-section queue ordering, hidden/request-data gating, and duplicate task removal. -- Follow-up quality fixes during implementation: - - Cost-analysis entry charts (`CostOverTime`, `CostByModel`) were moved from static dashboard imports to lazy chunks, reducing the built main `index` chunk from roughly `212 kB` raw / `57 kB` gzip to roughly `198 kB` raw / `53 kB` gzip. - - The warmup scheduler now uses a short idle timeout with bounded parallelism so deeper sections are warmed without waiting for late viewport proximity. - - Section placeholders for deeper analysis/table areas now better match final section heights, preventing scroll-command layout shift while lazy chunks complete above the target section. 
-- Validation: - - `npm run test:unit -- tests/frontend/dashboard-motion.test.tsx tests/unit/dashboard-section-preloading.test.ts` - - `npx playwright test tests/e2e/command-palette.spec.ts -g "executes analysis section navigation commands"` - - `npm run lint` - - `npm run test:architecture` - - `npm run check:deps` - - `tsc --noEmit` - - `npm run build:app` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues - -### performance-review.md / M-01 - -- Status: fixed -- Scope: dashboard filter changes now flow through a centralized `deriveDashboardFilterData(...)` pass that derives date/month filtering, provider/model filtering, available filter options, date range, and view-mode aggregation behind the stable `useDashboardFilters` contract. Computed dashboard summaries now share one normalized breakdown aggregation for model costs, provider metrics, and model options behind the stable `useComputedMetrics` contract. -- Guardrails: `tests/unit/dashboard-filter-data.test.ts` compares the new filter derivation with the previous staged semantics across representative filter combinations and empty states. `tests/unit/dashboard-aggregation.test.ts` locks normalized model/provider aggregation and day counting. `tests/frontend/use-computed-metrics.test.tsx` covers the public computed-metrics hook boundary. -- Follow-up quality fixes during implementation: - - `src/lib/dashboard-filter-data.ts` imports directly from the shared dashboard-domain contract instead of routing through broader frontend transform helpers, keeping the hook dependency graph smaller and the architecture layer test below its timeout budget. - - `src/lib/data-transforms.ts` now fills scalar chart arrays and model-name discovery in one sorted-data pass instead of separate `map(...)` and `flatMap(...)` passes. 
- - `computeModelCosts(...)` and `computeProviderMetrics(...)` now delegate to the shared breakdown summary, so standalone calculation callers and dashboard hook callers use the same aggregation path. - - After CodeRabbit review, computed `allModels` now unions `modelsUsed` and `modelBreakdowns`, so inconsistent imported data can no longer hide a breakdown-backed model from the model-over-time chart while existing modelsUsed-only behavior remains preserved. -- Validation: - - `npm run test:unit -- tests/unit/dashboard-filter-data.test.ts tests/unit/dashboard-aggregation.test.ts tests/frontend/use-computed-metrics.test.tsx tests/frontend/use-dashboard-filters.test.tsx tests/unit/analytics.test.ts tests/unit/data-transforms.test.ts tests/unit/code-rabbit-phase4.test.ts` - - `npm run test:unit -- tests/unit/dashboard-aggregation.test.ts tests/frontend/use-computed-metrics.test.tsx tests/unit/analytics.test.ts` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` -> completed after the CodeRabbit follow-up fix - - `npm run test:timings` -> completed after the CodeRabbit follow-up fix - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 1 minor issue fixed, round 3: 0 issues, round 4: 0 issues - -### performance-review.md / M-02 - -- Status: fixed -- Scope: the Settings dialog no longer starts `/api/toktrack/version-status` when it opens. The dashboard shell now schedules one cancellable, idle-friendly toktrack latest-version warmup per browser session through `src/lib/toktrack-version-status.ts`, and the settings version hook only formats the shared session snapshot for the Maintenance tab. -- Guardrails: `tests/unit/toktrack-version-status.test.ts` covers the pinned initial snapshot, concurrent warmup deduplication, cached failure behavior, scheduled fallback warmup, and cancellation. 
`tests/frontend/settings-modal-version-status.test.tsx` covers warmed status rendering without a dialog-open fetch, the no-fetch dialog-open path, and cached failure reuse across reopen. `tests/frontend/dashboard-filter-visibility.test.tsx` covers that the dashboard shell wires the session warmup scheduler. -- Follow-up quality fixes during implementation: - - `docs/architecture.md` now documents the session-wide toktrack version status cache as the owner of lookup state, while the settings modal hook is only the presentation adapter. - - Dashboard tests mock the warmup scheduler explicitly so future dashboard renders cannot accidentally perform real toktrack version network work during component tests. - - The server `/api/toktrack/version-status` contract and existing server-side success/failure TTL cache remain unchanged, so the fix changes when the UI asks for the status, not how the status is resolved. -- Validation: - - `npm run test:unit -- tests/unit/toktrack-version-status.test.ts tests/frontend/settings-modal-version-status.test.tsx tests/frontend/settings-modal-tabs.test.tsx tests/frontend/dashboard-filter-visibility.test.tsx tests/frontend/dashboard-error-state.test.tsx tests/unit/api.test.ts` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues - -### performance-review.md / N-01 - -- Status: fixed -- Scope: complex dashboard UI islands now separate calculation-heavy view data from rendering, accessibility, and motion. Drilldown details, heatmap grids, request-quality ratios, sortable table rows, recent-day benchmarks, and date-picker calendar navigation are derived through focused `src/lib/*-data.ts` helpers while the existing frontend functionality, visual structure, and animations stay unchanged. 
-- Guardrails: new unit suites cover drilldown aggregation/rankings/token segments, heatmap grid and keyboard target derivation, request-quality ratios/progress, table sort/row derivation, and date-picker calendar actions. Existing frontend suites still cover the DOM, ARIA, keyboard, and motion contracts for the touched components. -- Follow-up quality fixes during implementation: - - React component tests for heatmap and drilldown were narrowed to visible UI/A11y contracts after the derived branches moved to fast unit coverage, and over-broad timeout overrides were removed from sortable table tests. - - `docs/architecture.md` now documents the non-presentational data-derivation helpers as the boundary for complex dashboard UI islands. - - `npm run test:timings` shows the new helper suites stay out of the slowest-suite list; the remaining slow entries are existing integration, motion, settings, and broad table/UI interaction paths. -- Validation: - - `npx vitest run --project unit tests/unit/drill-down-data.test.ts tests/unit/heatmap-calendar-data.test.ts tests/unit/request-quality-data.test.ts tests/unit/sortable-table-data.test.ts tests/unit/filter-date-picker-data.test.ts tests/unit/recent-days-reveal.test.ts --reporter=verbose` - - `npx vitest run --project frontend tests/frontend/drill-down-modal-content.test.tsx tests/frontend/drill-down-modal-motion.test.tsx tests/frontend/heatmap-calendar-accessibility.test.tsx tests/frontend/request-quality.test.tsx tests/frontend/sortable-table-provider-model.test.tsx tests/frontend/sortable-table-recent-days.test.tsx tests/frontend/filter-bar-date-picker.test.tsx --reporter=verbose` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues - -### dashboard-review.md / N-02 - -- Status: fixed -- 
Scope: `src/components/dashboard/DashboardSections.tsx` already consumes a single structured `DashboardSectionsViewModel`, and `src/types/dashboard-view-model.d.ts` keeps the section data split into named section bundles instead of flat dashboard props. This closes the original broad `DashboardSectionsProps` concern without changing visible dashboard functionality, content, UI, or animations. -- Guardrails: `tests/architecture/dashboard-sections-contract.test.ts` now locks the public `DashboardSections` prop contract to one `viewModel` prop and keeps `DashboardSectionsViewModel` split into the intended layout, analysis, table, comparison, and interaction bundles. -- Follow-up quality fixes during implementation: - - No production refactor was needed because the broader view-model boundary had already been introduced by `architecture-review.md / M-01`; this change adds a targeted regression guardrail so future section work does not reintroduce wide flat props. - - `docs/architecture.md` already documents the intended DashboardSections boundary, so no duplicate architecture text was added. 
-- Validation: - `npm run test:architecture` - `npm run test:unit -- tests/frontend/dashboard-filter-visibility.test.tsx` - `tsc --noEmit` - `npm run lint` - `npm run check:deps` - `npm run verify:full` - `npm run test:timings` -> attempted twice; both runs failed under external CPU pressure with intermittent 5s timeouts in existing unrelated frontend suites, while `npm run verify:full` had just completed the same coverage test set successfully; the new architecture guardrail itself stayed below 100ms in `npm run test:architecture` - `coderabbit review --agent -t uncommitted -c AGENTS.md --files docs/review/fixed-findings.md tests/architecture/dashboard-sections-contract.test.ts` -> 0 issues - -## 2026-04-24 - -### dashboard-review.md / N-01 - -- Status: fixed -- Scope: the dashboard action landscape now separates everyday data loading, export/use actions, and maintenance actions in the header, while the Command Palette mirrors the same intent split for load data, exports, and maintenance. Existing actions and command IDs remain available. -- Guardrails: `tests/frontend/header-links.test.tsx` covers localized header action groups and preserved button access, while `tests/frontend/command-palette-action-groups.test.tsx` covers localized Command Palette groups and stable command IDs. -- Follow-up quality fixes during implementation: - - Header action rendering is now owned by a private `HeaderActions` composition inside `Header.tsx`, keeping the global header shell separate from action information architecture. - - Command Palette action commands are grouped by intent instead of sharing one broad `Actions` bucket, without changing command handlers or `data-testid` contracts. 
-- Validation: - - `npm run test:unit -- tests/frontend/header-links.test.tsx tests/frontend/command-palette-action-groups.test.tsx` - - `npx playwright test tests/e2e/command-palette.spec.ts` - - `tsc --noEmit` - - `npm run lint` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md -f ...` -> only reported unrelated untracked `docs/application-stack-reference.md`; the file was not changed - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir src/components` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir tests/frontend` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir shared/locales` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir docs/review` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files docs/architecture.md` -> 0 issues - -### dashboard-review.md / M-02 - -- Status: fixed -- Scope: `src/components/layout/FilterBar.tsx` was reduced to a composition shell over private filter groups for status, time presets, date range, and provider/model chips. The visible dashboard filtering capabilities remain intact while the bar now shows lightly grouped Time, Date range, Providers, and Models areas. -- Guardrails: `tests/frontend/filter-bar-accessibility.test.tsx` covers localized filter groups, while the existing date-picker and preset/chip suites continue to lock keyboard focus, date clearing, preset highlighting, and chip `aria-pressed`/visual states. `.dependency-cruiser.cjs` now keeps the new FilterBar internals private to the FilterBar shell. -- Follow-up quality fixes during implementation: - - The custom date picker logic now lives in `FilterBarDateRange.tsx`, isolating its portal, overlay positioning, keyboard navigation, and focus restoration from the main filter shell. 
- - Provider and model chip rendering now lives in `FilterBarChipFilters.tsx`, keeping provider badge styling and model color state local to chip filters. - - Provider badge alpha variants now flow through `getProviderBadgeStyle(...)`, so included provider chips no longer parse or mutate CSS strings in the UI. - - Date-picker calendar labels now recompute from the active locale while the picker is open, and weekday cells use stable keys. -- Validation: - - `npm run test:unit -- tests/frontend/filter-bar-accessibility.test.tsx tests/frontend/filter-bar-date-picker.test.tsx tests/frontend/filter-bar-presets.test.tsx tests/unit/model-colors.test.ts` - - `tsc --noEmit` - - `npm run lint` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md -f ...` -> fixed relevant minor findings for weekday keys, provider badge alpha handling, locale-reactive calendar labels, and included-chip style collisions; remaining global uncommitted finding is isolated to unrelated untracked `docs/application-stack-reference.md`. - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir src/components/layout` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir src/lib` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir tests` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir shared` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir docs/review` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files .dependency-cruiser.cjs` -> 0 issues - -### dashboard-review.md / M-01 - -- Status: fixed -- Scope: `src/components/features/settings/SettingsModal.tsx` was changed from one long settings surface into a tabbed workspace with Basics, Layout, Limits, and Maintenance areas. 
Existing settings capabilities remain available, but day-to-day preferences are separated from section layout, provider limits, and backup/version-maintenance actions. -- Guardrails: `tests/frontend/settings-modal-tabs.test.tsx` covers default tab state, section grouping, and keyboard navigation, while the existing focused settings suites now exercise their sections through the relevant tab. `docs/architecture.md` documents the tabbed settings shell and split motion/toktrack version ownership. -- Follow-up quality fixes during implementation: - - `SettingsModalSections.tsx` now separates the reduced-motion card from the toktrack version-status card so Maintenance can own diagnostic/version information without mixing it into daily dashboard behavior settings. - - `SettingsModal.tsx` now resets the active settings tab while the dialog is closed, so reopening the dialog always starts from Basics without a transient stale tab render. - - `tests/e2e/command-palette.spec.ts` now splits the formerly near-timeout section-navigation coverage into smaller scroll, dashboard-section, and analysis-section tests after the full gate exposed the old monolithic test as a reliability/performance risk. 
-- Validation: - - `npm run test:unit -- tests/frontend/settings-modal-tabs.test.tsx tests/frontend/settings-modal-language.test.tsx tests/frontend/settings-modal-version-status.test.tsx tests/frontend/settings-modal-defaults.test.tsx tests/frontend/settings-modal-sections.test.tsx tests/frontend/settings-modal-backups.test.tsx tests/frontend/settings-modal-provider-limits.test.tsx tests/frontend/settings-modal-draft-state.test.tsx` - - `npx playwright test tests/e2e/command-palette.spec.ts` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues - -### code-review.md / N-01 - -- Status: fixed -- Scope: removed the unused `src/hooks/use-theme.ts` and `src/hooks/use-provider-limits.ts` files. Their active responsibilities already live in the persisted settings flow: theme application goes through `applyTheme` and dashboard controller effects, while provider-limit synchronization goes through `useAppSettings` and `syncProviderLimits`. -- Guardrails: `tests/architecture/unused-hooks.test.ts` now fails when a production hook file under `src/hooks/` has no production import, and `docs/architecture.md` plus `docs/testing.md` document that unused hook helpers should be removed instead of retained as speculative code. -- Follow-up quality fixes during implementation: - - The guardrail covers the same dead-hook visibility gap that made the original `0%` coverage files easy to miss, so future unused hook files are caught by `npm run test:architecture`. 
-- Validation: - - `npm run test:architecture` - - `npm run check:deps` - - `npm run test:unit -- tests/frontend/react-query-hooks.test.tsx tests/frontend/dashboard-controller-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the CLI: `47 minutes and 10 seconds`) - -### code-review.md / M-01 - -- Status: fixed -- Scope: dashboard date preset behavior is centralized in the shared dashboard preferences contract. `src/hooks/use-dashboard-filters.ts` applies presets through `resolveDashboardPresetRange`, and `src/components/layout/FilterBar.tsx` derives the active UI state through `resolveDashboardActivePreset`; both flow through `src/lib/dashboard-preferences.ts` to `shared/dashboard-preferences.js`, so applying and displaying presets no longer duplicate the `7d`, `30d`, `month`, `year`, and `all` rules. -- Guardrails: `tests/unit/dashboard-preferences.test.ts` locks the shared/frontend preset contract, while `tests/frontend/use-dashboard-filters.test.tsx` and `tests/frontend/filter-bar-presets.test.tsx` cover applying presets in the hook and highlighting them in the filter bar. -- Follow-up quality fixes during implementation: - - No production or test changes were needed for this finding because the shared preset contract and focused regression coverage already existed; the fix was to document the concrete `code-review.md / M-01` reference as closed. 
-- Validation: - - `npm run test:unit -- tests/unit/dashboard-preferences.test.ts tests/frontend/use-dashboard-filters.test.tsx tests/frontend/filter-bar-presets.test.tsx` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 2 minor documentation issues, fixed - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 2: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the CLI: `57 minutes and 23 seconds`) - -### code-review.md / M-02 - -- Status: fixed -- Scope: `src/components/features/settings/SettingsModal.tsx` was reduced from the former 1000+ line all-in-one dialog into a shell/composition root over extracted settings sections, a draft-state hook, a version-status hook, and modal-local helper logic. The visible settings areas now live behind `src/components/features/settings/SettingsModalSections.tsx`, while the draft/save/reset behavior moved into `use-settings-modal-draft.ts` and the toktrack lookup behavior moved into `use-settings-modal-version-status.ts`. -- Guardrails: `docs/architecture.md` now documents the settings modal shell, internal section bundle, and private helper hooks as a feature-internal composition boundary, and `.dependency-cruiser.cjs` now blocks unrelated frontend modules from importing `SettingsModalSections.tsx`, `use-settings-modal-draft.ts`, `use-settings-modal-version-status.ts`, or `settings-modal-helpers.ts` directly. -- Follow-up quality fixes during implementation: - - `src/components/features/settings/settings-modal-helpers.ts` now owns the extracted number parsing, selection normalization, provider-limit draft building/patching, and section reorder helpers so tests no longer import utility behavior through the UI shell. 
- - `use-settings-modal-draft.ts` now clones incoming section drafts and patches empty provider configs from `DEFAULT_PROVIDER_LIMIT_CONFIG`, preserving the previous provider-limit safety behavior after the refactor. - - `use-settings-modal-draft.ts` now initializes modal drafts once per open session and resets that guard on close, so external prop churn can no longer overwrite in-progress edits while the dialog remains open. - - `tests/unit/settings-modal-helpers.test.ts` now covers the extracted settings helpers directly, and `tests/unit/code-rabbit-phase1.test.ts` was narrowed back to the unrelated chart/auto-import helpers it actually owns. - - `tests/frontend/settings-modal-defaults.test.tsx`, `settings-modal-sections.test.tsx`, `settings-modal-backups.test.tsx`, `settings-modal-provider-limits.test.tsx`, and `settings-modal-draft-state.test.tsx` now split the modal coverage by responsibility instead of adding another broad catch-all dialog suite. -- Validation: - - `npm run test:unit -- tests/unit/settings-modal-helpers.test.ts tests/unit/code-rabbit-phase1.test.ts tests/frontend/settings-modal-language.test.tsx tests/frontend/settings-modal-version-status.test.tsx tests/frontend/settings-modal-defaults.test.tsx tests/frontend/settings-modal-sections.test.tsx tests/frontend/settings-modal-backups.test.tsx tests/frontend/settings-modal-provider-limits.test.tsx` - - `npm run check` - - `npm run test:architecture` - - `npm run test:timings` - - `npm_config_cache=/tmp/ttdash-npm-cache npm run verify:release` - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 1 minor issue, fixed (draft state no longer reinitializes while the modal stays open; `tests/frontend/settings-modal-draft-state.test.tsx` added) - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 2: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the 
CLI: `54 minutes and 32 seconds`) - -### code-review.md / H-01 - -- Status: fixed -- Scope: `src/hooks/use-dashboard-controller.ts` was reduced from the remaining god-hook into a composition root over focused internal controller slices. The heavy derived state, browser IO, dialog ownership, drill-down navigation, shell/load state, and imperative dashboard actions now live in `src/hooks/use-dashboard-controller-actions.ts`, `use-dashboard-controller-browser.ts`, `use-dashboard-controller-derived-state.ts`, `use-dashboard-controller-dialogs.ts`, `use-dashboard-controller-drill-down.ts`, `use-dashboard-controller-effects.ts`, and `use-dashboard-controller-shell-state.ts`, while the public controller contract stayed stable through `src/hooks/use-dashboard-controller.ts`. -- Guardrails: `docs/architecture.md` now documents the internal dashboard controller slices and the browser-IO helper as private implementation details behind the public controller hook, and `.dependency-cruiser.cjs` now blocks component-level fanout to the internal `use-dashboard-controller-*.ts` slices while keeping the intentional type-only controller contract out of the orphan warning path. -- Follow-up quality fixes during implementation: - - `tests/frontend/dashboard-controller-browser.test.tsx` now locks the extracted browser helper responsibilities for JSON downloads, section scrolling, and the test-only `openSettings` bridge. - - `tests/frontend/dashboard-controller-drill-down.test.tsx` now covers the extracted drill-down slice directly, including the edge case where the selected day disappears after filtering changes. - - `src/hooks/use-dashboard-controller-actions.ts` now uses the upload-specific fallback toast (`api.uploadFailed`) after a successful JSON parse when the backend rejects a usage upload without an `Error` instance, and `tests/frontend/dashboard-error-state.test.tsx` covers that regression explicitly. 
- - `npm run test:timings` showed no new performance hotspot in the touched dashboard/controller suites; the new slice tests stay sub-100ms and the existing dashboard controller/public-shell tests remained fast. -- Validation: - - `npm run check` - - `npm run test:architecture` - - `npm run test:timings` - - `npm run test:unit -- tests/frontend/dashboard-controller-browser.test.tsx tests/frontend/dashboard-controller-drill-down.test.tsx tests/frontend/dashboard-controller-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx tests/frontend/dashboard-filter-visibility.test.tsx tests/frontend/dashboard-error-state.test.tsx` - - `npm run test:unit -- tests/frontend/dashboard-error-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx tests/frontend/dashboard-controller-browser.test.tsx` - - `npm_config_cache=/tmp/ttdash-npm-cache npm run verify:release` - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 1 minor issue, fixed (`api.uploadFailed` fallback for backend upload rejection after successful JSON parse) - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 2: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the CLI: `52 minutes and 50 seconds`) - -## 2026-04-23 - -### architecture-review.md / H-01 - -- Status: fixed -- Scope: `server.js` was reduced to the CLI/bootstrap and runtime composition root; subsystem logic moved into `server/data-runtime.js`, `server/background-runtime.js`, `server/auto-import-runtime.js`, and `server/http-router.js`. -- Guardrails: `docs/architecture.md` now documents the server split, `.dependency-cruiser.cjs` prevents runtime modules from coupling back to `server.js`, the router, or each other, and `tests/unit/background-runtime.test.ts` locks the background registry snapshot behavior that was tightened during the review cycle. 
-- Follow-up quality fixes during implementation: - - `server/background-runtime.js`: removed a snapshot TOCTOU re-read so background pruning now decides cleanup from one captured registry read. - - `src/components/layout/FilterBar.tsx`: added cleanup for queued focus-restoration callbacks; `tests/frontend/filter-bar-date-picker.test.tsx` now covers the unmount path that was causing the flaky date-picker teardown in Playwright. -- Validation: - - `npm run test:architecture` - - `npm run check:deps` - - `npm run test:unit -- tests/unit/server-helpers-network.test.ts tests/unit/server-helpers-runner-core.test.ts tests/unit/server-helpers-runner-process.test.ts tests/unit/server-helpers-file-locks.test.ts tests/integration/server-auto-import.test.ts tests/integration/server-background.test.ts tests/integration/server-api-guards.test.ts` - - `npm run verify:full` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 1 minor issue, round 2: 0 issues - -### architecture-review.md / H-02 - -- Status: fixed -- Scope: the duplicated client/server settings contract was consolidated into `shared/app-settings.js` with typed declarations in `shared/app-settings.d.ts`; frontend adapters in `src/lib/app-settings.ts`, `src/lib/dashboard-preferences.ts`, and `src/lib/provider-limits.ts` now consume that shared contract, and `server/data-runtime.js` now normalizes persisted settings through the same source. -- Guardrails: `docs/architecture.md` now documents `shared/app-settings.js` as the single production source for persisted settings defaults and normalization, `.dependency-cruiser.cjs` blocks bypassing that contract from `server.js`, `server/data-runtime.js`, and `src/lib/app-settings.ts`, and `vitest.config.ts` now includes `shared/app-settings.js` in coverage reporting. 
-- Follow-up quality fixes during implementation: - - `tests/unit/app-settings-contract.test.ts`: locks the shared/frontend contract alignment for defaults, fragment normalization, persisted settings normalization, and runtime-only flags. - - `tests/integration/server-api-imports.test.ts`: now asserts the normalized settings-import response instead of only the status code. - - `tests/integration/server-api-persistence.test.ts` and `tests/frontend/settings-modal-test-helpers.tsx`: now derive defaults from the shared settings contract instead of re-hardcoding them in tests. - - `tests/unit/background-runtime.test.ts`: cleaned up type-only imports to keep the repo lint/type gates green after the shared typings were added. -- Validation: - - `npm run check` - - `npm run test:architecture` - - `npm run test:unit -- tests/unit/app-settings-contract.test.ts tests/unit/api.test.ts tests/unit/dashboard-preferences.test.ts tests/integration/server-api-persistence.test.ts tests/integration/server-api-imports.test.ts` - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:unit:coverage` - - `npm run build:app` - - `npm run verify:package` - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues - -### architecture-review.md / M-01 - -- Status: fixed -- Scope: the dashboard orchestration was cut from a broad flat controller surface into focused view-model bundles in `src/hooks/use-dashboard-controller.ts`; `src/components/Dashboard.tsx` now consumes `header`, `filterBar`, `sections`, `settingsModal`, `dialogs`, `commandPalette`, `report`, and shell bundles instead of forwarding dozens of individual fields, and `src/components/dashboard/DashboardSections.tsx` now consumes one structured `DashboardSectionsViewModel`. 
-- Guardrails: `src/types/dashboard-view-model.d.ts` now owns the shared frontend-only dashboard view-model contracts, `docs/architecture.md` documents `Dashboard.tsx` as the controller composition root, and `.dependency-cruiser.cjs` now blocks component-subtree fanout to `src/hooks/use-dashboard-controller.ts`. -- Follow-up quality fixes during implementation: - - `src/components/layout/Header.tsx`, `src/components/layout/FilterBar.tsx`, `src/components/features/command-palette/CommandPalette.tsx`, and `src/components/features/settings/SettingsModal.tsx` now type their props from the shared dashboard view-model contracts instead of re-declaring local prop shapes. - - `tests/frontend/dashboard-controller-test-helpers.ts` now provides bundle-based controller and section factories, so dashboard composition tests no longer rebuild a flat mega-mock. - - `tests/frontend/dashboard-controller-actions.test.tsx` now covers drill-down navigation from the controller-owned dialog bundle, locking the logic that moved out of `Dashboard.tsx`. - - `tests/frontend/dashboard-filter-visibility.test.tsx` now also asserts that `DashboardSections` receives a structured `viewModel` bundle instead of flat section props. 
-- Validation: - - `npm run check` - - `npm run test:architecture` - - `npm run test:unit -- tests/frontend/dashboard-controller-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx tests/frontend/dashboard-filter-visibility.test.tsx` - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:unit:coverage` - - `npm run build:app` - - `npm run verify:package` - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues - -### architecture-review.md / M-02 - -- Status: fixed -- Scope: dashboard preset, section, and settings-adjacent UI rules now flow through `shared/dashboard-preferences.js` with declarations in `shared/dashboard-preferences.d.ts`; `shared/app-settings.js` consumes that shared contract, `src/lib/dashboard-preferences.ts` was reduced to a thin adapter, and the duplicated preset semantics were removed from `src/hooks/use-dashboard-filters.ts` and `src/components/layout/FilterBar.tsx`. -- Guardrails: `docs/architecture.md` now documents the shared dashboard contract separately from the app settings contract, `.dependency-cruiser.cjs` blocks production imports of `shared/dashboard-preferences.json` outside `shared/dashboard-preferences.js`, and `vitest.config.ts` now includes `shared/dashboard-preferences.js` in coverage. -- Follow-up quality fixes during implementation: - - `tests/unit/dashboard-preferences.test.ts` now locks the shared/frontend adapter alignment for config parsing, section metadata, preset-range resolution, and active-preset detection. - - `tests/frontend/use-dashboard-filters.test.tsx` now asserts preset application and reset behavior against the shared preset resolver instead of re-hardcoding date ranges. - - `tests/frontend/filter-bar-presets.test.tsx` now seeds active-preset UI states from the shared resolver while preserving the existing visible quick-select order. 
- - `src/types/dashboard-view-model.d.ts` and `src/hooks/use-dashboard-controller.ts` now type preset actions with `DashboardDatePreset` instead of broad strings. -- Validation: - - `npm run check` - - `npm run test:architecture` - - `npm run check:deps` - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:unit:coverage` - - `npm run build:app` - - `npm run verify:package` - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues diff --git a/docs/review/performance-review.md b/docs/review/performance-review.md deleted file mode 100644 index 4bc7c5e..0000000 --- a/docs/review/performance-review.md +++ /dev/null @@ -1,67 +0,0 @@ -# Performance Review - -## Kurzfazit - -Die Laufzeitperformance wirkt fuer einen lokalen Analytics-Dashboard-Use-Case aktuell brauchbar, aber der Code zeigt klare Skalierungsgrenzen: ein schweres Initial-Bundle, mehrere komplette Datenpasses pro Filterwechsel und einige bewusst teure Testpfade. - -## Was bereits gut ist - -- Sekundaere Oberflaechen wie `SettingsModal`, `DrillDownModal`, Forecast-Zoom und Help sind lazy geladen -- Viele teure Ableitungen sind bereits in `useMemo(...)` gekapselt -- Auto-Import und Background-Pfade sind zumindest gegen Parallelstarts abgesichert - -## Findings - -### H-01 - Das Initial-Bundle ist fuer ein lokales Dashboard weiterhin schwer - -**Evidenz:** `npm run build:app` - -Die Build-Ausgabe zeigt mehrere grosse Chunks: - -- `charts-vendor` -> `422.02 kB` raw / `119.59 kB` gzip -- `index` -> `212.32 kB` raw / `57.55 kB` gzip -- `react-vendor` -> `200.38 kB` raw / `64.43 kB` gzip -- `motion-vendor` -> `125.85 kB` raw / `41.08 kB` gzip -- `i18n` -> `119.68 kB` raw / `36.70 kB` gzip - -Die Lazy-Splits helfen fuer modale Nebenwege, aber das Kern-Dashboard bleibt sehr chart-lastig und schwer. 
Auf schwachen Geraeten oder in eingebetteten Browsern ist das der wahrscheinlichste Performance-Flaschenhals beim Kaltstart. - -**Empfehlung:** kritische First-View-Visualisierungen priorisieren und weniger wichtige Charts spaeter oder sektional nachladen. - -### M-01 - Ein Filterwechsel loest mehrere komplette Datenpasses aus - -**Referenzen:** `src/hooks/use-dashboard-filters.ts:64-114`, `src/hooks/use-dashboard-filters.ts:164-201`, `src/hooks/use-computed-metrics.ts:8-29`, `shared/dashboard-domain.js:261-520` - -Pro Aenderung an Filtern oder ViewMode wird die Datenmenge mehrfach komplett durchlaufen: - -- sortieren -- Datumsfilter -- Providerfilter -- Modellfilter -- Aggregation -- Metrics -- Provider- und Modellaggregation -- Chart-Transforms -- Unique-Model-Listen - -Fuer kleine Datensaetze ist das okay. Fuer groessere Historien wird es teuer, weil viele Schritte nicht dieselben Vorberechnungen teilen. - -**Empfehlung:** gemeinsame Vorberechnungen zentralisieren, vor allem fuer provider/model-Indexes und bereits normalisierte Breakdown-Maps. - -### M-02 - Der Settings-Dialog koppelt UI-Opening an eine externe Registry-Abfrage - -**Referenzen:** `src/components/features/settings/SettingsModal.tsx:241-271`, `server.js:2208-2247` - -Beim Oeffnen des Settings-Dialogs wird der Toktrack-Versionstatus angefragt, und der Server kann dafuer `npm view` mit Timeout starten. Das ist logisch nachvollziehbar, aber es koppelt einen lokalen UI-Dialog an eine externe Netzabhaengigkeit. - -Der bestehende Cache entschaerft das, beseitigt das Grundmuster aber nicht. Offline oder in restriktiven Netzen fuehrt das zu wiederholter Fehlermeldungsarbeit und vermeidbarer Laufzeitkomplexitaet. - -**Empfehlung:** Version-Check optional auf explizite Nutzeraktion legen oder im Hintergrund einmal pro Session vorwaermen. 
- -### N-01 - Die langsamsten Frontend-Tests deuten auf komplexe UI-Inseln hin - -**Evidenz:** `npm run test:timings` - -Die langsamsten Frontend-Suites haengen an `SettingsModal`, `DrillDownModal`, `HeatmapCalendar`, Sortierungstabellen und Filterzugriffen. Das ist nicht nur ein Testthema, sondern ein Signal fuer komplexe UI-Inseln mit hoher Interaktions- und Renderlast. - -**Empfehlung:** grosse Interaktionskomponenten weiter zerlegen und bei UI-Hotspots systematisch zwischen Renderlogik, Datenlogik und Accessibility-Glue unterscheiden. diff --git a/docs/review/security-review.md b/docs/review/security-review.md deleted file mode 100644 index db270ef..0000000 --- a/docs/review/security-review.md +++ /dev/null @@ -1,63 +0,0 @@ -# Security Review - -## Kurzfazit - -Der aktuelle Stand ist fuer den Default-Loopback-Betrieb deutlich staerker als es die aeltere Pen-Test-Doku vermuten laesst: Host-Checks, Origin-Pruefung, Payload-Grenzen, Null-Byte-Abwehr, token-basierte API-Auth, restriktive Datei-Permissions und eine CSP ohne `unsafe-inline` fuer Styles sind vorhanden und getestet. Die verbleibenden Risiken liegen vor allem bei kompromittierten Prozessen desselben OS-Users und bei veralteter Security-Dokumentation. - -## Was bereits gut ist - -- Nicht-loopback Bind braucht explizites Opt-in ueber `TTDASH_ALLOW_REMOTE=1` -- Nicht-loopback Bind braucht zusaetzlich `TTDASH_REMOTE_TOKEN` -- Loopback-API-Requests brauchen einen per Start generierten lokalen Session-Token -- Mutation-Requests werden ueber Host- und Origin-Validierung abgesichert -- Null-Byte-Pfade werden abgefangen, ohne den Server zu beenden -- Oversized Upload- und Report-Requests werden sauber mit `413` behandelt -- Persistierte Dateien und App-Directories werden mit restriktiven Rechten geschrieben -- Die CSP trennt Element- und Attribut-Styles, erlaubt Stylesheet-Elemente nur ueber `self` bzw. 
HTML-Nonce und blockiert Style-Attribute ueber `style-src-attr 'none'` -- Diese Schutzmechanismen sind nicht nur im Code sichtbar, sondern auch in Integrationstests verankert - -## Findings - -### H-01 - `TTDASH_ALLOW_REMOTE=1` bleibt ein Vollvertrauensmodus ohne Authentifizierung - -**Referenzen:** `server/runtime.js:10-21`, `server/http-utils.js:152-233`, `server.js:1608-1765`, `server.js:2482-2852` - -Sobald Remote-Binding explizit aktiviert wird, existiert weiterhin keine echte Authentifizierung. Die Autorisierung stuetzt sich dann nur noch auf Host- und Origin-Konsistenz. Das schuetzt gegen Browser-Cross-Site-Szenarien, aber nicht gegen einfache nicht-browserseitige Clients auf demselben Netz, die passende `Host`- und `Origin`-Header setzen. - -Das ist fuer den Default-Modus kein akuter Bug, aber ein klares Security-Design-Risiko fuer alle Nutzer, die das Tool absichtlich ins Netz haengen. - -**Empfehlung:** Remote-Bind nur mit Token-basierter Auth oder einem separaten abgesicherten Mode erlauben. - -**Aktueller Stand:** In `docs/review/fixed-findings.md` als `security-review.md / H-01` geschlossen. Remote-Bind verlangt `TTDASH_REMOTE_TOKEN`, und Remote-API-Requests laufen durch die zentrale Token-Auth. - -### M-01 - Lokale Read-Endpoints sind im gleichen Host-Kontext offen - -**Referenzen:** `server.js:2503-2588`, `server.js:2769-2777` - -`/api/usage`, `/api/settings`, `/api/runtime` und `/api/toktrack/version-status` waren fuer jeden Prozess erreichbar, der den Loopback-Server ansprechen konnte. Fuer eine lokale Single-User-App war das ein verstaendlicher Tradeoff, aber die Bedrohungsannahme war stark: "andere lokale Prozesse sind vertrauenswuerdig". - -**Aktueller Stand:** In `docs/review/fixed-findings.md` als `security-review.md / M-01` geschlossen. Der Loopback-Server generiert pro Start einen lokalen Session-Token, oeffnet bzw. 
dokumentiert eine einmalige Bootstrap-URL, setzt daraus ein HttpOnly/SameSite-Cookie und verlangt danach Auth fuer alle API-Endpoints. Background-Registry und Testserver speichern die notwendigen Session-Metadaten nur in restriktiven User-Config-Dateien. - -**Restrisiko:** Ein kompromittierter Prozess mit denselben OS-User-Rechten kann weiterhin Terminalausgaben, User-Config-Dateien oder Browserprofile angreifen. Vollstaendige Isolation gegen diesen Angreifertyp wuerde einen OS-naeheren Transport wie Unix Domain Sockets, Named Pipes mit ACLs oder eine native Desktop-Shell erfordern. - -### N-01 - Die CSP erlaubt weiterhin Inline-Styles - -**Referenzen:** `server.js:49-56` - -Die gesetzte CSP enthielt `style-src 'self' 'unsafe-inline'`. Das war kein unmittelbarer Exploit-Nachweis, vergroesserte aber die Angriffsoberflaeche, falls spaeter ungewollte Style-Injektionen oder unsaubere HTML-Renderpfade dazukommen. - -**Empfehlung:** mittelfristig auf style hashes oder reine Stylesheet-basierte Ausgabe umstellen. - -**Aktueller Stand:** In `docs/review/fixed-findings.md` als `security-review.md / N-01` geschlossen. Die CSP enthaelt kein `unsafe-inline` mehr fuer Styles, HTML-Antworten erhalten pro Response eine Nonce und ein passendes `ttdash-csp-nonce` Meta-Tag, `style-src-elem` erlaubt nur `self` bzw. die Nonce, und `style-src-attr 'none'` blockiert echte Inline-Style-Attribute. - -**Restrisiko:** JavaScript-gesteuerte Style-Properties bleiben fuer React, Recharts und Motion erlaubt. Das ist bewusst, weil diese Updates nicht der blockierte HTML-Style-Attributpfad sind und die bestehende Dashboard-Optik sowie Animationen ohne Security-Gewinn sonst umfangreich ersetzt werden muessten. 
- -### N-02 - Die vorhandene Pen-Test-Doku ist fachlich nicht mehr auf dem aktuellen Stand - -**Referenzen:** `docs/security/pentest-2026-04-17.md:3-11`, `tests/integration/server-api-guards.test.ts:7-110`, `server.js:2818-2866` - -Die alte Doku nennt `%00`-DoS, fehlende Host-Pruefung und fehlende Origin-Pruefung als aktuelle Hauptbefunde. Der aktuelle Code und die aktuellen Guards-Tests zeigen aber, dass diese Punkte inzwischen abgesichert sind. - -Das ist kein Runtime-Bug, aber ein Security-Governance-Problem: veraltete Security-Dokumente fuehren zu falschem Vertrauen oder falscher Alarmierung. - -**Empfehlung:** Security-Dokumente versionieren oder mit einem klaren "historisch / superseded" Hinweis versehen. diff --git a/docs/review/server-review.md b/docs/review/server-review.md deleted file mode 100644 index 451a9e2..0000000 --- a/docs/review/server-review.md +++ /dev/null @@ -1,55 +0,0 @@ -# Server Review - -## Kurzfazit - -Der Server ist funktional robust fuer eine lokale Single-Binary-Node-Runtime. Die fruehere Entrypoint-Konzentration wurde deutlich reduziert: `server.js` ist heute nur noch der ausfuehrbare CLI/Bin-Shim, waehrend `server/app-runtime.js` die Runtime-Komposition uebernimmt und CLI, Startup-Shell, HTTP-Lifecycle, Auth, Router, Persistenz, Auto-Import und Background-Betrieb in fokussierten Runtime-Modulen liegen. 
- -## Was bereits gut ist - -- Host-, Origin- und Payload-Grenzen sind aktiv und getestet -- Persistenz nutzt atomische Schreibpfade und Cross-Process-Locks -- Background-Instanzen, Logfiles und Dateirechte sind nicht nur "best effort", sondern explizit mitgedacht -- Reporting, Auto-Import und Background-Betrieb haben klare Fehler- und Timeout-Strategien -- `server.js` exportiert keine Test- oder Runtime-Helper-API mehr und bleibt durch eine Architektur-Guardrail auf den ausfuehrbaren Shim begrenzt - -## Findings - -### H-01 - Zu viele Server-Subsysteme leben im Entrypoint - -**Referenzen:** `server.js` insgesamt, besonders `322-542`, `655-940`, `1665-1765`, `1784-2451`, `2482-2975` - -Das Entrypoint-Modul traegt Persistenz, File Locks, Background-Registry, CLI, Auto-Import, Runner-Diagnostik, HTTP-Router, Static Serving und Shutdown in einem Kontext. Diese Kopplung verlangsamt Reviews und macht lokale Refactors teuer. - -**Empfehlung:** innere Runtime-Helfer in eigene Module verschieben und `server.js` auf Komposition reduzieren. - -**Aktueller Stand:** In `docs/review/fixed-findings.md` als `server-review.md / H-01` geschlossen. CLI-Parsing, Startup-Ausgabe, Browser-Open, lokale Auth-Session-Metadaten und HTTP-Lifecycle/Shutdown sind aus dem Entrypoint herausgezogen. Nach `server-review.md / M-01` umfasst `server.js` nur noch den `require.main`-Startpfad und delegiert die Runtime-Komposition an `server/app-runtime.js`. - -### M-01 - Der produktive Entrypoint exportiert einen breiten `__test__`-API-Schatten - -**Referenzen:** `server.js:2935-2962` - -Fuer Tests werden viele interne Helfer direkt aus `server.js` exportiert. Das ist pragmatisch, macht das Produktionsmodul aber implizit zu einer halb-oeffentlichen Utility-Sammlung. Mit wachsender Codebasis entsteht daraus schnell eine "nicht offiziell oeffentliche, aber faktisch stabile" API. - -**Empfehlung:** Testziele aus `server.js` in importierbare Runtime-Module verschieben und dort direkt testen. 
- -**Aktueller Stand:** In `docs/review/fixed-findings.md` als `server-review.md / M-01` geschlossen. Die Server-helper-Tests importieren die Runtime-Module direkt, die Playwright-Testserver-Komposition nutzt `server/app-runtime.js`, und `server.js` exportiert weder `__test__` noch andere produktive Runtime-Helper. - -### M-02 - Globale Runtime-Flags und Caches erschweren lokale Isolation - -**Referenzen:** `server.js:93-101`, `1759-1774`, `2208-2247`, `2271-2451` - -Zustaende wie `startupAutoLoadCompleted`, `runtimePort`, `runtimeUrl`, `autoImportRunning`, `autoImportStreamRunning`, `latestToktrackVersionCache` und `latestToktrackVersionLookupPromise` sind zentral und mutable. Fuer die aktuelle App ist das noch beherrschbar, aber es koppelt Nebenwirkungen ueber mehrere Funktionsbereiche. - -**Empfehlung:** zumindest die Toktrack- und Auto-Import-Laufzeit in dedizierte Service-Objekte kapseln. - -**Aktueller Stand:** In `docs/review/fixed-findings.md` als `server-review.md / M-02` geschlossen. Runtime-Snapshot, Startup-Auto-Load-Status, Auto-Import-Lease und Toktrack-Version-Cache liegen jetzt in pro Runtime instanziierten Service-Objekten; der HTTP-Router besitzt kein eigenes Auto-Import-Stream-Flag mehr. - -### N-01 - Die Serverbasis ist strukturell staerker als es ihre Dateiform vermuten laesst - -**Referenzen:** `server/runtime.js`, `server/http-utils.js`, `tests/integration/server-api-guards.test.ts`, `tests/integration/server-background.test.ts` - -Positiv auffaellig ist, dass wesentliche Sicherheits- und Runtime-Helfer bereits aus `server.js` herausgezogen wurden. Diese Richtung ist richtig und sollte konsequent weitergefuehrt werden. - -**Empfehlung:** `server/runtime.js` und `server/http-utils.js` als Muster fuer weitere Extraktionen verwenden. - -**Aktueller Stand:** In `docs/review/fixed-findings.md` als `server-review.md / N-01` geschlossen. 
Die Host-/Origin-/Content-Type-Request-Policy liegt jetzt in `server/http-request-guards.js`, waehrend `server/http-utils.js` als kompatible Fassade fuer Body-, Response- und Router-Helfer erhalten bleibt. diff --git a/docs/review/test-review.md b/docs/review/test-review.md deleted file mode 100644 index 7cd9a0a..0000000 --- a/docs/review/test-review.md +++ /dev/null @@ -1,94 +0,0 @@ -# Test Review - -## Kurzfazit - -Die Teststrategie ist stark: klare Layer, viele gezielte Frontend- und Integrationstests, gute Accessibility-Abdeckung und brauchbare Runtime-Regressionen. Die groessten aktuellen Testprobleme liegen bei Messbarkeit, Flakiness und Testdauer, nicht bei fehlender Ernsthaftigkeit. - -## Was bereits gut ist - -- Die vier Testlayer sind sauber dokumentiert und tatsaechlich im Repo sichtbar -- Server-Guards, Background-Betrieb und Auto-Import haben echte Integrationstests -- Frontend-Tests pruefen nicht nur Rendern, sondern Sprache, Motion und Accessibility -- Die Test-Timings werden aktiv ueber ein eigenes Script ausgewertet - -## Findings - -### H-01 - Die Architektur-Suite enthaelt einen echten Timeout-Fall an der Flake-Grenze - -**Status:** Behoben, siehe `docs/review/fixed-findings.md` -> `test-review.md / H-01`. - -**Referenzen:** `tests/architecture/frontend-layers.test.ts:3-12` - -`npm run test:architecture` fiel im Gesamtlauf aus, weil `hooks must not depend on components` den `5000ms` Timeout riss. Der isolierte Re-Run derselben Datei bestand, aber der langsamste Fall lag bei etwa `4950ms`. - -Das ist kein semantischer Architekturverstoss, aber ein instabiles Signal. Genau solche Grenzfaelle untergraben das Vertrauen in CI-Fehler. - -**Empfehlung:** Timeout anheben oder die Archunit-Pruefung leichter machen, bevor daraus wiederkehrende CI-Flakes werden. - -### H-02 - Die Coverage-Zahl bildet die produktive Runtime nur teilweise ab - -**Status:** Behoben, siehe `docs/review/fixed-findings.md` -> `test-review.md / H-02`. 
- -**Referenzen:** `vitest.config.ts:27-44` - -Die Coverage-Includes decken nur: - -- `src/hooks/**/*.ts` -- `src/lib/**/*.ts` -- `src/components/Dashboard.tsx` -- `usage-normalizer.js` - -Damit fehlen in der gemeldeten Quote unter anderem: - -- `server.js` -- `server/**` -- `shared/**` -- fast alle Komponenten ausser `Dashboard.tsx` - -Die gemeldeten `76.27 / 65.71 / 76.43 / 78.61` sind also technisch korrekt, aber strategisch irrefuehrend, wenn man sie als Produkt-Coverage liest. - -**Empfehlung:** separate Server-Coverage fuer Spawn-Pfade einfuehren und die Includes auf die realen Runtime-Schwerpunkte erweitern. - -### M-01 - Dead Code und Coverage-Luecken werden von den Guardrails nicht sichtbar gemacht - -**Status:** Behoben, siehe `docs/review/fixed-findings.md` -> `test-review.md / M-01`. - -**Referenzen:** `src/hooks/use-theme.ts:1-21`, `src/hooks/use-provider-limits.ts:1-17`, `.dependency-cruiser.cjs:12-21` - -Es gibt mindestens zwei Hooks ohne produktive Importe und mit `0%` Coverage. Gleichzeitig lief `dependency-cruiser` trotz `no-orphans-src` Regel ohne Hinweis durch. - -Das ist wichtig, weil tote oder veraltete Pfade so laenger unauffaellig im Repo bleiben. - -**Empfehlung:** Orphan-Detection schaerfer pruefen oder zusaetzlich ein dediziertes Dead-Code-Werkzeug in den Review-/CI-Prozess aufnehmen. - -### M-02 - Testdauer konzentriert sich auf wenige Hotspots - -**Status:** Behoben, siehe `docs/review/fixed-findings.md` -> `test-review.md / M-02`. 
- -**Evidenz:** `npm run test:timings` - -Langsamste Suites: - -- `tests/integration/server-background.test.ts` -> `5.785s` -- `tests/integration/server-auto-import.test.ts` -> `4.521s` -- `tests/unit/server-helpers-runner-process.test.ts` -> `2.778s` -- `tests/frontend/settings-modal-language.test.tsx` -> `2.443s` -- `tests/frontend/drill-down-modal-motion.test.tsx` -> `2.013s` - -Langsamster Einzeltest: - -- `rejects parallel auto-import starts before launching a second toktrack runner` -> `3.326s` - -Die Hotspots sind plausibel, aber sie zeigen klar, welche Testpfade kuenftig zuerst entkoppelt oder fokussiert werden sollten. - -**Empfehlung:** langsame Prozess- und UI-Pfade weiter isolieren, damit neue Regressionen dort nicht unverhaeltnismaessig teuer werden. - -### M-03 - Die E2E-Abdeckung ist funktional stark, aber in einer grossen Monolith-Datei konzentriert - -**Status:** Behoben, siehe `docs/review/fixed-findings.md` -> `test-review.md / M-03`. - -**Referenzen:** `tests/e2e/dashboard.spec.ts` insgesamt, `734` Zeilen, `7` Tests - -Die Playwright-Suite prueft wichtige Journeys, aber fast alles lebt in einer Datei. Das erschwert Navigation, Review und selektive Optimierung. Mit weiterem Wachstum wird daraus schnell ein langsamer Catch-all statt fokussierter Journeys. - -**Empfehlung:** nach Themen splitten, z. B. `load-and-upload`, `settings-and-backups`, `forecast-and-reporting`, `filters-and-navigation`. diff --git a/shared/toktrack-version.d.ts b/shared/toktrack-version.d.ts index 582efb4..c1aa0f5 100644 --- a/shared/toktrack-version.d.ts +++ b/shared/toktrack-version.d.ts @@ -1,6 +1,6 @@ /** Canonical npm package name used for toktrack lookups and execution. */ export const TOKTRACK_PACKAGE_NAME: 'toktrack' /** Pinned toktrack version validated by TTDash. */ -export const TOKTRACK_VERSION: '2.6.0' +export const TOKTRACK_VERSION: string /** Fully qualified toktrack package spec used by npm and bun executors. 
*/ -export const TOKTRACK_PACKAGE_SPEC: 'toktrack@2.6.0' +export const TOKTRACK_PACKAGE_SPEC: `toktrack@${string}` diff --git a/shared/toktrack-version.js b/shared/toktrack-version.js index 4f99f0c..2d25681 100644 --- a/shared/toktrack-version.js +++ b/shared/toktrack-version.js @@ -1,5 +1,16 @@ +const packageJson = require('../package.json') + const TOKTRACK_PACKAGE_NAME = 'toktrack' -const TOKTRACK_VERSION = '2.6.0' +const EXACT_SEMVER_PATTERN = + /^(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)(?:-[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?(?:\+[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?$/ +const TOKTRACK_VERSION = packageJson.dependencies?.[TOKTRACK_PACKAGE_NAME] + +if (typeof TOKTRACK_VERSION !== 'string' || !EXACT_SEMVER_PATTERN.test(TOKTRACK_VERSION)) { + throw new Error( + `package.json dependencies.${TOKTRACK_PACKAGE_NAME} must be pinned to an exact SemVer version.`, + ) +} + const TOKTRACK_PACKAGE_SPEC = `${TOKTRACK_PACKAGE_NAME}@${TOKTRACK_VERSION}` module.exports = { diff --git a/tests/unit/toktrack-version.test.ts b/tests/unit/toktrack-version.test.ts index 7fdce73..ed672a9 100644 --- a/tests/unit/toktrack-version.test.ts +++ b/tests/unit/toktrack-version.test.ts @@ -1,4 +1,7 @@ import { createRequire } from 'node:module' +import { readdirSync, readFileSync } from 'node:fs' +import path from 'node:path' +import { fileURLToPath } from 'node:url' import { describe, expect, it } from 'vitest' import { TOKTRACK_PACKAGE_SPEC, TOKTRACK_VERSION } from '../../shared/toktrack-version.js' @@ -6,13 +9,116 @@ const require = createRequire(import.meta.url) const packageJson = require('../../package.json') as { dependencies?: Record<string, string> } +const packageLockJson = require('../../package-lock.json') as { + packages?: Record< + string, + { + dependencies?: Record<string, string> + version?: string + } + > +} + +const repoRoot = path.resolve(path.dirname(fileURLToPath(import.meta.url)), '../..') +const exactSemverPattern = +
/^(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)(?:-[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?(?:\+[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?$/ +const hardcodedToktrackVersionPattern = + /(?:toktrack@\d+\.\d+\.\d+(?:[-+][0-9A-Za-z.-]+)?|TOKTRACK_(?:VERSION|PACKAGE_SPEC):\s*['"`][^'"`]*\d+\.\d+\.\d+)/ +const scannedExtensions = new Set([ + '.bat', + '.cjs', + '.html', + '.js', + '.json', + '.md', + '.mjs', + '.sh', + '.ts', + '.tsx', + '.yaml', + '.yml', +]) +const excludedDirectories = new Set([ + '.git', + '.tmp-playwright', + 'coverage', + 'dist', + 'docs/review', + 'node_modules', + 'playwright-report', + 'test-results', +]) +const excludedFiles = new Set(['bun.lock', 'CHANGELOG.md', 'package-lock.json']) + +function getToktrackDependency() { + const version = packageJson.dependencies?.toktrack + if (typeof version !== 'string') { + throw new Error('package.json dependencies.toktrack must be defined.') + } + + return version +} + +function shouldSkipPath(relativePath: string) { + return ( + excludedFiles.has(relativePath) || + Array.from(excludedDirectories).some( + (directory) => relativePath === directory || relativePath.startsWith(`${directory}/`), + ) + ) +} + +function collectScannedFiles(directory: string, collected: string[] = []) { + for (const entry of readdirSync(directory, { withFileTypes: true })) { + const absolutePath = path.join(directory, entry.name) + const relativePath = path.relative(repoRoot, absolutePath) + + if (shouldSkipPath(relativePath)) continue + + if (entry.isDirectory()) { + collectScannedFiles(absolutePath, collected) + continue + } + + if (entry.isFile() && scannedExtensions.has(path.extname(entry.name))) { + collected.push(relativePath) + } + } + + return collected +} describe('toktrack version constants', () => { it('keeps the shared pinned version aligned with package.json', () => { - expect(TOKTRACK_VERSION).toBe(packageJson.dependencies?.toktrack) + expect(TOKTRACK_VERSION).toBe(getToktrackDependency()) }) it('builds the package spec from 
the shared pinned version', () => { - expect(TOKTRACK_PACKAGE_SPEC).toBe(`toktrack@${packageJson.dependencies?.toktrack}`) + expect(TOKTRACK_PACKAGE_SPEC).toBe(`toktrack@${getToktrackDependency()}`) + }) + + it('keeps the package dependency pinned to an exact SemVer version', () => { + expect(getToktrackDependency()).toMatch(exactSemverPattern) + }) + + it('keeps npm and bun lockfiles aligned with package.json', () => { + const toktrackVersion = getToktrackDependency() + const rootPackage = packageLockJson.packages?.[''] + const installedToktrackPackage = packageLockJson.packages?.['node_modules/toktrack'] + const bunLock = readFileSync(path.join(repoRoot, 'bun.lock'), 'utf8') + + expect(rootPackage?.dependencies?.toktrack).toBe(toktrackVersion) + expect(installedToktrackPackage?.version).toBe(toktrackVersion) + expect(bunLock).toContain(`"toktrack": "${toktrackVersion}",`) + expect(bunLock).toContain(`"toktrack": ["toktrack@${toktrackVersion}",`) + }) + + it('keeps toktrack package specs free of hardcoded versions outside managed files', () => { + const offenders = collectScannedFiles(repoRoot).filter((relativePath) => { + const content = readFileSync(path.join(repoRoot, relativePath), 'utf8') + return hardcodedToktrackVersionPattern.test(content) + }) + + expect(offenders).toEqual([]) }) }) From 5f69ade6acff4dbd3b0937b19010f53bc9c2e176 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Tue, 28 Apr 2026 20:30:39 +0200 Subject: [PATCH 02/42] Add test system review --- docs/review/test-review.md | 399 +++++++++++++++++++++++++++++++++++++ 1 file changed, 399 insertions(+) create mode 100644 docs/review/test-review.md diff --git a/docs/review/test-review.md b/docs/review/test-review.md new file mode 100644 index 0000000..05a99c5 --- /dev/null +++ b/docs/review/test-review.md @@ -0,0 +1,399 @@ +# Test Review + +## Kurzfazit + +Die Testbasis ist breit, aber das Testsystem ist noch nicht auf maximale lokale und CI-Performance +ausgelegt. 
Der groesste Engpass ist nicht der Build, sondern die Kombination aus serieller Pipeline, +nicht isolierbarer Playwright-Ausfuehrung und hohem `frontend/jsdom`-Overhead. + +Die wichtigsten Ziele fuer die naechste Umbauphase: + +- E2E-Tests muessen worker-isoliert laufen koennen, ohne gemeinsamen Server- oder Datenzustand. +- Die CI-Pipeline soll aus unabhaengigen Jobs bestehen statt aus einem langen seriellen Job. +- Command-Palette- und Dashboard-Composition-Tests sollen mehr Logik in Vitest beweisen und weniger + Browserzeit verbrauchen. +- Coverage soll risikogewichtet verbessert werden: zentrale Runtime- und Composition-Module zuerst, + nicht blind globale Thresholds erhoehen. + +## Baseline und Bottlenecks + +Gemessene lokale Baseline am 2026-04-28: + +| Gate | Zeit | Bewertung | +| --------------------------- | ----------: | ---------------------------------------------------- | +| `npm run check` | ca. `25s` | Statische Checks laufen komplett seriell. | +| `npm run test:architecture` | ca. `4.5s` | Stabil, aber aktuell explizit ohne File-Parallelism. | +| `npm run test:unit` | ca. `28s` | Wird vom `frontend`-Projekt dominiert. | +| `npm run test:timings` | ca. `30s` | Coverage aktiv, gute Timing-Signale. | +| `npm run build:app` | ca. `0.75s` | Kein relevanter Bottleneck. | +| `npm run verify:package` | ca. `7.4s` | Wertvoller Smoke, aber seriell im Full-Gate. | +| `npm run test:e2e:ci` | ca. `47s` | Groesster Einzelblock, absichtlich single-worker. | + +Vitest-Projekte isoliert: + +| Projekt | Zeit | Hauptkosten | +| -------------------------------------- | ----------: | ---------------------------------------------------------------- | +| `frontend` | ca. `18.4s` | `jsdom` environment, imports, React setup, viele kleine Dateien. | +| `unit` | ca. `4.4s` | Einzelne echte Prozess-/Timer-Tests dominieren. | +| `integration + integration-background` | ca. `4.6s` | Subprozesse und lokale Server, aber insgesamt gesund. | +| `architecture` | ca. 
`4.5s` | Source-Graph-/AST-Arbeit, aktuell seriell konfiguriert. | + +Playwright-Hotspots aus dem JUnit-Report: + +| Test | Zeit | +| ------------------------------------------------------------------ | ----------: | +| `command-palette.spec.ts` - dashboard section navigation | ca. `14.5s` | +| `command-palette.spec.ts` - analysis section navigation | ca. `5.5s` | +| `command-palette.spec.ts` - scroll and filter navigation | ca. `5.1s` | +| `dashboard-settings-backups.spec.ts` - settings and backup imports | ca. `4.4s` | + +Ein Testlauf mit `npx playwright test --workers=2` war schneller (`ca. 35s`), aber nicht robust: +ein Settings-/Backup-Test erwartete `6` Usage-Tage und bekam `8`. Das ist echte State-Leakage, kein +Timing-Artefakt. + +## Was bereits gut ist + +- Die Testpyramide ist grundsaetzlich vorhanden: Unit, Frontend/jsdom, Integration, Architektur und + E2E sind getrennt. +- Integration-Tests nutzen echte Server/Subprozesse nur dort, wo Prozess- oder CLI-Verhalten relevant + ist. +- `test:timings` liefert verwertbare Slow-Suite- und Slow-Test-Auswertungen. +- Playwright-E2E deckt wichtige echte Journeys ab: Load/Upload, Forecast/Filter, Settings/Backups, + Reporting und Command Palette. +- Coverage umfasst Produkt-Runtime wie `server/`, `shared/`, `src/` und `usage-normalizer.js`, nicht + nur Frontend-Ausschnitte. + +## Findings + +### H-01 - Playwright ist nicht worker-isoliert + +**Referenzen:** `playwright.config.ts`, `scripts/start-test-server.js`, `tests/e2e/helpers.ts` + +Der Playwright-Server nutzt eine gemeinsame Runtime unter `.tmp-playwright/app`. Beim Start wird dieser +Ordner geloescht und neu aufgebaut. Die Testhelfer lesen ausserdem eine globale Auth-Session und +mutieren globalen App-Zustand ueber `/api/usage` und `/api/settings`. + +Das blockiert sichere Parallelisierung: + +- Mehrere Worker koennen denselben Datenordner loeschen oder ueberschreiben. 
+- `resetAppState()` loescht globale Usage/Settings waehrend andere Tests dieselbe App verwenden. +- Relative `page.request`-Aufrufe haengen am gemeinsamen `baseURL`. +- E2E-Tests, die Daten importieren oder loeschen, beeinflussen andere E2E-Tests. + +**Impact:** `workers > 1` ist schneller, aber nicht verlaesslich. Der aktuelle CI-Befehl +`test:e2e:ci` erzwingt deshalb `--workers=1`, wodurch E2E zum groessten Full-Gate-Bottleneck wird. + +**Improvement:** E2E muss auf worker-scoped App-Instanzen umgestellt werden: + +- Jeder Playwright-Worker bekommt eigenen Port, Runtime-Root, Cache-, Config- und Data-Dir. +- Jeder Worker startet seinen eigenen Testserver und liest seine eigene Auth-Session. +- Die Playwright-Fixture ueberschreibt `baseURL` worker-spezifisch. +- `resetAppState()` darf nur noch den Server des aktuellen Workers betreffen. +- Der globale `webServer` in `playwright.config.ts` wird fuer die parallele Suite durch eine + worker-scoped Server-Fixture ersetzt oder er startet mehrere isolierte Server vorab. + +Akzeptanzkriterium fuer die spaetere Umsetzung: `npx playwright test --workers=2` und danach +`--workers=4` laufen mehrfach gruen, ohne Usage-Day-Mismatch, `socket hang up` oder Auth-Session-Race. + +### H-02 - CI ist ein monolithischer serieller Full-Gate + +**Referenzen:** `.github/workflows/ci.yml`, `package.json` + +Der Haupt-CI-Job fuehrt Format, Lint, Docstring-Lint, Dependency-Cruiser, TypeScript, Architecture, +Vitest-Coverage, Build, Package-Smoke, Playwright-Install und E2E nacheinander aus. + +Das ist robust, aber nicht maximal performant: + +- Unabhaengige Gates warten aufeinander. +- Ein langer Job nutzt GitHub-Runner-Parallelitaet nicht aus. +- Vitest-Projekte werden nicht als CI-Matrix genutzt. +- Package-Smoke und E2E laufen nach allen anderen Gates, obwohl sie nur den gebauten App-Artifact + benoetigen. + +**Improvement:** CI als DAG aufbauen: + +- `static`: Format, Lint, Dependency-Cruiser, TypeScript. 
+- `vitest-unit`, `vitest-frontend`, `vitest-integration`, `vitest-architecture`: getrennte Jobs. +- `build`: erzeugt `dist/` als Artifact. +- `package-smoke`: nutzt `dist/` Artifact und prueft Pack/Install/CLI/Server. +- `e2e`: nutzt `dist/` Artifact und worker-isolierte Playwright-Server. + +Akzeptanzkriterium: Full-CI-Walltime sinkt deutlich, ohne Testumfang zu reduzieren. Jeder Job laedt +seine eigenen Reports hoch und blockiert nicht durch gemeinsame Output-Pfade. + +### H-03 - Command-Palette-E2E mischt zu viele Testarten + +**Referenzen:** `tests/e2e/command-palette.spec.ts`, +`src/components/features/command-palette/CommandPalette.tsx` + +Die Command-Palette-Suite testet gleichzeitig: + +- Vollstaendige Command-Liste. +- Search aliases und Scoring. +- Action command execution. +- Filter- und View-State. +- Dynamische Provider-/Model-Commands. +- Scroll-Ziele fuer fast alle Dashboard-Sections. +- Theme, Sprache, Help und Quick-Select. + +Das ist ein klassischer Browser-Monolith: sehr wertvoll als Smoke, aber zu teuer und zu breit als +primaere Contract-Abdeckung. Besonders Scroll-Navigation durch viele Sections verursacht hohe +Browserzeit. + +**Impact:** Ein einzelnes Feature dominiert den E2E-Gate, obwohl viele Assertions keine echte +Browser-Integration brauchen. + +**Improvement:** Command-Palette in testbare Logik und E2E-Smoke trennen: + +- Pure Command-Erzeugung, Search-Scoring, Quick-Select-Mapping und Section-Command-Filterung in eine + testbare Helper-/Builder-Schicht extrahieren. +- Vitest-Tests fuer alle Command-IDs, Gruppen, Aliases, Visibility-Regeln, Provider/Model-Dynamik und + Section-Order. +- E2E nur fuer wenige representative Flows behalten: + - Palette oeffnet per Keyboard. + - Ein Action-Command triggert Download oder Dialog. + - Ein Filter-Command veraendert sichtbaren UI-State. + - Ein Section-Command scrollt zu genau einer representative Section. 
+ +Akzeptanzkriterium: Command-Palette-E2E-Zeit sinkt signifikant, waehrend die Command-Contract-Coverage +in Vitest steigt. + +### H-04 - Report- und Coverage-Ausgaben sind nicht matrix-sicher + +**Referenzen:** `vitest.config.ts`, `playwright.config.ts`, `package.json` + +Vitest schreibt standardmaessig nach `test-results/vitest.junit.xml`. Playwright schreibt nach +`test-results/playwright.junit.xml` und `playwright-report/`. Das ist fuer einen seriellen lokalen Lauf +okay, kollidiert aber bei parallelen CI-Jobs, Shards oder mehreren Vitest-Prozessen. + +**Impact:** Sobald Vitest-Projekte oder Playwright-Shards parallel laufen, koennen Reports +ueberschrieben oder unvollstaendig hochgeladen werden. + +**Improvement:** Output-Pfade pro Projekt/Shard/Job trennen: + +- `test-results/vitest-unit.junit.xml` +- `test-results/vitest-frontend-1.junit.xml` +- `test-results/vitest-integration.junit.xml` +- `test-results/playwright-e2e-1.junit.xml` +- Coverage je Shard in eigene Verzeichnisse schreiben und danach bewusst mergen oder dediziert in + einem Coverage-Job erzeugen. + +### M-01 - `frontend/jsdom` ist der Vitest-Hauptkostentreiber + +**Referenzen:** `vitest.config.ts`, `vitest.setup.frontend.ts`, `tests/frontend` + +Das `frontend`-Projekt hat `75` Testdateien und braucht isoliert ca. `18.4s`. Die aggregierten +Vitest-Metriken zeigen hohe Kosten fuer `environment`, `import` und `setup`. Ein Lauf mit mehr +Workern (`--maxWorkers=80%`) war sogar etwas langsamer als die aktuelle `50%`-Einstellung. + +**Impact:** Einfach mehr Worker zu geben ist aktuell keine Loesung. Die Kosten liegen stark in +Environment-Erzeugung und schweren Importbaeumen. + +**Improvement:** + +- Aktuelle `maxWorkers: '50%'` beibehalten, bis Setup-/Import-Kosten reduziert sind. +- Frontend-Tests nach schweren Importbaeumen pruefen und dort konsolidieren, wo viele kleine Dateien + dieselbe Dashboard-/Chart-Struktur laden. 
+- Gemeinsame Recharts-, IntersectionObserver-, ResizeObserver- und Motion-Mocks zentralisieren. +- Kleine pure Daten-/Formatierungsfaelle aus jsdom-Tests in Node-Unit-Tests verschieben. +- Optional nach dem Umbau `happy-dom` nur dann evaluieren, wenn React/Radix/cmdk-Tests kompatibel + bleiben. Nicht blind wechseln. + +### M-02 - Node-Projekte laden unnoetiges React-Test-Setup + +**Referenzen:** `vitest.setup.ts`, `vitest.setup.frontend.ts`, `vitest.config.ts` + +`vitest.setup.ts` importiert `@testing-library/jest-dom/vitest` und `@testing-library/react` +`cleanup()`. Diese Datei wird auch fuer Node-only Projekte wie `unit`, `integration`, +`integration-background` und `architecture` geladen. + +**Impact:** Node-Tests zahlen React-/Testing-Library-Importkosten, obwohl sie kein DOM brauchen. + +**Improvement:** Setup splitten: + +- `vitest.setup.node.ts`: nur Timer/MOCK cleanup, keine Testing Library. +- `vitest.setup.frontend.ts`: `jest-dom`, `cleanup`, i18n, browser API shims. +- Architecture entweder ohne Setup oder mit minimalem Node-Setup. + +Akzeptanzkriterium: Node-Projekt-Setupzeit sinkt, ohne Cleanup-Sicherheit in Frontend-Tests zu +verlieren. + +### M-03 - Coverage ist global gruen, aber risikogewichtet schwach + +Die globale Coverage lag zuletzt bei ca. `75%` Statements, `64%` Branches, `76%` Functions und +`76%` Lines. Das reicht fuer den Gate, verdeckt aber zentrale Luecken. + +Wichtige Low-Coverage-Module: + +| Modul | Lines | Branches | Risiko | +| --------------------------------------------------------------------------------- | --------: | --------: | --------------------------------------------------- | +| `server/app-runtime.js` | `0%` | `0%` | Runtime-Wiring, CLI/Server-Komposition. | +| `DashboardSections.tsx` | ca. `22%` | `0%` | Haupt-Komposition der Dashboard-Sections. | +| `server/background-runtime.js` | ca. `40%` | ca. `41%` | Hintergrundinstanzen, Registry, Prozess-Handling. | +| `server/data-runtime.js` | ca. `50%` | ca. 
`29%` | Daten-/Settings-Persistenz, Locking, Import/Export. | +| `server/http-router.js` | ca. `54%` | ca. `39%` | API-Routing, Mutation Guards, Fehlerfaelle. | +| `CommandPalette.tsx` | ca. `47%` | ca. `35%` | Keyboard/Search/Command-Vertraege. | +| `CostByWeekday.tsx`, `ModelMix.tsx`, `TokensOverTime.tsx`, `PeriodComparison.tsx` | `0%` | `0%` | Lazy dashboard sections ohne Vitest-Signal. | + +**Improvement:** Coverage risikogewichtet erhoehen: + +- `server/app-runtime.js`: Wiring-Test mit gemockten Factory-Abhaengigkeiten, der sicherstellt, dass + Runtimes korrekt komponiert werden. +- `server/data-runtime.js`: Pfadprioritaeten, Settings-/Usage-Import, Lock-Timeouts, stale locks, + corrupted state und Recovery. +- `server/http-router.js`: Route-Matrix fuer Methoden, Auth, Mutation Guards, Payload-Fehler, + statische Dateien, SPA-Fallback und API-Prefix. +- `DashboardSections.tsx`: Section visibility/order, request section availability, lazy fallback, + forecast dialog, preload scheduling und ErrorBoundary. +- `CommandPalette.tsx`: Command Builder/Scoring/Visibility als pure Unit/Frontend-Tests. +- Lazy charts: je ein kleiner render-/empty-state-/branch Test pro `0%` Modul. + +Nicht empfohlen: globale Thresholds sofort stark erhoehen. Zuerst gezielte Coverage fuer die oben +genannten Module schaffen, dann file-/directory-spezifische Mindestwerte einfuehren. + +### M-04 - Einige Unit-Tests nutzen echte Zeit oder echte Prozesse + +**Referenzen:** `tests/unit/server-helpers-runner-process.test.ts`, +`tests/unit/server-helpers-file-locks.test.ts`, `tests/integration/server-auto-import.test.ts` + +Einzelne Tests sind bewusst prozessnah. Das ist richtig, weil genau Prozess-/CLI-Verhalten getestet +wird. Einige Faelle nutzen aber echte Sleeps, echte Child-Prozesse oder Timeout-Fenster. + +**Impact:** Diese Tests sind wertvoll, aber koennen lokal variieren und setzen eine Untergrenze fuer +Suite-Zeit. 
+ +**Improvement:** + +- Prozessnahe Tests behalten, aber streng auf wenige Smoke-Faelle begrenzen. +- Timeout-/Fallback-Logik dort, wo moeglich, ueber injizierte Clock/Sleep/Spawn-Fakes testen. +- Echte Subprozess-Tests als Integration einstufen, wenn sie Shell/PATH/Cross-Process-Verhalten + pruefen. +- Slow-Test-Budget dokumentieren: kein neuer Unit-Test sollte ohne Begruendung > `300ms` brauchen. + +### M-05 - Statische Checks haben Doppelarbeit + +**Referenzen:** `package.json`, `eslint.config.mjs` + +`npm run check` fuehrt `eslint .` und danach `lint:docstrings` als zweiten ESLint-Lauf aus. Der zweite +Lauf ist enger, aber Regeln und Dateien ueberschneiden sich deutlich. + +**Impact:** Static-Gate-Zeit steigt, obwohl die meisten Inputs bereits im ersten ESLint-Lauf gesehen +werden. + +**Improvement:** + +- Pruefen, ob `lint:docstrings` vollstaendig in `eslint .` enthalten ist. Falls ja, `lint:docstrings` + als separaten Gate entfernen. +- Falls getrennt noetig, ESLint Cache aktivieren und unterschiedliche Cache-Keys fuer beide Laeufe + verwenden. +- Prettier Cache und TypeScript incremental fuer lokale Gates nutzen. + +### M-06 - Package-Smoke ist wichtig, aber gehoert in einen eigenen CI-Job + +**Referenzen:** `scripts/verify-package.js`, `package.json` + +`verify:package` packt das Projekt, installiert den Tarball in ein Temp-Projekt, prueft CLI-Help, +`npm exec` und Server-Startup. Das ist ein guter Release-Smoke, aber er muss nicht denselben seriellen +Job wie Unit, Integration und E2E blockieren. + +**Impact:** Im lokalen Full-Gate addiert er ca. `7s`. In CI kann er parallel zu E2E laufen, sobald +`dist/` als Artifact gebaut wurde. + +**Improvement:** + +- `build` erzeugt `dist/` einmal. +- `package-smoke` und `e2e` nutzen dieses Artifact parallel. +- Package-Smoke bleibt im Full-Gate, aber nicht als serieller Nachklapp nach allen Tests. 
+
+### M-07 - Timing-Regressions sind sichtbar, aber nicht budgetiert
+
+`test:timings` listet Slow-Suites und Slow-Tests, erzwingt aber kein Budget und vergleicht nicht gegen
+eine Baseline.
+
+**Improvement:**
+
+- Im Review/CI einen Timing-Budget-Abschnitt pflegen.
+- Optional spaeter Script erweitern:
+  - Warnung fuer neue Tests > `500ms`.
+  - Warnung fuer Suites > `2s`.
+  - Separates Ranking nach Projekt.
+  - Trendvergleich gegen eine gespeicherte Baseline in CI-Artefakten.
+
+### N-01 - macOS-Sandbox kann Playwright-False-Negatives erzeugen
+
+Ein sandboxed Playwright-Lauf scheiterte mit Chromium
+`MachPortRendezvousServer ... Permission denied`. Derselbe Lauf ausserhalb der Sandbox bestand.
+
+**Impact:** Agent-/Sandbox-Umgebungen koennen Browser-Failures erzeugen, die nicht repo-bezogen sind.
+
+**Improvement:** In Review- und Agent-Doku festhalten: Playwright auf macOS ggf. ausserhalb strenger
+Sandbox ausfuehren. Das ist kein Produktfix.
+
+## Zielarchitektur fuer das Testsystem
+
+### Lokale Befehle
+
+Empfohlene Zielstruktur:
+
+- `npm run test:static`: Format, Lint, Dependency-Cruiser, TypeScript.
+- `npm run test:vitest`: alle Vitest-Projekte ohne Coverage.
+- `npm run test:vitest:coverage`: Coverage-Gate mit getrennten Outputs.
+- `npm run test:e2e:parallel`: Playwright mit worker-isolierter App.
+- `npm run verify:ci`: CI-kompatibler Full-Gate ohne doppelte Builds.
+- `npm run verify:full`: lokaler kompletter Gate, der dieselbe Logik wie CI nutzt.
+
+### E2E-Isolation
+
+Die spaetere Playwright-Fixture soll worker-scoped sein:
+
+- Worker startet eigenen Serverprozess.
+- Worker bekommt eigenen Runtime-Root: `.tmp-playwright/workers/<workerIndex>/app`.
+- Worker bekommt eigenen Port, z. B. ueber freien Port oder deterministisch aus Basisport und Worker.
+- Worker wartet auf eigene `session-auth.json`.
+- `baseURL`, `bootstrapUrl` und Auth-Header kommen aus der Worker-Fixture.
+- Tests importieren `test` und `expect` aus einem lokalen E2E-Fixture-Modul statt direkt aus + `@playwright/test`. + +Damit koennen Tests parallel laufen, ohne Daten zu teilen. + +### CI-DAG + +Ziel-CI: + +- `static` +- `vitest-unit` +- `vitest-frontend` optional geshardet +- `vitest-integration` +- `vitest-architecture` +- `build` +- `package-smoke` mit Build-Artifact +- `e2e` mit Build-Artifact und worker-isolierten Servern +- `reports` oder Artifact Upload pro Job + +Coverage-Strategie: + +- Einfachste robuste Variante: ein dedizierter `coverage`-Job, der alle Vitest-Projekte mit Coverage + seriell oder kontrolliert parallel ausfuehrt. +- Schnellere Variante: Coverage je Vitest-Shard erzeugen und danach mergen. Diese Variante erst + waehlen, wenn Report-Merging sauber automatisiert ist. + +## Priorisierte Roadmap + +1. **Dokumentation aktualisieren.** Diese Review-Datei als aktuelle Master-Analyse nutzen. +2. **E2E worker-isolieren.** Ohne das bleibt jede Playwright-Parallelisierung instabil. +3. **Command-Palette-Teststrategie umbauen.** Contract-Logik in Vitest, wenige Browser-Smokes. +4. **Vitest-Setup splitten.** Node-Projekte von React Testing Library und `jest-dom` entkoppeln. +5. **Coverage-Luecken gezielt schliessen.** Runtime-Komposition und Dashboard-Composition zuerst. +6. **CI in Jobs/Matrix aufteilen.** Erst nachdem Output-Pfade und E2E-Isolation parallel-sicher sind. +7. **Budgets einfuehren.** Slow-Test- und Coverage-Budgets als Regressionsschutz. + +## Akzeptanzkriterien fuer die spaetere Umsetzung + +- `npx playwright test --workers=2` und `--workers=4` laufen mehrfach gruen. +- `npm run verify:full` laeuft lokal vollstaendig und robust unter der neuen Struktur. +- CI-Full-Gate nutzt mehrere Jobs und ist deutlich schneller als der aktuelle serielle Job. +- Keine JUnit-, Coverage- oder Playwright-Reports werden durch parallele Jobs ueberschrieben. +- `frontend/jsdom`-Zeit sinkt oder bleibt trotz hoeherer Coverage stabil. 
+- Coverage steigt gezielt bei `server/app-runtime.js`, `server/data-runtime.js`, + `server/http-router.js`, `DashboardSections.tsx` und `CommandPalette.tsx`. +- E2E-Suite bleibt fokussiert auf echte Browserintegration statt auf exhaustive Contract-Tests. From c4abb2bd45fafa87b7e35573b146a7f201eb6c4f Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Tue, 28 Apr 2026 20:50:39 +0200 Subject: [PATCH 03/42] Make Vitest setup and reports parallel-safe --- docs/testing.md | 3 +- package.json | 12 ++++--- scripts/report-test-timings.js | 2 +- tests/unit/vitest-coverage-config.test.ts | 42 ++++++++++++++++++++--- vitest.config.ts | 17 +++++---- vitest.setup.frontend.ts | 8 ++++- vitest.setup.node.ts | 7 ++++ vitest.setup.ts | 14 +------- 8 files changed, 73 insertions(+), 32 deletions(-) create mode 100644 vitest.setup.node.ts diff --git a/docs/testing.md b/docs/testing.md index 48295c8..60e56d7 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -31,6 +31,7 @@ Architecture constraints are documented separately in [`docs/architecture.md`](. ## Frontend Defaults +- `vitest.setup.node.ts` is the minimal shared cleanup for Node-only projects. - `vitest.setup.frontend.ts` is the shared place for `jsdom` defaults such as: - i18n bootstrapping - `matchMedia` @@ -98,7 +99,7 @@ For `tests/architecture`, prefer the shared source graph helper for simple file, `npm run test:unit:coverage` reports product-runtime coverage. The configured coverage scope intentionally includes frontend runtime modules, the local server runtime, shared runtime contracts, and `usage-normalizer.js` instead of only the historically high-signal frontend subset. -The coverage and timing commands use explicit `dot` and `junit` Vitest reporters. Keep those reporters on both scripts so non-interactive gates emit compact progress and do not depend on silent reporter paths. +The coverage and timing commands use explicit `dot` and `junit` Vitest reporters. 
Keep those reporters on both scripts so non-interactive gates emit compact progress and do not depend on silent reporter paths. They intentionally write separate JUnit files, `test-results/vitest-coverage.junit.xml` and `test-results/vitest-timings.junit.xml`, so coverage and timing diagnostics do not contend for the same report path. The global thresholds are ratchets for that broader denominator: diff --git a/package.json b/package.json index 134dcff..ffd59a3 100644 --- a/package.json +++ b/package.json @@ -29,11 +29,15 @@ "start": "node server.js", "start:test-server": "node scripts/start-test-server.js", "test": "npm run test:architecture && npm run test:unit", - "test:architecture": "vitest run --project architecture", - "test:unit": "vitest run --project unit --project frontend --project integration --project integration-background", + "test:architecture": "vitest run --project architecture --outputFile.junit=./test-results/vitest-architecture.junit.xml", + "test:unit": "vitest run --project unit --project frontend --project integration --project integration-background --outputFile.junit=./test-results/vitest-main.junit.xml", "test:unit:watch": "vitest --project unit --project frontend --project integration --project integration-background", - "test:unit:coverage": "vitest run --coverage --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest.junit.xml", - "test:timings": "vitest run --coverage --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest.junit.xml && node scripts/report-test-timings.js", + "test:vitest:unit": "vitest run --project unit --outputFile.junit=./test-results/vitest-unit.junit.xml", + "test:vitest:frontend": "vitest run --project frontend --outputFile.junit=./test-results/vitest-frontend.junit.xml", + "test:vitest:integration": 
"vitest run --project integration --outputFile.junit=./test-results/vitest-integration.junit.xml", + "test:vitest:integration-background": "vitest run --project integration-background --outputFile.junit=./test-results/vitest-integration-background.junit.xml", + "test:unit:coverage": "vitest run --coverage --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-coverage.junit.xml", + "test:timings": "vitest run --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-timings.junit.xml && node scripts/report-test-timings.js ./test-results/vitest-timings.junit.xml", "test:e2e": "npm run build:app && playwright test", "test:e2e:ci": "playwright test --workers=1", "test:all": "npm run verify:full", diff --git a/scripts/report-test-timings.js b/scripts/report-test-timings.js index 544fefb..99a686f 100644 --- a/scripts/report-test-timings.js +++ b/scripts/report-test-timings.js @@ -4,7 +4,7 @@ const fs = require('fs'); const path = require('path'); const ROOT = path.resolve(__dirname, '..'); -const DEFAULT_JUNIT_PATH = path.join(ROOT, 'test-results', 'vitest.junit.xml'); +const DEFAULT_JUNIT_PATH = path.join(ROOT, 'test-results', 'vitest-timings.junit.xml'); function parseAttributes(tag) { const attributes = {}; diff --git a/tests/unit/vitest-coverage-config.test.ts b/tests/unit/vitest-coverage-config.test.ts index 135a409..197fd87 100644 --- a/tests/unit/vitest-coverage-config.test.ts +++ b/tests/unit/vitest-coverage-config.test.ts @@ -18,6 +18,13 @@ type CoverageConfig = { type ResolvedVitestConfig = { test?: { coverage?: CoverageConfig + projects?: Array<{ + test?: { + environment?: string + name?: string + setupFiles?: string[] + } + }> } } @@ -63,15 +70,40 @@ describe('vitest coverage configuration', () => { }) it('keeps coverage-heavy scripts on bounded 
non-interactive reporters', () => { - const coverageScripts = [ - packageJson.scripts['test:unit:coverage'], - packageJson.scripts['test:timings'], - ] + const coverageScript = packageJson.scripts['test:unit:coverage'] + const timingsScript = packageJson.scripts['test:timings'] + const coverageScripts = [coverageScript, timingsScript] for (const script of coverageScripts) { expect(script).toContain('--reporter=dot') expect(script).toContain('--reporter=junit') - expect(script).toContain('--outputFile.junit=./test-results/vitest.junit.xml') + } + + expect(coverageScript).toContain('--outputFile.junit=./test-results/vitest-coverage.junit.xml') + expect(timingsScript).toContain('--outputFile.junit=./test-results/vitest-timings.junit.xml') + expect(timingsScript).toContain( + 'node scripts/report-test-timings.js ./test-results/vitest-timings.junit.xml', + ) + }) + + it('keeps React Testing Library setup out of Node-only projects', async () => { + const config = await resolveVitestConfig() + const projects = config.test?.projects ?? [] + const expectedSetupByProject = new Map([ + ['architecture', ['./vitest.setup.node.ts']], + ['unit', ['./vitest.setup.node.ts']], + ['frontend', ['./vitest.setup.node.ts', './vitest.setup.frontend.ts']], + ['integration', ['./vitest.setup.node.ts']], + ['integration-background', ['./vitest.setup.node.ts']], + ]) + + expect(projects).toHaveLength(expectedSetupByProject.size) + + for (const project of projects) { + const name = project.test?.name + expect(name).toBeDefined() + expect(expectedSetupByProject.has(name ?? '')).toBe(true) + expect(project.test?.setupFiles).toEqual(expectedSetupByProject.get(name ?? 
'')) } }) }) diff --git a/vitest.config.ts b/vitest.config.ts index 2033a8c..c93ea9f 100644 --- a/vitest.config.ts +++ b/vitest.config.ts @@ -1,6 +1,9 @@ import { defineConfig, mergeConfig } from 'vitest/config' import viteConfig from './vite.config' +const junitOutputFile = process.env.VITEST_JUNIT_FILE || './test-results/vitest.junit.xml' +const coverageReportsDirectory = process.env.VITEST_COVERAGE_DIR || './coverage' + export default defineConfig(async () => { // Resolve the imported Vite config explicitly before mergeConfig because // vite.config may export either a config object or an async config factory. @@ -22,12 +25,12 @@ export default defineConfig(async () => { include: [], reporters: ['default', 'junit'], outputFile: { - junit: './test-results/vitest.junit.xml', + junit: junitOutputFile, }, coverage: { provider: 'v8', reporter: ['text', 'html', 'lcov'], - reportsDirectory: './coverage', + reportsDirectory: coverageReportsDirectory, include: [ 'src/**/*.{ts,tsx}', 'server.js', @@ -51,7 +54,7 @@ export default defineConfig(async () => { include: ['tests/architecture/**/*.test.ts'], environment: 'node', globals: true, - setupFiles: ['./vitest.setup.ts'], + setupFiles: ['./vitest.setup.node.ts'], fileParallelism: false, sequence: { groupOrder: 0, @@ -64,7 +67,7 @@ export default defineConfig(async () => { name: 'unit', include: ['tests/unit/**/*.test.ts'], environment: 'node', - setupFiles: ['./vitest.setup.ts'], + setupFiles: ['./vitest.setup.node.ts'], maxWorkers: '80%', sequence: { groupOrder: 1, @@ -77,7 +80,7 @@ export default defineConfig(async () => { name: 'frontend', include: ['tests/frontend/**/*.test.{ts,tsx}'], environment: 'jsdom', - setupFiles: ['./vitest.setup.ts', './vitest.setup.frontend.ts'], + setupFiles: ['./vitest.setup.node.ts', './vitest.setup.frontend.ts'], maxWorkers: '50%', sequence: { groupOrder: 2, @@ -91,7 +94,7 @@ export default defineConfig(async () => { include: ['tests/integration/**/*.test.ts'], exclude: 
['tests/integration/**/*background*.test.ts'], environment: 'node', - setupFiles: ['./vitest.setup.ts'], + setupFiles: ['./vitest.setup.node.ts'], maxWorkers: '50%', sequence: { groupOrder: 3, @@ -104,7 +107,7 @@ export default defineConfig(async () => { name: 'integration-background', include: ['tests/integration/**/*background*.test.ts'], environment: 'node', - setupFiles: ['./vitest.setup.ts'], + setupFiles: ['./vitest.setup.node.ts'], maxWorkers: 2, sequence: { groupOrder: 4, diff --git a/vitest.setup.frontend.ts b/vitest.setup.frontend.ts index edaf2af..b94bb55 100644 --- a/vitest.setup.frontend.ts +++ b/vitest.setup.frontend.ts @@ -1,6 +1,12 @@ -import { beforeAll, vi } from 'vitest' +import '@testing-library/jest-dom/vitest' +import { cleanup } from '@testing-library/react' +import { afterEach, beforeAll, vi } from 'vitest' import { initI18n } from '@/lib/i18n' +afterEach(() => { + cleanup() +}) + beforeAll(async () => { await initI18n('de') diff --git a/vitest.setup.node.ts b/vitest.setup.node.ts new file mode 100644 index 0000000..4f66b25 --- /dev/null +++ b/vitest.setup.node.ts @@ -0,0 +1,7 @@ +import { afterEach, vi } from 'vitest' + +afterEach(() => { + vi.useRealTimers() + vi.restoreAllMocks() + vi.unstubAllGlobals() +}) diff --git a/vitest.setup.ts b/vitest.setup.ts index d15a884..95fc963 100644 --- a/vitest.setup.ts +++ b/vitest.setup.ts @@ -1,13 +1 @@ -import '@testing-library/jest-dom/vitest' -import { afterEach, vi } from 'vitest' -import { cleanup } from '@testing-library/react' - -afterEach(() => { - vi.useRealTimers() - vi.restoreAllMocks() - vi.unstubAllGlobals() - - if (typeof document !== 'undefined') { - cleanup() - } -}) +import './vitest.setup.node' From ab4951398320dacaeff8a430514c64fe124ac80c Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Tue, 28 Apr 2026 21:32:50 +0200 Subject: [PATCH 04/42] Isolate Playwright workers --- CONTRIBUTING.md | 4 +- README.md | 4 +- docs/testing.md | 3 +- package.json | 2 +- playwright.config.ts | 13 +- 
scripts/start-test-server.js | 14 +- tests/e2e/command-palette.spec.ts | 2 +- tests/e2e/dashboard-forecast-filters.spec.ts | 2 +- tests/e2e/dashboard-load-upload.spec.ts | 2 +- tests/e2e/dashboard-reporting.spec.ts | 2 +- tests/e2e/dashboard-settings-backups.spec.ts | 2 +- tests/e2e/fixtures.ts | 171 +++++++++++++++++++ tests/e2e/helpers.ts | 15 +- tests/unit/playwright-config.test.ts | 21 ++- 14 files changed, 224 insertions(+), 33 deletions(-) create mode 100644 tests/e2e/fixtures.ts diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7525e01..fe85a18 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -45,7 +45,7 @@ Run the main local checks: ```bash npm run verify -npm run test:e2e +npm run test:e2e:ci ``` `npm run verify` covers formatting, ESLint, `tsc --noEmit`, unit tests, the production bundle, and packaged-artifact verification. If you want the same coverage gate used in release preparation, also run: @@ -66,7 +66,7 @@ If local port `3015` is already occupied, run Playwright on another isolated por PLAYWRIGHT_TEST_PORT=3016 npm run test:e2e ``` -The Playwright suite uses an isolated local app directory under `.tmp-playwright/` and should not reuse your normal local dashboard data. `npm run verify:package` builds the real tarball and verifies that the packaged CLI can start outside the repo checkout. +The Playwright suite starts an isolated local app per worker under `.tmp-playwright/workers/` and should not reuse your normal local dashboard data. `npm run verify:package` builds the real tarball and verifies that the packaged CLI can start outside the repo checkout. 
Then manually verify the main user flows touched by your change: diff --git a/README.md b/README.md index c19f2b3..3481e1c 100644 --- a/README.md +++ b/README.md @@ -352,7 +352,7 @@ If you want to run the steps individually, use: npm run check:deps npm run test:architecture npm run test:unit:coverage -npm run test:e2e +npm run test:e2e:ci ``` To inspect the slowest suites and test cases after a Vitest run: @@ -361,7 +361,7 @@ To inspect the slowest suites and test cases after a Vitest run: npm run test:timings ``` -The Playwright suite uses its own isolated local app directory. If port `3015` is already occupied locally, run it on another isolated port: +The Playwright suite starts an isolated local app per worker under `.tmp-playwright/workers/`. If the base port `3015` is already occupied locally, run it from another base port: ```bash PLAYWRIGHT_TEST_PORT=3016 npm run test:e2e diff --git a/docs/testing.md b/docs/testing.md index 60e56d7..5e1cbcc 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -60,7 +60,7 @@ Architecture constraints are documented separately in [`docs/architecture.md`](. - motion/reveal behavior - For large server helper or integration suites, group tests by subsystem so Vitest can schedule them more efficiently. - Keep background-process integration files focused by behavior; the background Vitest project intentionally uses a small worker cap instead of one serial catch-all file or unbounded process fan-out. -- Keep Playwright files grouped by end-to-end journey, such as load/upload, forecast/filter interaction, settings/backups, reporting, and command palette behavior. Share authentication, server reset, seeding, and download helpers through `tests/e2e/helpers.ts` instead of creating new browser catch-all files. +- Keep Playwright files grouped by end-to-end journey, such as load/upload, forecast/filter interaction, settings/backups, reporting, and command palette behavior. 
Import `test` and `expect` from `tests/e2e/fixtures.ts` so each worker gets its own server, port, auth session, and runtime directory. Share authentication, server reset, seeding, and download helpers through `tests/e2e/helpers.ts` instead of creating new browser catch-all files. ## Choosing the Right Layer @@ -135,6 +135,7 @@ Prioritize targeted branch coverage in runtime-heavy modules before adding anoth - Dependency graph gate: `npm run check:deps` - Coverage-only unit/integration gate: `npm run test:unit:coverage` - Playwright only: `PLAYWRIGHT_TEST_PORT=3016 npm run test:e2e` +- CI-style Playwright smoke: `npm run test:e2e:ci` ## Architecture Guardrails diff --git a/package.json b/package.json index ffd59a3..796a7cf 100644 --- a/package.json +++ b/package.json @@ -39,7 +39,7 @@ "test:unit:coverage": "vitest run --coverage --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-coverage.junit.xml", "test:timings": "vitest run --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-timings.junit.xml && node scripts/report-test-timings.js ./test-results/vitest-timings.junit.xml", "test:e2e": "npm run build:app && playwright test", - "test:e2e:ci": "playwright test --workers=1", + "test:e2e:ci": "playwright test --workers=2", "test:all": "npm run verify:full", "docs:screenshots": "node scripts/capture-readme-screenshots.js", "deps:graph": "dependency-cruiser --config .dependency-cruiser.cjs -T archi src shared server server.js usage-normalizer.js", diff --git a/playwright.config.ts b/playwright.config.ts index a002ebe..37494c2 100644 --- a/playwright.config.ts +++ b/playwright.config.ts @@ -6,8 +6,11 @@ const baseURL = `http://${host}:${port}` export default defineConfig({ testDir: './tests/e2e', - fullyParallel: false, - workers: process.env.CI ? 
1 : undefined, + fullyParallel: true, + // fullyParallel is safe because tests/e2e/fixtures.ts starts a separate app per + // worker. process.env.CI caps workers at 2 to avoid hosted-runner CPU + // contention; pass --workers=1 if CI flakiness needs to be debugged. + workers: process.env.CI ? 2 : undefined, timeout: 30_000, reporter: [ ['list'], @@ -20,10 +23,4 @@ export default defineConfig({ screenshot: 'only-on-failure', video: 'retain-on-failure', }, - webServer: { - command: 'npm run start:test-server', - url: baseURL, - reuseExistingServer: false, - timeout: 120_000, - }, }) diff --git a/scripts/start-test-server.js b/scripts/start-test-server.js index 0fef0b8..08eb146 100644 --- a/scripts/start-test-server.js +++ b/scripts/start-test-server.js @@ -4,7 +4,19 @@ const fs = require('fs'); const path = require('path'); const root = path.resolve(__dirname, '..'); -const runtimeRoot = path.join(root, '.tmp-playwright', 'app'); +const playwrightRoot = path.join(root, '.tmp-playwright'); +const runtimeRoot = process.env.PLAYWRIGHT_TEST_RUNTIME_ROOT + ? 
path.resolve(process.env.PLAYWRIGHT_TEST_RUNTIME_ROOT) + : path.join(root, '.tmp-playwright', 'app'); + +function isPathInside(parent, child) { + const relative = path.relative(parent, child); + return relative !== '' && !relative.startsWith('..') && !path.isAbsolute(relative); +} + +if (!isPathInside(playwrightRoot, runtimeRoot)) { + throw new Error(`Refusing to delete unsafe Playwright runtime root: ${runtimeRoot}`); +} fs.rmSync(runtimeRoot, { recursive: true, force: true }); fs.mkdirSync(path.join(runtimeRoot, 'cache'), { recursive: true }); diff --git a/tests/e2e/command-palette.spec.ts b/tests/e2e/command-palette.spec.ts index 9b9b62b..5d37b2c 100644 --- a/tests/e2e/command-palette.spec.ts +++ b/tests/e2e/command-palette.spec.ts @@ -1,4 +1,4 @@ -import { expect, test, type Page } from '@playwright/test' +import { expect, test, type Page } from './fixtures' import { dailyViewPattern, mockAutoImportStream, diff --git a/tests/e2e/dashboard-forecast-filters.spec.ts b/tests/e2e/dashboard-forecast-filters.spec.ts index a300711..fe5b568 100644 --- a/tests/e2e/dashboard-forecast-filters.spec.ts +++ b/tests/e2e/dashboard-forecast-filters.spec.ts @@ -1,4 +1,4 @@ -import { expect, test } from '@playwright/test' +import { expect, test } from './fixtures' import { gotoDashboard, resetAppState, uploadSampleUsage } from './helpers' const costForecastExpandPattern = diff --git a/tests/e2e/dashboard-load-upload.spec.ts b/tests/e2e/dashboard-load-upload.spec.ts index 55af05b..34d1c3e 100644 --- a/tests/e2e/dashboard-load-upload.spec.ts +++ b/tests/e2e/dashboard-load-upload.spec.ts @@ -1,4 +1,4 @@ -import { expect, test } from '@playwright/test' +import { expect, test } from './fixtures' import { gotoDashboard, resetAppState, uploadSampleUsage } from './helpers' const importEntryButtonPattern = /^(Auto-Import|Auto import|Import)$/ diff --git a/tests/e2e/dashboard-reporting.spec.ts b/tests/e2e/dashboard-reporting.spec.ts index 43db4bc..f8d6e70 100644 --- 
a/tests/e2e/dashboard-reporting.spec.ts +++ b/tests/e2e/dashboard-reporting.spec.ts @@ -1,4 +1,4 @@ -import { expect, test } from '@playwright/test' +import { expect, test } from './fixtures' import { gotoDashboard, mockPdfReport, resetAppState, uploadSampleUsage } from './helpers' test('uses the current UI language when generating a PDF report after switching locale', async ({ diff --git a/tests/e2e/dashboard-settings-backups.spec.ts b/tests/e2e/dashboard-settings-backups.spec.ts index 7d5c277..46a7a91 100644 --- a/tests/e2e/dashboard-settings-backups.spec.ts +++ b/tests/e2e/dashboard-settings-backups.spec.ts @@ -1,5 +1,5 @@ import fsPromises from 'node:fs/promises' -import { expect, test } from '@playwright/test' +import { expect, test } from './fixtures' import { createApiAuthHeaders, createTrustedMutationHeaders, diff --git a/tests/e2e/fixtures.ts b/tests/e2e/fixtures.ts new file mode 100644 index 0000000..e8d071f --- /dev/null +++ b/tests/e2e/fixtures.ts @@ -0,0 +1,171 @@ +import fs from 'node:fs' +import path from 'node:path' +import { spawn, type ChildProcessWithoutNullStreams } from 'node:child_process' +import { test as base, expect, type Page } from '@playwright/test' + +type E2EServer = { + authSessionPath: string + baseURL: string + runtimeRoot: string +} + +type WorkerFixtures = { + e2eServer: E2EServer +} + +const startupTimeoutMs = 120_000 +const host = process.env.PLAYWRIGHT_TEST_HOST || '127.0.0.1' +const basePort = Number(process.env.PLAYWRIGHT_TEST_PORT || '3015') + +function sleep(ms: number) { + return new Promise((resolve) => setTimeout(resolve, ms)) +} + +function buildWorkerServer(workerIndex: number) { + const port = basePort + workerIndex + const runtimeRoot = path.join( + process.cwd(), + '.tmp-playwright', + 'workers', + String(workerIndex), + 'app', + ) + const authSessionPath = path.join(runtimeRoot, 'config', 'session-auth.json') + + return { + authSessionPath, + baseURL: `http://${host}:${port}`, + env: { + ...process.env, + HOST: 
host, + NO_OPEN_BROWSER: '1', + PLAYWRIGHT_TEST_HOST: host, + PLAYWRIGHT_TEST_PORT: String(port), + PLAYWRIGHT_TEST_RUNTIME_ROOT: runtimeRoot, + PORT: String(port), + }, + runtimeRoot, + } +} + +function createOutputBuffer(serverProcess: ChildProcessWithoutNullStreams) { + let output = '' + + const append = (chunk: Buffer) => { + output += chunk.toString('utf8') + if (output.length > 16_000) { + output = output.slice(-16_000) + } + } + + serverProcess.stdout.on('data', append) + serverProcess.stderr.on('data', append) + + return () => output.trim() +} + +async function waitForServerReady( + serverProcess: ChildProcessWithoutNullStreams, + baseURL: string, + authSessionPath: string, + readOutput: () => string, +) { + const deadline = Date.now() + startupTimeoutMs + let lastError = '' + + while (Date.now() < deadline) { + if (serverProcess.exitCode !== null) { + throw new Error( + `Playwright test server exited with code ${serverProcess.exitCode}.\n${readOutput()}`, + ) + } + + try { + const response = await fetch(baseURL, { + redirect: 'manual', + signal: AbortSignal.timeout(1_000), + }) + + if (response.status < 400 && fs.existsSync(authSessionPath)) { + return + } + } catch (error) { + lastError = error instanceof Error ? error.message : String(error) + } + + await sleep(250) + } + + throw new Error( + `Timed out waiting for Playwright test server at ${baseURL}. 
Last error: ${lastError}\n${readOutput()}`, + ) +} + +async function stopServer(serverProcess: ChildProcessWithoutNullStreams) { + if (serverProcess.exitCode !== null) { + return + } + + serverProcess.kill('SIGTERM') + + const exited = await Promise.race([ + new Promise((resolve) => serverProcess.once('exit', () => resolve(true))), + sleep(5_000).then(() => false), + ]) + + if (!exited && serverProcess.exitCode === null) { + serverProcess.kill('SIGKILL') + } +} + +export const test = base.extend, WorkerFixtures>({ + e2eServer: [ + async ({}, runFixture, workerInfo) => { + const workerServer = buildWorkerServer(workerInfo.parallelIndex) + const serverProcess = spawn(process.execPath, ['scripts/start-test-server.js'], { + cwd: process.cwd(), + env: workerServer.env, + }) + const readOutput = createOutputBuffer(serverProcess) + const previousRuntimeRoot = process.env.PLAYWRIGHT_TEST_RUNTIME_ROOT + const previousAuthSessionPath = process.env.PLAYWRIGHT_TEST_AUTH_SESSION_PATH + + process.env.PLAYWRIGHT_TEST_RUNTIME_ROOT = workerServer.runtimeRoot + process.env.PLAYWRIGHT_TEST_AUTH_SESSION_PATH = workerServer.authSessionPath + + try { + await waitForServerReady( + serverProcess, + workerServer.baseURL, + workerServer.authSessionPath, + readOutput, + ) + await runFixture({ + authSessionPath: workerServer.authSessionPath, + baseURL: workerServer.baseURL, + runtimeRoot: workerServer.runtimeRoot, + }) + } finally { + await stopServer(serverProcess) + + if (previousRuntimeRoot === undefined) { + delete process.env.PLAYWRIGHT_TEST_RUNTIME_ROOT + } else { + process.env.PLAYWRIGHT_TEST_RUNTIME_ROOT = previousRuntimeRoot + } + + if (previousAuthSessionPath === undefined) { + delete process.env.PLAYWRIGHT_TEST_AUTH_SESSION_PATH + } else { + process.env.PLAYWRIGHT_TEST_AUTH_SESSION_PATH = previousAuthSessionPath + } + } + }, + { scope: 'worker', timeout: startupTimeoutMs }, + ], + baseURL: async ({ e2eServer }, runFixture) => { + await runFixture(e2eServer.baseURL) + }, +}) + 
+export { expect, type Page } diff --git a/tests/e2e/helpers.ts b/tests/e2e/helpers.ts index 77f8dd2..a0d97fd 100644 --- a/tests/e2e/helpers.ts +++ b/tests/e2e/helpers.ts @@ -6,13 +6,12 @@ import { TOKTRACK_VERSION } from '../../shared/toktrack-version.js' export const sampleUsagePath = path.join(process.cwd(), 'examples', 'sample-usage.json') -const localAuthSessionPath = path.join( - process.cwd(), - '.tmp-playwright', - 'app', - 'config', - 'session-auth.json', -) +function getLocalAuthSessionPath(): string { + return ( + process.env.PLAYWRIGHT_TEST_AUTH_SESSION_PATH || + path.join(process.cwd(), '.tmp-playwright', 'app', 'config', 'session-auth.json') + ) +} export const uploadToastPattern = /^(Datei sample-usage\.json erfolgreich geladen|File sample-usage\.json loaded successfully)$/ @@ -50,7 +49,7 @@ type InitScriptTarget = { export const sampleUsage = JSON.parse(fs.readFileSync(sampleUsagePath, 'utf-8')) as SampleUsage export function readLocalAuthSession() { - return JSON.parse(fs.readFileSync(localAuthSessionPath, 'utf-8')) as LocalAuthSession + return JSON.parse(fs.readFileSync(getLocalAuthSessionPath(), 'utf-8')) as LocalAuthSession } export function createApiAuthHeaders() { diff --git a/tests/unit/playwright-config.test.ts b/tests/unit/playwright-config.test.ts index 553015c..e94d0e5 100644 --- a/tests/unit/playwright-config.test.ts +++ b/tests/unit/playwright-config.test.ts @@ -2,6 +2,8 @@ import { afterEach, describe, expect, it, vi } from 'vitest' import packageJson from '../../package.json' type PlaywrightConfig = { + fullyParallel?: boolean + webServer?: unknown workers?: number } @@ -28,14 +30,23 @@ describe('playwright config', () => { } }) - it('keeps single-worker mode for CI only', async () => { - await expect(importPlaywrightConfig(undefined)).resolves.toMatchObject({ + it('uses worker-scoped servers instead of the global Playwright web server', async () => { + const localConfig = await importPlaywrightConfig(undefined) + + 
expect(localConfig).toMatchObject({ + fullyParallel: true, workers: undefined, }) - await expect(importPlaywrightConfig('true')).resolves.toMatchObject({ - workers: 1, + expect(localConfig.webServer).toBeUndefined() + + const ciConfig = await importPlaywrightConfig('true') + + expect(ciConfig).toMatchObject({ + fullyParallel: true, + workers: 2, }) - expect(packageJson.scripts['test:e2e:ci']).toBe('playwright test --workers=1') + expect(ciConfig.webServer).toBeUndefined() + expect(packageJson.scripts['test:e2e:ci']).toBe('playwright test --workers=2') expect(packageJson.scripts['test:e2e:ci']).not.toContain('CI=1') }) }) From cb31d35b106bae7309127319f20f262b64609c77 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Tue, 28 Apr 2026 22:01:21 +0200 Subject: [PATCH 05/42] Move command palette contracts to Vitest --- .../CommandPalette.commands.tsx | 567 ++++++++++++++++++ .../command-palette/CommandPalette.tsx | 562 ++--------------- tests/e2e/command-palette.spec.ts | 377 ++---------- tests/unit/command-palette-commands.test.ts | 231 +++++++ tests/unit/playwright-config.test.ts | 2 +- 5 files changed, 898 insertions(+), 841 deletions(-) create mode 100644 src/components/features/command-palette/CommandPalette.commands.tsx create mode 100644 tests/unit/command-palette-commands.test.ts diff --git a/src/components/features/command-palette/CommandPalette.commands.tsx b/src/components/features/command-palette/CommandPalette.commands.tsx new file mode 100644 index 0000000..5cb52e0 --- /dev/null +++ b/src/components/features/command-palette/CommandPalette.commands.tsx @@ -0,0 +1,567 @@ +import { + ArrowDown, + ArrowUp, + BarChart3, + Calendar, + CalendarRange, + ChartBar, + CircleHelp, + Download, + Filter, + Languages, + Layers3, + LineChart, + Moon, + RefreshCcw, + Sigma, + SlidersHorizontal, + Sun, + Table, + Trash2, + Upload, + Zap, +} from 'lucide-react' +import { DASHBOARD_SECTION_DEFINITION_MAP } from '@/lib/dashboard-preferences' +import type { 
DashboardCommandPaletteViewModel } from '@/types/dashboard-view-model' +import type { DashboardSectionId } from '@/types' + +/** Describes one executable command rendered by the dashboard command palette. */ +export interface CommandItem { + id: string + label: string + description?: string + keywords?: string[] + aliases?: string[] + shortcut?: string + icon: React.ReactNode + action: () => void + group: string + testId?: string +} + +type Translate = (key: string, options?: Record) => string + +type CommandPaletteBuilderOptions = DashboardCommandPaletteViewModel & { + onScrollBottom: () => void + onScrollTop: () => void + t: Translate +} + +type SectionNavigationOptions = Pick< + DashboardCommandPaletteViewModel, + 'onScrollTo' | 'sectionOrder' | 'sectionVisibility' +> & { + sectionAvailability: Record + t: Translate +} + +const SECTION_COMMAND_ICON_MAP: Record = { + insights: , + metrics: , + today: , + currentMonth: , + activity: , + forecastCache: , + limits: , + costAnalysis: , + tokenAnalysis: , + requestAnalysis: , + advancedAnalysis: , + comparisons: , + tables: , +} + +const SECTION_COMMAND_KEYWORDS: Record = { + insights: ['summary', 'insight'], + metrics: ['kpi', 'zahlen'], + today: ['today', 'heute'], + currentMonth: ['monat', 'current month'], + activity: ['heatmap', 'aktivität'], + forecastCache: ['forecast', 'cache', 'roi'], + limits: ['limits', 'subscriptions', 'budget', 'anbieter limits'], + costAnalysis: ['charts', 'kostenanalyse'], + tokenAnalysis: ['tokens', 'token analyse'], + requestAnalysis: ['requests', 'request analyse', 'anfragen'], + advancedAnalysis: ['advanced analysis', 'distributions', 'risk', 'verteilungen'], + comparisons: ['anomalie', 'vergleich'], + tables: ['table', 'details'], +} + +const SECTION_COMMAND_ALIASES: Partial> = { + limits: ['limits sektion', 'subscriptions sektion'], + tokenAnalysis: ['token chart'], + requestAnalysis: ['request chart', 'request donut'], +} + +/** Normalizes command labels, aliases, and queries 
into comparable search text. */ +export function normalizeSearchValue(value: string) { + return value + .toLowerCase() + .normalize('NFD') + .replace(/[\u0300-\u036f]/g, '') + .replace(/[^\p{L}\p{N}]+/gu, ' ') + .trim() +} + +/** Returns the searchable text corpus for a command. */ +export function getCommandSearchText(cmd: CommandItem) { + return normalizeSearchValue( + [cmd.label, cmd.description, ...(cmd.keywords ?? []), ...(cmd.aliases ?? [])] + .filter(Boolean) + .join(' '), + ) +} + +/** Scores how well a command matches a query, returning zero for non-matches. */ +export function getCommandSearchScore(cmd: CommandItem, query: string) { + if (!query) return 1 + + const normalizedQuery = normalizeSearchValue(query) + if (!normalizedQuery) return 1 + + const haystack = getCommandSearchText(cmd) + const terms = normalizedQuery.split(/\s+/).filter(Boolean) + if (terms.length === 0) return 1 + + let score = 0 + + for (const term of terms) { + if (!haystack.includes(term)) return 0 + + if (normalizeSearchValue(cmd.label) === term) { + score += 120 + continue + } + + if (normalizeSearchValue(cmd.label).startsWith(term)) { + score += 80 + continue + } + + if ((cmd.aliases ?? []).some((alias) => normalizeSearchValue(alias) === term)) { + score += 70 + continue + } + + if ((cmd.keywords ?? []).some((keyword) => normalizeSearchValue(keyword).startsWith(term))) { + score += 55 + continue + } + + if (haystack.includes(` ${term}`) || haystack.startsWith(term)) { + score += 35 + continue + } + + score += 15 + } + + return score +} + +/** Filters and ranks commands using the palette's deterministic search policy. 
*/ +export function filterCommandItems(commands: CommandItem[], search: string) { + return commands + .map((cmd, index) => ({ cmd, score: getCommandSearchScore(cmd, search), index })) + .filter((entry) => entry.score > 0) + .sort((a, b) => b.score - a.score || a.index - b.index) + .map((entry) => entry.cmd) +} + +/** Returns the commands that can be triggered by numeric quick-select shortcuts. */ +export function getVisibleCommandItems(commands: CommandItem[]) { + return commands.slice(0, 9) +} + +/** Builds the per-section availability map from the current dashboard data state. */ +export function buildSectionAvailability({ + hasMonthSection, + hasRequestSection, + hasTodaySection, +}: Pick< + DashboardCommandPaletteViewModel, + 'hasMonthSection' | 'hasRequestSection' | 'hasTodaySection' +>): Record { + return { + insights: true, + metrics: true, + today: hasTodaySection, + currentMonth: hasMonthSection, + activity: true, + forecastCache: true, + limits: true, + costAnalysis: true, + tokenAnalysis: true, + requestAnalysis: hasRequestSection, + advancedAnalysis: true, + comparisons: true, + tables: true, + } +} + +/** Builds ordered dashboard-section navigation commands. 
*/ +export function buildSectionNavigationCommands({ + onScrollTo, + sectionAvailability, + sectionOrder, + sectionVisibility, + t, +}: SectionNavigationOptions): CommandItem[] { + return sectionOrder.flatMap((sectionId) => { + const section = DASHBOARD_SECTION_DEFINITION_MAP[sectionId] + + if (!sectionVisibility[sectionId] || !sectionAvailability[sectionId]) { + return [] + } + + const sectionLabel = t(section.labelKey) + + return [ + { + id: `section-${section.id}`, + label: t('commandPalette.commands.goToSection.label', { section: sectionLabel }), + description: t('commandPalette.commands.goToSection.description', { + section: sectionLabel, + }), + keywords: [sectionLabel, section.domId, ...SECTION_COMMAND_KEYWORDS[section.id]], + icon: SECTION_COMMAND_ICON_MAP[section.id], + action: () => onScrollTo(section.domId), + group: t('commandPalette.groups.navigation'), + testId: `command-section-${section.id}`, + ...(SECTION_COMMAND_ALIASES[section.id] + ? { aliases: SECTION_COMMAND_ALIASES[section.id] } + : {}), + }, + ] + }) +} + +/** Builds the complete command list for the current dashboard view model. 
*/ +export function buildCommandPaletteCommands({ + isDark, + availableProviders, + selectedProviders, + availableModels, + selectedModels, + reportGenerating, + onToggleTheme, + onExportCSV, + onGenerateReport, + onDelete, + onUpload, + onAutoImport, + onOpenSettings, + onScrollTo, + onScrollTop, + onScrollBottom, + onViewModeChange, + onApplyPreset, + onToggleProvider, + onToggleModel, + onClearProviders, + onClearModels, + onClearDateRange, + onResetAll, + onHelp, + onLanguageChange, + t, + ...sectionInputs +}: CommandPaletteBuilderOptions): CommandItem[] { + const sectionNavigationCommands = buildSectionNavigationCommands({ + onScrollTo, + sectionAvailability: buildSectionAvailability(sectionInputs), + sectionOrder: sectionInputs.sectionOrder, + sectionVisibility: sectionInputs.sectionVisibility, + t, + }) + + const baseCommands: CommandItem[] = [ + { + id: 'auto-import', + label: t('commandPalette.commands.autoImport.label'), + description: t('commandPalette.commands.autoImport.description'), + keywords: ['toktrack', 'import', 'load', 'sync'], + aliases: ['auto import', 'daten importieren'], + icon: , + action: onAutoImport, + group: t('commandPalette.groups.loadData'), + }, + { + id: 'upload', + label: t('commandPalette.commands.upload.label'), + description: t('commandPalette.commands.upload.description'), + keywords: ['upload', 'file', 'json', 'import'], + aliases: ['datei laden', 'json import'], + shortcut: '⌘U', + icon: , + action: onUpload, + group: t('commandPalette.groups.loadData'), + }, + { + id: 'csv', + label: t('commandPalette.commands.exportCsv.label'), + description: t('commandPalette.commands.exportCsv.description'), + keywords: ['download', 'export', 'csv'], + aliases: ['csv download', 'daten exportieren'], + shortcut: '⌘E', + icon: , + action: onExportCSV, + group: t('commandPalette.groups.exports'), + }, + { + id: 'report', + label: reportGenerating + ? 
t('commandPalette.commands.generateReport.labelLoading') + : t('commandPalette.commands.generateReport.label'), + description: t('commandPalette.commands.generateReport.description'), + keywords: ['pdf', 'report', 'bericht', 'export'], + aliases: ['report export', 'pdf export', 'bericht generieren'], + icon: , + action: onGenerateReport, + group: t('commandPalette.groups.exports'), + }, + { + id: 'settings-open', + label: t('commandPalette.commands.openSettings.label'), + description: t('commandPalette.commands.openSettings.description'), + keywords: ['settings', 'limits', 'subscription', 'anbieter limit', 'backup'], + aliases: ['settings dialog', 'einstellungen öffnen', 'provider limits'], + icon: , + action: onOpenSettings, + group: t('commandPalette.groups.maintenance'), + }, + { + id: 'delete', + label: t('commandPalette.commands.delete.label'), + description: t('commandPalette.commands.delete.description'), + keywords: ['reset data', 'clear data', 'delete'], + aliases: ['daten reset', 'alles loeschen'], + icon: , + action: onDelete, + group: t('commandPalette.groups.maintenance'), + }, + { + id: 'view-daily', + label: t('commandPalette.commands.viewDaily.label'), + description: t('commandPalette.commands.viewDaily.description'), + keywords: ['daily', 'tage', 'tag', 'tagesansicht'], + aliases: ['daily view'], + icon: , + action: () => onViewModeChange('daily'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'view-monthly', + label: t('commandPalette.commands.viewMonthly.label'), + description: t('commandPalette.commands.viewMonthly.description'), + keywords: ['monthly', 'monate', 'monat', 'monatsansicht'], + aliases: ['monthly view'], + icon: , + action: () => onViewModeChange('monthly'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'view-yearly', + label: t('commandPalette.commands.viewYearly.label'), + description: t('commandPalette.commands.viewYearly.description'), + keywords: ['yearly', 'jahre', 'jahr', 'jahresansicht'], + 
aliases: ['yearly view'], + icon: , + action: () => onViewModeChange('yearly'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'preset-7d', + label: t('commandPalette.commands.preset7d.label'), + description: t('commandPalette.commands.preset7d.description'), + keywords: ['7d', '7 tage'], + icon: , + action: () => onApplyPreset('7d'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'preset-30d', + label: t('commandPalette.commands.preset30d.label'), + description: t('commandPalette.commands.preset30d.description'), + keywords: ['30d', '30 tage'], + icon: , + action: () => onApplyPreset('30d'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'preset-month', + label: t('commandPalette.commands.presetMonth.label'), + description: t('commandPalette.commands.presetMonth.description'), + keywords: ['current month', 'monat'], + icon: , + action: () => onApplyPreset('month'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'preset-year', + label: t('commandPalette.commands.presetYear.label'), + description: t('commandPalette.commands.presetYear.description'), + keywords: ['current year', 'jahr'], + icon: , + action: () => onApplyPreset('year'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'preset-all', + label: t('commandPalette.commands.presetAll.label'), + description: t('commandPalette.commands.presetAll.description'), + keywords: ['all', 'alles'], + icon: , + action: () => onApplyPreset('all'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'clear-providers', + label: t('commandPalette.commands.clearProviders.label'), + description: t('commandPalette.commands.clearProviders.description'), + keywords: ['provider', 'anbieter', 'clear'], + icon: , + action: onClearProviders, + group: t('commandPalette.groups.filters'), + }, + { + id: 'clear-models', + label: t('commandPalette.commands.clearModels.label'), + description: t('commandPalette.commands.clearModels.description'), + keywords: ['models', 
'modelle', 'clear'], + icon: , + action: onClearModels, + group: t('commandPalette.groups.filters'), + }, + { + id: 'clear-dates', + label: t('commandPalette.commands.clearDates.label'), + description: t('commandPalette.commands.clearDates.description'), + keywords: ['date', 'datum', 'range', 'clear'], + icon: , + action: onClearDateRange, + group: t('commandPalette.groups.filters'), + }, + { + id: 'reset-all', + label: t('commandPalette.commands.resetAll.label'), + description: t('commandPalette.commands.resetAll.description'), + keywords: ['reset all', 'alles zurücksetzen', 'default', 'clear filters'], + aliases: ['reset dashboard', 'alles reset', 'filter reset'], + icon: , + action: onResetAll, + group: t('commandPalette.groups.filters'), + }, + { + id: 'top', + label: t('commandPalette.commands.scrollTop.label'), + description: t('commandPalette.commands.scrollTop.description'), + keywords: ['top', 'start', 'anfang'], + shortcut: '⌘↑', + icon: , + action: onScrollTop, + group: t('commandPalette.groups.navigation'), + }, + { + id: 'bottom', + label: t('commandPalette.commands.scrollBottom.label'), + description: t('commandPalette.commands.scrollBottom.description'), + keywords: ['bottom', 'ende'], + icon: , + action: onScrollBottom, + group: t('commandPalette.groups.navigation'), + }, + { + id: 'filters', + label: t('commandPalette.commands.filters.label'), + description: t('commandPalette.commands.filters.description'), + keywords: ['filterbar', 'filter'], + icon: , + action: () => onScrollTo('filters'), + group: t('commandPalette.groups.navigation'), + }, + ...sectionNavigationCommands, + { + id: 'theme', + label: isDark + ? t('commandPalette.commands.themeLight.label') + : t('commandPalette.commands.themeDark.label'), + description: t('commandPalette.commands.themeDark.description'), + keywords: ['theme', 'dark', 'light'], + shortcut: '⌘D', + icon: isDark ? 
: , + action: onToggleTheme, + group: t('commandPalette.groups.view'), + }, + { + id: 'language-de', + label: t('commandPalette.commands.languageGerman.label'), + description: t('commandPalette.commands.languageGerman.description'), + keywords: ['language', 'sprache', 'deutsch', 'german', 'locale'], + aliases: ['switch german', 'auf deutsch', 'sprache deutsch'], + icon: , + action: () => onLanguageChange('de'), + group: t('commandPalette.groups.language'), + }, + { + id: 'language-en', + label: t('commandPalette.commands.languageEnglish.label'), + description: t('commandPalette.commands.languageEnglish.description'), + keywords: ['language', 'sprache', 'english', 'englisch', 'locale'], + aliases: ['switch english', 'auf englisch', 'sprache english'], + icon: , + action: () => onLanguageChange('en'), + group: t('commandPalette.groups.language'), + }, + { + id: 'help', + label: t('commandPalette.commands.help.label'), + description: t('commandPalette.commands.help.description'), + keywords: ['shortcut', 'hilfe'], + shortcut: '?', + icon: , + action: onHelp, + group: t('commandPalette.groups.help'), + }, + ] + + const providerCommands: CommandItem[] = availableProviders.map((provider) => { + const selected = selectedProviders.includes(provider) + return { + id: `provider-${provider}`, + label: `${selected ? t('commandPalette.commands.clearProviders.label') : t('common.provider')}: ${provider}`, + description: selected + ? 
`${t('commandPalette.commands.clearProviders.description')}: ${provider}` + : `${t('common.provider')} ${provider}`, + keywords: ['anbieter', 'provider', provider.toLowerCase()], + aliases: [`filter ${provider.toLowerCase()}`, `${provider.toLowerCase()} daten`], + icon: , + action: () => onToggleProvider(provider), + group: t('commandPalette.groups.providers'), + testId: `command-provider-${provider}`, + } + }) + + const modelCommands: CommandItem[] = availableModels.map((model) => { + const selected = selectedModels.includes(model) + return { + id: `model-${model}`, + label: `${selected ? t('commandPalette.commands.clearModels.label') : t('common.model')}: ${model}`, + description: selected + ? `${t('commandPalette.commands.clearModels.description')}: ${model}` + : `${t('common.model')} ${model}`, + keywords: ['modell', 'model', model.toLowerCase()], + aliases: [ + `filter ${model.toLowerCase()}`, + `${model.toLowerCase()} requests`, + `${model.toLowerCase()} kosten`, + ], + icon: , + action: () => onToggleModel(model), + group: t('commandPalette.groups.models'), + testId: `command-model-${model}`, + } + }) + + return [...baseCommands, ...providerCommands, ...modelCommands] +} diff --git a/src/components/features/command-palette/CommandPalette.tsx b/src/components/features/command-palette/CommandPalette.tsx index e6bf86c..7aab4e1 100644 --- a/src/components/features/command-palette/CommandPalette.tsx +++ b/src/components/features/command-palette/CommandPalette.tsx @@ -2,150 +2,17 @@ import { useEffect, useMemo, useState } from 'react' import { useTranslation } from 'react-i18next' import { Command } from 'cmdk' import { Dialog, DialogContent, DialogDescription, DialogTitle } from '@/components/ui/dialog' -import { - Download, - Trash2, - Upload, - Sun, - Moon, - Calendar, - ChartBar, - Table, - Search, - ArrowUp, - CircleHelp, - Zap, - Filter, - BarChart3, - LineChart, - Sigma, - CalendarRange, - Layers3, - ArrowDown, - RefreshCcw, - SlidersHorizontal, - 
Languages, -} from 'lucide-react' -import { DASHBOARD_SECTION_DEFINITION_MAP } from '@/lib/dashboard-preferences' +import { Search } from 'lucide-react' import type { DashboardCommandPaletteViewModel } from '@/types/dashboard-view-model' -import type { DashboardSectionId } from '@/types' +import { + buildCommandPaletteCommands, + filterCommandItems, + getVisibleCommandItems, + type CommandItem, +} from './CommandPalette.commands' type CommandPaletteProps = DashboardCommandPaletteViewModel -interface CommandItem { - id: string - label: string - description?: string - keywords?: string[] - aliases?: string[] - shortcut?: string - icon: React.ReactNode - action: () => void - group: string - testId?: string -} - -const SECTION_COMMAND_ICON_MAP: Record = { - insights: , - metrics: , - today: , - currentMonth: , - activity: , - forecastCache: , - limits: , - costAnalysis: , - tokenAnalysis: , - requestAnalysis: , - advancedAnalysis: , - comparisons: , - tables:
, -} - -const SECTION_COMMAND_KEYWORDS: Record = { - insights: ['summary', 'insight'], - metrics: ['kpi', 'zahlen'], - today: ['today', 'heute'], - currentMonth: ['monat', 'current month'], - activity: ['heatmap', 'aktivität'], - forecastCache: ['forecast', 'cache', 'roi'], - limits: ['limits', 'subscriptions', 'budget', 'anbieter limits'], - costAnalysis: ['charts', 'kostenanalyse'], - tokenAnalysis: ['tokens', 'token analyse'], - requestAnalysis: ['requests', 'request analyse', 'anfragen'], - advancedAnalysis: ['advanced analysis', 'distributions', 'risk', 'verteilungen'], - comparisons: ['anomalie', 'vergleich'], - tables: ['table', 'details'], -} - -const SECTION_COMMAND_ALIASES: Partial> = { - limits: ['limits sektion', 'subscriptions sektion'], - tokenAnalysis: ['token chart'], - requestAnalysis: ['request chart', 'request donut'], -} - -function normalizeSearchValue(value: string) { - return value - .toLowerCase() - .normalize('NFD') - .replace(/[\u0300-\u036f]/g, '') - .replace(/[^\p{L}\p{N}]+/gu, ' ') - .trim() -} - -function getCommandSearchText(cmd: CommandItem) { - return normalizeSearchValue( - [cmd.label, cmd.description, ...(cmd.keywords ?? []), ...(cmd.aliases ?? [])] - .filter(Boolean) - .join(' '), - ) -} - -function getCommandSearchScore(cmd: CommandItem, query: string) { - if (!query) return 1 - - const normalizedQuery = normalizeSearchValue(query) - if (!normalizedQuery) return 1 - - const haystack = getCommandSearchText(cmd) - const terms = normalizedQuery.split(/\s+/).filter(Boolean) - if (terms.length === 0) return 1 - - let score = 0 - - for (const term of terms) { - if (!haystack.includes(term)) return 0 - - if (normalizeSearchValue(cmd.label) === term) { - score += 120 - continue - } - - if (normalizeSearchValue(cmd.label).startsWith(term)) { - score += 80 - continue - } - - if ((cmd.aliases ?? []).some((alias) => normalizeSearchValue(alias) === term)) { - score += 70 - continue - } - - if ((cmd.keywords ?? 
[]).some((keyword) => normalizeSearchValue(keyword).startsWith(term))) { - score += 55 - continue - } - - if (haystack.includes(` ${term}`) || haystack.startsWith(term)) { - score += 35 - continue - } - - score += 15 - } - - return score -} - /** Renders the keyboard-first command palette for dashboard actions. */ export function CommandPalette({ isDark, @@ -182,25 +49,6 @@ export function CommandPalette({ const [open, setOpen] = useState(false) const [search, setSearch] = useState('') - const sectionAvailability = useMemo>( - () => ({ - insights: true, - metrics: true, - today: hasTodaySection, - currentMonth: hasMonthSection, - activity: true, - forecastCache: true, - limits: true, - costAnalysis: true, - tokenAnalysis: true, - requestAnalysis: hasRequestSection, - advancedAnalysis: true, - comparisons: true, - tables: true, - }), - [hasMonthSection, hasRequestSection, hasTodaySection], - ) - useEffect(() => { const handleKeyDown = (e: KeyboardEvent) => { if ((e.metaKey || e.ctrlKey) && e.key === 'k') { @@ -212,293 +60,56 @@ export function CommandPalette({ return () => document.removeEventListener('keydown', handleKeyDown) }, []) - const sectionNavigationCommands = useMemo( + const commands = useMemo( () => - sectionOrder.flatMap((sectionId) => { - const section = DASHBOARD_SECTION_DEFINITION_MAP[sectionId] - - if (!sectionVisibility[sectionId] || !sectionAvailability[sectionId]) { - return [] - } - - const sectionLabel = t(section.labelKey) - - return [ - { - id: `section-${section.id}`, - label: t('commandPalette.commands.goToSection.label', { section: sectionLabel }), - description: t('commandPalette.commands.goToSection.description', { - section: sectionLabel, - }), - keywords: [sectionLabel, section.domId, ...SECTION_COMMAND_KEYWORDS[section.id]], - icon: SECTION_COMMAND_ICON_MAP[section.id], - action: () => onScrollTo(section.domId), - group: t('commandPalette.groups.navigation'), - testId: `command-section-${section.id}`, - 
...(SECTION_COMMAND_ALIASES[section.id] - ? { aliases: SECTION_COMMAND_ALIASES[section.id] } - : {}), - }, - ] + buildCommandPaletteCommands({ + isDark, + availableProviders, + selectedProviders, + availableModels, + selectedModels, + hasTodaySection, + hasMonthSection, + hasRequestSection, + sectionVisibility, + sectionOrder, + reportGenerating, + onToggleTheme, + onExportCSV, + onGenerateReport, + onDelete, + onUpload, + onAutoImport, + onOpenSettings, + onScrollTo, + onScrollTop: () => window.scrollTo({ top: 0, behavior: 'smooth' }), + onScrollBottom: () => + window.scrollTo({ top: document.body.scrollHeight, behavior: 'smooth' }), + onViewModeChange, + onApplyPreset, + onToggleProvider, + onToggleModel, + onClearProviders, + onClearModels, + onClearDateRange, + onResetAll, + onHelp, + onLanguageChange, + t: (key, options) => (options === undefined ? t(key) : t(key, options)), }), - [onScrollTo, sectionAvailability, sectionOrder, sectionVisibility, t], - ) - - const baseCommands = useMemo( - () => [ - { - id: 'auto-import', - label: t('commandPalette.commands.autoImport.label'), - description: t('commandPalette.commands.autoImport.description'), - keywords: ['toktrack', 'import', 'load', 'sync'], - aliases: ['auto import', 'daten importieren'], - icon: , - action: onAutoImport, - group: t('commandPalette.groups.loadData'), - }, - { - id: 'upload', - label: t('commandPalette.commands.upload.label'), - description: t('commandPalette.commands.upload.description'), - keywords: ['upload', 'file', 'json', 'import'], - aliases: ['datei laden', 'json import'], - shortcut: '⌘U', - icon: , - action: onUpload, - group: t('commandPalette.groups.loadData'), - }, - { - id: 'csv', - label: t('commandPalette.commands.exportCsv.label'), - description: t('commandPalette.commands.exportCsv.description'), - keywords: ['download', 'export', 'csv'], - aliases: ['csv download', 'daten exportieren'], - shortcut: '⌘E', - icon: , - action: onExportCSV, - group: 
t('commandPalette.groups.exports'), - }, - { - id: 'report', - label: reportGenerating - ? t('commandPalette.commands.generateReport.labelLoading') - : t('commandPalette.commands.generateReport.label'), - description: t('commandPalette.commands.generateReport.description'), - keywords: ['pdf', 'report', 'bericht', 'export'], - aliases: ['report export', 'pdf export', 'bericht generieren'], - icon: , - action: onGenerateReport, - group: t('commandPalette.groups.exports'), - }, - { - id: 'settings-open', - label: t('commandPalette.commands.openSettings.label'), - description: t('commandPalette.commands.openSettings.description'), - keywords: ['settings', 'limits', 'subscription', 'anbieter limit', 'backup'], - aliases: ['settings dialog', 'einstellungen öffnen', 'provider limits'], - icon: , - action: onOpenSettings, - group: t('commandPalette.groups.maintenance'), - }, - { - id: 'delete', - label: t('commandPalette.commands.delete.label'), - description: t('commandPalette.commands.delete.description'), - keywords: ['reset data', 'clear data', 'delete'], - aliases: ['daten reset', 'alles loeschen'], - icon: , - action: onDelete, - group: t('commandPalette.groups.maintenance'), - }, - - { - id: 'view-daily', - label: t('commandPalette.commands.viewDaily.label'), - description: t('commandPalette.commands.viewDaily.description'), - keywords: ['daily', 'tage', 'tag', 'tagesansicht'], - aliases: ['daily view'], - icon: , - action: () => onViewModeChange('daily'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'view-monthly', - label: t('commandPalette.commands.viewMonthly.label'), - description: t('commandPalette.commands.viewMonthly.description'), - keywords: ['monthly', 'monate', 'monat', 'monatsansicht'], - aliases: ['monthly view'], - icon: , - action: () => onViewModeChange('monthly'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'view-yearly', - label: t('commandPalette.commands.viewYearly.label'), - description: 
t('commandPalette.commands.viewYearly.description'), - keywords: ['yearly', 'jahre', 'jahr', 'jahresansicht'], - aliases: ['yearly view'], - icon: , - action: () => onViewModeChange('yearly'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'preset-7d', - label: t('commandPalette.commands.preset7d.label'), - description: t('commandPalette.commands.preset7d.description'), - keywords: ['7d', '7 tage'], - icon: , - action: () => onApplyPreset('7d'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'preset-30d', - label: t('commandPalette.commands.preset30d.label'), - description: t('commandPalette.commands.preset30d.description'), - keywords: ['30d', '30 tage'], - icon: , - action: () => onApplyPreset('30d'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'preset-month', - label: t('commandPalette.commands.presetMonth.label'), - description: t('commandPalette.commands.presetMonth.description'), - keywords: ['current month', 'monat'], - icon: , - action: () => onApplyPreset('month'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'preset-year', - label: t('commandPalette.commands.presetYear.label'), - description: t('commandPalette.commands.presetYear.description'), - keywords: ['current year', 'jahr'], - icon: , - action: () => onApplyPreset('year'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'preset-all', - label: t('commandPalette.commands.presetAll.label'), - description: t('commandPalette.commands.presetAll.description'), - keywords: ['all', 'alles'], - icon: , - action: () => onApplyPreset('all'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'clear-providers', - label: t('commandPalette.commands.clearProviders.label'), - description: t('commandPalette.commands.clearProviders.description'), - keywords: ['provider', 'anbieter', 'clear'], - icon: , - action: onClearProviders, - group: t('commandPalette.groups.filters'), - }, - { - id: 'clear-models', - label: 
t('commandPalette.commands.clearModels.label'), - description: t('commandPalette.commands.clearModels.description'), - keywords: ['models', 'modelle', 'clear'], - icon: , - action: onClearModels, - group: t('commandPalette.groups.filters'), - }, - { - id: 'clear-dates', - label: t('commandPalette.commands.clearDates.label'), - description: t('commandPalette.commands.clearDates.description'), - keywords: ['date', 'datum', 'range', 'clear'], - icon: , - action: onClearDateRange, - group: t('commandPalette.groups.filters'), - }, - { - id: 'reset-all', - label: t('commandPalette.commands.resetAll.label'), - description: t('commandPalette.commands.resetAll.description'), - keywords: ['reset all', 'alles zurücksetzen', 'default', 'clear filters'], - aliases: ['reset dashboard', 'alles reset', 'filter reset'], - icon: , - action: onResetAll, - group: t('commandPalette.groups.filters'), - }, - - { - id: 'top', - label: t('commandPalette.commands.scrollTop.label'), - description: t('commandPalette.commands.scrollTop.description'), - keywords: ['top', 'start', 'anfang'], - shortcut: '⌘↑', - icon: , - action: () => window.scrollTo({ top: 0, behavior: 'smooth' }), - group: t('commandPalette.groups.navigation'), - }, - { - id: 'bottom', - label: t('commandPalette.commands.scrollBottom.label'), - description: t('commandPalette.commands.scrollBottom.description'), - keywords: ['bottom', 'ende'], - icon: , - action: () => window.scrollTo({ top: document.body.scrollHeight, behavior: 'smooth' }), - group: t('commandPalette.groups.navigation'), - }, - { - id: 'filters', - label: t('commandPalette.commands.filters.label'), - description: t('commandPalette.commands.filters.description'), - keywords: ['filterbar', 'filter'], - icon: , - action: () => onScrollTo('filters'), - group: t('commandPalette.groups.navigation'), - }, - ...sectionNavigationCommands, - - { - id: 'theme', - label: isDark - ? 
t('commandPalette.commands.themeLight.label') - : t('commandPalette.commands.themeDark.label'), - description: t('commandPalette.commands.themeDark.description'), - keywords: ['theme', 'dark', 'light'], - shortcut: '⌘D', - icon: isDark ? : , - action: onToggleTheme, - group: t('commandPalette.groups.view'), - }, - { - id: 'language-de', - label: t('commandPalette.commands.languageGerman.label'), - description: t('commandPalette.commands.languageGerman.description'), - keywords: ['language', 'sprache', 'deutsch', 'german', 'locale'], - aliases: ['switch german', 'auf deutsch', 'sprache deutsch'], - icon: , - action: () => onLanguageChange('de'), - group: t('commandPalette.groups.language'), - }, - { - id: 'language-en', - label: t('commandPalette.commands.languageEnglish.label'), - description: t('commandPalette.commands.languageEnglish.description'), - keywords: ['language', 'sprache', 'english', 'englisch', 'locale'], - aliases: ['switch english', 'auf englisch', 'sprache english'], - icon: , - action: () => onLanguageChange('en'), - group: t('commandPalette.groups.language'), - }, - { - id: 'help', - label: t('commandPalette.commands.help.label'), - description: t('commandPalette.commands.help.description'), - keywords: ['shortcut', 'hilfe'], - shortcut: '?', - icon: , - action: onHelp, - group: t('commandPalette.groups.help'), - }, - ], [ isDark, + availableProviders, + selectedProviders, + availableModels, + selectedModels, + hasTodaySection, + hasMonthSection, + hasRequestSection, + sectionVisibility, + sectionOrder, + reportGenerating, + onToggleTheme, onAutoImport, onOpenSettings, onExportCSV, @@ -512,75 +123,20 @@ export function CommandPalette({ onClearDateRange, onResetAll, onScrollTo, - sectionNavigationCommands, - reportGenerating, - onToggleTheme, + onToggleProvider, + onToggleModel, onLanguageChange, onHelp, t, ], ) - const providerCommands = useMemo( - () => - availableProviders.map((provider) => { - const selected = 
selectedProviders.includes(provider) - return { - id: `provider-${provider}`, - label: `${selected ? t('commandPalette.commands.clearProviders.label') : t('common.provider')}: ${provider}`, - description: selected - ? `${t('commandPalette.commands.clearProviders.description')}: ${provider}` - : `${t('common.provider')} ${provider}`, - keywords: ['anbieter', 'provider', provider.toLowerCase()], - aliases: [`filter ${provider.toLowerCase()}`, `${provider.toLowerCase()} daten`], - icon: , - action: () => onToggleProvider(provider), - group: t('commandPalette.groups.providers'), - } - }), - [availableProviders, selectedProviders, onToggleProvider, t], - ) + const filteredCommands = useMemo(() => filterCommandItems(commands, search), [commands, search]) - const modelCommands = useMemo( - () => - availableModels.map((model) => { - const selected = selectedModels.includes(model) - return { - id: `model-${model}`, - label: `${selected ? t('commandPalette.commands.clearModels.label') : t('common.model')}: ${model}`, - description: selected - ? 
`${t('commandPalette.commands.clearModels.description')}: ${model}` - : `${t('common.model')} ${model}`, - keywords: ['modell', 'model', model.toLowerCase()], - aliases: [ - `filter ${model.toLowerCase()}`, - `${model.toLowerCase()} requests`, - `${model.toLowerCase()} kosten`, - ], - icon: , - action: () => onToggleModel(model), - group: t('commandPalette.groups.models'), - } - }), - [availableModels, selectedModels, onToggleModel, t], - ) - - const commands = useMemo( - () => [...baseCommands, ...providerCommands, ...modelCommands], - [baseCommands, providerCommands, modelCommands], - ) - - const filteredCommands = useMemo( - () => - commands - .map((cmd, index) => ({ cmd, score: getCommandSearchScore(cmd, search), index })) - .filter((entry) => entry.score > 0) - .sort((a, b) => b.score - a.score || a.index - b.index) - .map((entry) => entry.cmd), - [commands, search], + const visibleCommands = useMemo( + () => getVisibleCommandItems(filteredCommands), + [filteredCommands], ) - - const visibleCommands = useMemo(() => filteredCommands.slice(0, 9), [filteredCommands]) const groups = useMemo( () => Array.from(new Set(filteredCommands.map((c) => c.group))), [filteredCommands], diff --git a/tests/e2e/command-palette.spec.ts b/tests/e2e/command-palette.spec.ts index 5d37b2c..6777f43 100644 --- a/tests/e2e/command-palette.spec.ts +++ b/tests/e2e/command-palette.spec.ts @@ -1,7 +1,6 @@ import { expect, test, type Page } from './fixtures' import { dailyViewPattern, - mockAutoImportStream, mockPdfReport, monthlyViewPattern, prepareDashboard, @@ -10,70 +9,9 @@ import { } from './helpers' const commandPaletteTitlePattern = /^Command [Pp]alette$/ -const settingsDialogTitlePattern = /^(Settings|Einstellungen)$/ const helpDialogTitlePattern = /^(Help & shortcuts|Hilfe & Tastenkürzel)$/ -const autoImportDialogTitlePattern = /^(Toktrack auto import|Toktrack Auto-Import)$/ -const closeButtonPattern = /^(Close|Schliessen)$/ -const autoImportButtonPattern = /^(Auto-Import|Auto 
import)$/ -const uploadFileButtonPattern = /^(Datei hochladen|Upload file)$/ -const yearlyViewPattern = /^(Jahresansicht|Yearly view)$/ -const providersActivePattern = /^(1 providers active|1 Anbieter aktiv)$/ -const modelsActivePattern = /^(1 models active|1 Modelle aktiv)$/ const dateFilterActivePattern = /^(Date filter active|Datumsfilter aktiv)$/ const preset7Pattern = /^(7D|7T)$/ -const preset30Pattern = /^(30D|30T)$/ -const presetMonthPattern = /^(Month|Monat)$/ -const presetYearPattern = /^(Year|Jahr)$/ -const presetAllPattern = /^(All|Alle)$/ - -const providerLabels = ['Anthropic', 'Google', 'OpenAI'] -const modelLabels = ['Claude Sonnet 4.5', 'Gemini 2.5 Pro', 'GPT-5.4'] -const sectionCommands = [ - { testId: 'command-section-insights', selector: '#insights' }, - { testId: 'command-section-metrics', selector: '#metrics' }, - { testId: 'command-section-today', selector: '#today' }, - { testId: 'command-section-currentMonth', selector: '#current-month' }, - { testId: 'command-section-activity', selector: '#activity' }, - { testId: 'command-section-forecastCache', selector: '#forecast-cache' }, - { testId: 'command-section-limits', selector: '#limits' }, - { testId: 'command-section-costAnalysis', selector: '#charts' }, - { testId: 'command-section-tokenAnalysis', selector: '#token-analysis' }, - { testId: 'command-section-requestAnalysis', selector: '#request-analysis' }, - { testId: 'command-section-advancedAnalysis', selector: '#advanced-analysis' }, - { testId: 'command-section-comparisons', selector: '#comparisons' }, - { testId: 'command-section-tables', selector: '#tables' }, -] as const - -const expectedCommandTestIds = [ - 'command-auto-import', - 'command-settings-open', - 'command-csv', - 'command-report', - 'command-upload', - 'command-delete', - 'command-view-daily', - 'command-view-monthly', - 'command-view-yearly', - 'command-preset-7d', - 'command-preset-30d', - 'command-preset-month', - 'command-preset-year', - 'command-preset-all', - 
'command-clear-providers', - 'command-clear-models', - 'command-clear-dates', - 'command-reset-all', - 'command-top', - 'command-bottom', - 'command-filters', - 'command-theme', - 'command-language-de', - 'command-language-en', - 'command-help', - ...sectionCommands.map((command) => command.testId), - ...providerLabels.map((provider) => `command-provider-${provider}`), - ...modelLabels.map((model) => `command-model-${model}`), -].sort() function getPalette(page: Page) { return page.getByRole('dialog', { name: commandPaletteTitlePattern }) @@ -110,301 +48,66 @@ async function waitForSectionNearTop(page: Page, selector: string) { .toBeLessThan(220) } -async function runSectionNavigationCommands( - page: Page, - commands: readonly (typeof sectionCommands)[number][], -) { - for (const section of commands) { - await test.step(`${section.testId} scrolls to the expected section`, async () => { - await page.evaluate(() => window.scrollTo({ top: document.body.scrollHeight })) - await runPaletteCommand(page, section.testId) - await waitForSectionNearTop(page, section.selector) - }) - } -} - test.beforeEach(async ({ page, baseURL }) => { await prepareDashboard(page, baseURL) }) -test('renders the full command palette command set for the seeded dataset', async ({ page }) => { +test('opens from the keyboard and shows representative commands', async ({ page }) => { const palette = await openPalette(page) - const renderedCommandTestIds = await palette - .locator('[data-testid^="command-"]') - .evaluateAll((nodes) => - nodes - .map((node) => node.getAttribute('data-testid')) - .filter((value): value is string => typeof value === 'string') - .sort(), - ) - expect(renderedCommandTestIds).toEqual(expectedCommandTestIds) + await expect(palette.locator('[data-testid="command-report"]')).toBeVisible() + await expect(palette.locator('[data-testid="command-view-monthly"]')).toBeVisible() + await expect(palette.locator('[data-testid="command-section-costAnalysis"]')).toBeVisible() }) 
-test('executes action commands from the command palette', async ({ page }) => { - await mockAutoImportStream(page) +test('executes a report action command from the command palette', async ({ page }) => { const pdfReport = await mockPdfReport(page) + const downloadPromise = page.waitForEvent('download') - await test.step('auto import opens and completes through the command palette', async () => { - await runPaletteCommand(page, 'command-auto-import') - - const dialog = page.getByRole('dialog', { name: autoImportDialogTitlePattern }) - await expect(dialog).toBeVisible() - await expect(dialog.getByText(/5 days imported|5 Tage importiert/)).toBeVisible() - await dialog.getByRole('button', { name: closeButtonPattern }).last().click() - await expect(dialog).toBeHidden() - }) - - await test.step('settings command is searchable through aliases and opens the dialog', async () => { - const palette = await openPalette(page) - await palette.locator('input').fill('einstellungen offnen') - const settingsCommand = palette.locator('[data-testid="command-settings-open"]') - await expect(settingsCommand).toBeVisible() - await settingsCommand.click() - await expect(palette).toBeHidden() - - const dialog = page.getByRole('dialog', { name: settingsDialogTitlePattern }) - await expect(dialog).toBeVisible() - await page.keyboard.press('Escape') - await expect(dialog).toBeHidden() - }) - - await test.step('upload command opens the file chooser', async () => { - const fileChooserPromise = page.waitForEvent('filechooser') - const { palette, command } = await getPaletteCommand(page, 'command-upload') - await command.click() - await fileChooserPromise - await expect(palette).toBeHidden() - }) - - await test.step('CSV export command downloads the current dataset', async () => { - const downloadPromise = page.waitForEvent('download') - await runPaletteCommand(page, 'command-csv') - const download = await downloadPromise - - 
expect(download.suggestedFilename()).toMatch(/^ttdash-export-\d{4}-\d{2}-\d{2}\.csv$/) - const csv = await readDownloadText(download) - expect(csv).toContain('"date","totalCost","totalTokens"') - expect(csv).toContain('GPT-5.4') - }) - - await test.step('PDF report command requests the report and downloads a PDF', async () => { - const downloadPromise = page.waitForEvent('download') - await runPaletteCommand(page, 'command-report') - const download = await downloadPromise - - expect(download.suggestedFilename()).toMatch(/^ttdash-report-\d{4}-\d{2}-\d{2}\.pdf$/) - const pdf = await readDownloadText(download) - expect(pdf.startsWith('%PDF-1.4')).toBe(true) - await expect.poll(() => pdfReport.getReportRequest()?.language).toBe('de') - }) - - await test.step('delete command clears the local dataset', async () => { - await runPaletteCommand(page, 'command-delete') - - await expect(page.getByRole('button', { name: autoImportButtonPattern })).toBeVisible() - await expect(page.getByRole('button', { name: uploadFileButtonPattern })).toBeVisible() - await expect(page.locator('#token-analysis')).toHaveCount(0) - }) -}) - -test('executes filter and view commands from the command palette', async ({ page }) => { - const filters = page.locator('#filters') - const openAiFilter = filters.getByRole('button', { name: 'OpenAI', exact: true }) - const gptFilter = filters.getByRole('button', { name: 'GPT-5.4', exact: true }) - - await test.step('view commands switch between daily, monthly, and yearly modes', async () => { - await runPaletteCommand(page, 'command-view-monthly') - await expect(filters.getByRole('combobox', { name: viewModeComboboxPattern })).toContainText( - monthlyViewPattern, - ) - - await runPaletteCommand(page, 'command-view-yearly') - await expect(filters.getByRole('combobox', { name: viewModeComboboxPattern })).toContainText( - yearlyViewPattern, - ) - - await runPaletteCommand(page, 'command-view-daily') - await expect(filters.getByRole('combobox', { name: 
viewModeComboboxPattern })).toContainText( - dailyViewPattern, - ) - }) - - await test.step('preset commands apply date ranges', async () => { - await runPaletteCommand(page, 'command-preset-7d') - await expect(filters.getByRole('button', { name: preset7Pattern })).toHaveAttribute( - 'aria-pressed', - 'true', - ) - await expect(filters.getByText(dateFilterActivePattern)).toBeVisible() - - await runPaletteCommand(page, 'command-preset-30d') - await expect(filters.getByRole('button', { name: preset30Pattern })).toHaveAttribute( - 'aria-pressed', - 'true', - ) - - await runPaletteCommand(page, 'command-preset-month') - await expect(filters.getByRole('button', { name: presetMonthPattern })).toHaveAttribute( - 'aria-pressed', - 'true', - ) - - await runPaletteCommand(page, 'command-preset-year') - await expect(filters.getByRole('button', { name: presetYearPattern })).toHaveAttribute( - 'aria-pressed', - 'true', - ) + await runPaletteCommand(page, 'command-report') + const download = await downloadPromise - await runPaletteCommand(page, 'command-preset-all') - await expect(filters.getByRole('button', { name: presetAllPattern })).toHaveAttribute( - 'aria-pressed', - 'true', - ) - await expect(filters.getByText(dateFilterActivePattern)).toHaveCount(0) - }) - - await test.step('clear and reset commands restore default filter state', async () => { - await openAiFilter.click() - await gptFilter.click() - await runPaletteCommand(page, 'command-preset-7d') - - await expect(filters.getByText(providersActivePattern)).toBeVisible() - await expect(filters.getByText(modelsActivePattern)).toBeVisible() - await expect(filters.getByText(dateFilterActivePattern)).toBeVisible() - - await runPaletteCommand(page, 'command-clear-providers') - await expect(openAiFilter).toHaveAttribute('aria-pressed', 'false') - await expect(filters.getByText(providersActivePattern)).toHaveCount(0) - - await runPaletteCommand(page, 'command-clear-models') - await 
expect(gptFilter).toHaveAttribute('aria-pressed', 'false') - await expect(filters.getByText(modelsActivePattern)).toHaveCount(0) - - await runPaletteCommand(page, 'command-clear-dates') - await expect(filters.getByText(dateFilterActivePattern)).toHaveCount(0) - await expect(filters.getByRole('button', { name: presetAllPattern })).toHaveAttribute( - 'aria-pressed', - 'true', - ) - - await openAiFilter.click() - await gptFilter.click() - await runPaletteCommand(page, 'command-view-monthly') - await runPaletteCommand(page, 'command-preset-30d') - await expect(filters.getByRole('combobox', { name: viewModeComboboxPattern })).toContainText( - monthlyViewPattern, - ) - - await runPaletteCommand(page, 'command-reset-all') - await expect(filters.getByRole('combobox', { name: viewModeComboboxPattern })).toContainText( - dailyViewPattern, - ) - await expect(openAiFilter).toHaveAttribute('aria-pressed', 'false') - await expect(gptFilter).toHaveAttribute('aria-pressed', 'false') - await expect(filters.getByText(providersActivePattern)).toHaveCount(0) - await expect(filters.getByText(modelsActivePattern)).toHaveCount(0) - await expect(filters.getByText(dateFilterActivePattern)).toHaveCount(0) - await expect(filters.getByRole('button', { name: presetAllPattern })).toHaveAttribute( - 'aria-pressed', - 'true', - ) - }) + expect(download.suggestedFilename()).toMatch(/^ttdash-report-\d{4}-\d{2}-\d{2}\.pdf$/) + const pdf = await readDownloadText(download) + expect(pdf.startsWith('%PDF-1.4')).toBe(true) + await expect.poll(() => pdfReport.getReportRequest()?.language).toBe('de') }) -test('executes dynamic provider and model commands from the command palette', async ({ page }) => { +test('executes representative filter and quick-select commands', async ({ page }) => { const filters = page.locator('#filters') - for (const provider of providerLabels) { - await test.step(`provider command toggles ${provider}`, async () => { - const providerButton = filters.getByRole('button', { name: 
provider, exact: true }) + await runPaletteCommand(page, 'command-view-monthly') + await expect(filters.getByRole('combobox', { name: viewModeComboboxPattern })).toContainText( + monthlyViewPattern, + ) - await runPaletteCommand(page, `command-provider-${provider}`) - await expect(providerButton).toHaveAttribute('aria-pressed', 'true') + await runPaletteCommand(page, 'command-preset-7d') + await expect(filters.getByRole('button', { name: preset7Pattern })).toHaveAttribute( + 'aria-pressed', + 'true', + ) + await expect(filters.getByText(dateFilterActivePattern)).toBeVisible() - await runPaletteCommand(page, `command-provider-${provider}`) - await expect(providerButton).toHaveAttribute('aria-pressed', 'false') - }) - } - - for (const model of modelLabels) { - await test.step(`model command toggles ${model}`, async () => { - const modelButton = filters.getByRole('button', { name: model, exact: true }) - - await runPaletteCommand(page, `command-model-${model}`) - await expect(modelButton).toHaveAttribute('aria-pressed', 'true') - - await runPaletteCommand(page, `command-model-${model}`) - await expect(modelButton).toHaveAttribute('aria-pressed', 'false') - }) - } -}) - -test('executes scroll and filter navigation commands from the command palette', async ({ - page, -}) => { - await test.step('scroll commands reach the bottom and top of the dashboard', async () => { - await runPaletteCommand(page, 'command-bottom') - await expect.poll(() => page.evaluate(() => Math.round(window.scrollY))).toBeGreaterThan(400) - - await runPaletteCommand(page, 'command-top') - await expect.poll(() => page.evaluate(() => Math.round(window.scrollY))).toBeLessThan(40) - }) - - await test.step('filters command scrolls to the filter bar', async () => { - await page.evaluate(() => window.scrollTo({ top: document.body.scrollHeight })) - await runPaletteCommand(page, 'command-filters') - await waitForSectionNearTop(page, '#filters') - }) -}) - -test('executes dashboard section navigation 
commands from the command palette', async ({ - page, -}) => { - const dashboardCommands = sectionCommands.slice(0, 7) - - await runSectionNavigationCommands(page, dashboardCommands) - await expect(page.locator(dashboardCommands.at(-1)!.selector)).toBeVisible() -}) - -test('executes analysis section navigation commands from the command palette', async ({ page }) => { - const analysisCommands = sectionCommands.slice(7) + const palette = await openPalette(page) + await palette.locator('input').fill('daily') + await expect(palette.locator('[data-testid="command-view-daily"]')).toBeVisible() + await page.keyboard.press('1') - await runSectionNavigationCommands(page, analysisCommands) - await expect(page.locator(analysisCommands.at(-1)!.selector)).toBeVisible() + await expect(palette).toBeHidden() + await expect(filters.getByRole('combobox', { name: viewModeComboboxPattern })).toContainText( + dailyViewPattern, + ) }) -test('executes theme, language, help, and quick-select interactions from the command palette', async ({ - page, -}) => { - await test.step('theme command toggles the document theme', async () => { - const wasDark = await page.evaluate(() => document.documentElement.classList.contains('dark')) - await runPaletteCommand(page, 'command-theme') - await expect - .poll(async () => page.evaluate(() => document.documentElement.classList.contains('dark'))) - .toBe(!wasDark) - }) - - await test.step('quick-select runs the English language command from search result #1', async () => { - const palette = await openPalette(page) - await palette.locator('input').fill('english') - await expect(palette.locator('[data-testid="command-language-en"]')).toBeVisible() - await page.keyboard.press('1') - await expect(palette).toBeHidden() - await expect(page.locator('#filters').getByText('Filter status')).toBeVisible() - }) - - await test.step('German language command switches the UI back to German', async () => { - await runPaletteCommand(page, 'command-language-de') - await 
expect(page.locator('#filters').getByText('Filterstatus')).toBeVisible() - }) +test('executes representative section navigation and help commands', async ({ page }) => { + await page.evaluate(() => window.scrollTo({ top: document.body.scrollHeight })) + await runPaletteCommand(page, 'command-section-costAnalysis') + await waitForSectionNearTop(page, '#charts') - await test.step('help command opens the help panel', async () => { - await runPaletteCommand(page, 'command-help') + await runPaletteCommand(page, 'command-help') - const dialog = page.getByRole('dialog', { name: helpDialogTitlePattern }) - await expect(dialog).toBeVisible() - await page.keyboard.press('Escape') - await expect(dialog).toBeHidden() - }) + const dialog = page.getByRole('dialog', { name: helpDialogTitlePattern }) + await expect(dialog).toBeVisible() + await page.keyboard.press('Escape') + await expect(dialog).toBeHidden() }) diff --git a/tests/unit/command-palette-commands.test.ts b/tests/unit/command-palette-commands.test.ts new file mode 100644 index 0000000..9047ec9 --- /dev/null +++ b/tests/unit/command-palette-commands.test.ts @@ -0,0 +1,231 @@ +import { describe, expect, it, vi } from 'vitest' +import { + buildCommandPaletteCommands, + filterCommandItems, + getVisibleCommandItems, + normalizeSearchValue, +} from '@/components/features/command-palette/CommandPalette.commands' +import { DEFAULT_APP_SETTINGS } from '@/lib/app-settings' +import type { DashboardSectionId } from '@/types' + +type CommandPaletteCommandOptions = Parameters[0] + +const providerLabels = ['Anthropic', 'Google', 'OpenAI'] +const modelLabels = ['Claude Sonnet 4.5', 'Gemini 2.5 Pro', 'GPT-5.4'] +const expectedCommandTestIds = [ + 'command-auto-import', + 'command-settings-open', + 'command-csv', + 'command-report', + 'command-upload', + 'command-delete', + 'command-view-daily', + 'command-view-monthly', + 'command-view-yearly', + 'command-preset-7d', + 'command-preset-30d', + 'command-preset-month', + 
'command-preset-year', + 'command-preset-all', + 'command-clear-providers', + 'command-clear-models', + 'command-clear-dates', + 'command-reset-all', + 'command-top', + 'command-bottom', + 'command-filters', + 'command-theme', + 'command-language-de', + 'command-language-en', + 'command-help', + ...DEFAULT_APP_SETTINGS.sectionOrder.map((sectionId) => `command-section-${sectionId}`), + ...providerLabels.map((provider) => `command-provider-${provider}`), + ...modelLabels.map((model) => `command-model-${model}`), +].sort() + +const translations: Record = { + 'commandPalette.commands.autoImport.label': 'Auto import', + 'commandPalette.commands.autoImport.description': 'Import data from toktrack', + 'commandPalette.commands.upload.label': 'Upload file', + 'commandPalette.commands.upload.description': 'Upload JSON usage data', + 'commandPalette.commands.exportCsv.label': 'Export CSV', + 'commandPalette.commands.exportCsv.description': 'Download CSV', + 'commandPalette.commands.generateReport.label': 'Report', + 'commandPalette.commands.generateReport.labelLoading': 'Generating report', + 'commandPalette.commands.generateReport.description': 'Download PDF report', + 'commandPalette.commands.openSettings.label': 'Settings', + 'commandPalette.commands.openSettings.description': 'Open settings', + 'commandPalette.commands.delete.label': 'Delete', + 'commandPalette.commands.delete.description': 'Delete data', + 'commandPalette.commands.viewDaily.label': 'Daily view', + 'commandPalette.commands.viewDaily.description': 'Switch to daily view', + 'commandPalette.commands.viewMonthly.label': 'Monthly view', + 'commandPalette.commands.viewMonthly.description': 'Switch to monthly view', + 'commandPalette.commands.viewYearly.label': 'Yearly view', + 'commandPalette.commands.viewYearly.description': 'Switch to yearly view', + 'commandPalette.commands.preset7d.label': '7D', + 'commandPalette.commands.preset7d.description': 'Last 7 days', + 'commandPalette.commands.preset30d.label': 
'30D', + 'commandPalette.commands.preset30d.description': 'Last 30 days', + 'commandPalette.commands.presetMonth.label': 'Month', + 'commandPalette.commands.presetMonth.description': 'Current month', + 'commandPalette.commands.presetYear.label': 'Year', + 'commandPalette.commands.presetYear.description': 'Current year', + 'commandPalette.commands.presetAll.label': 'All', + 'commandPalette.commands.presetAll.description': 'All data', + 'commandPalette.commands.clearProviders.label': 'Clear providers', + 'commandPalette.commands.clearProviders.description': 'Clear provider filters', + 'commandPalette.commands.clearModels.label': 'Clear models', + 'commandPalette.commands.clearModels.description': 'Clear model filters', + 'commandPalette.commands.clearDates.label': 'Clear dates', + 'commandPalette.commands.clearDates.description': 'Clear date range', + 'commandPalette.commands.resetAll.label': 'Reset all', + 'commandPalette.commands.resetAll.description': 'Reset dashboard filters', + 'commandPalette.commands.scrollTop.label': 'Top', + 'commandPalette.commands.scrollTop.description': 'Scroll to top', + 'commandPalette.commands.scrollBottom.label': 'Bottom', + 'commandPalette.commands.scrollBottom.description': 'Scroll to bottom', + 'commandPalette.commands.filters.label': 'Filters', + 'commandPalette.commands.filters.description': 'Go to filters', + 'commandPalette.commands.themeLight.label': 'Light theme', + 'commandPalette.commands.themeDark.label': 'Dark theme', + 'commandPalette.commands.themeDark.description': 'Toggle theme', + 'commandPalette.commands.languageGerman.label': 'German', + 'commandPalette.commands.languageGerman.description': 'Switch to German', + 'commandPalette.commands.languageEnglish.label': 'English', + 'commandPalette.commands.languageEnglish.description': 'Switch to English', + 'commandPalette.commands.help.label': 'Help', + 'commandPalette.commands.help.description': 'Open help', + 'commandPalette.groups.loadData': 'Load data', + 
'commandPalette.groups.exports': 'Exports', + 'commandPalette.groups.maintenance': 'Maintenance', + 'commandPalette.groups.filters': 'Filters', + 'commandPalette.groups.navigation': 'Navigation', + 'commandPalette.groups.view': 'View', + 'commandPalette.groups.language': 'Language', + 'commandPalette.groups.help': 'Help', + 'commandPalette.groups.providers': 'Providers', + 'commandPalette.groups.models': 'Models', + 'common.provider': 'Provider', + 'common.model': 'Model', +} + +function t(key: string, options?: Record) { + if (key === 'commandPalette.commands.goToSection.label') { + return `Go to ${String(options?.section)}` + } + + if (key === 'commandPalette.commands.goToSection.description') { + return `Scroll to ${String(options?.section)}` + } + + return translations[key] ?? key +} + +function buildOptions(overrides: Partial = {}) { + const noop = vi.fn() + + return { + isDark: true, + availableProviders: providerLabels, + selectedProviders: [], + availableModels: modelLabels, + selectedModels: [], + hasTodaySection: true, + hasMonthSection: true, + hasRequestSection: true, + sectionVisibility: { ...DEFAULT_APP_SETTINGS.sectionVisibility }, + sectionOrder: [...DEFAULT_APP_SETTINGS.sectionOrder], + reportGenerating: false, + onToggleTheme: noop, + onExportCSV: noop, + onGenerateReport: noop, + onDelete: noop, + onUpload: noop, + onAutoImport: noop, + onOpenSettings: noop, + onScrollTo: noop, + onScrollTop: noop, + onScrollBottom: noop, + onViewModeChange: noop, + onApplyPreset: noop, + onToggleProvider: noop, + onToggleModel: noop, + onClearProviders: noop, + onClearModels: noop, + onClearDateRange: noop, + onResetAll: noop, + onHelp: noop, + onLanguageChange: noop, + t, + ...overrides, + } satisfies CommandPaletteCommandOptions +} + +describe('command palette command builder', () => { + it('builds the complete command contract for the seeded dashboard state', () => { + const commands = buildCommandPaletteCommands(buildOptions()) + + expect(commands.map((cmd) 
=> cmd.testId ?? `command-${cmd.id}`).sort()).toEqual( + expectedCommandTestIds, + ) + }) + + it('keeps section commands ordered and filtered by visibility and availability', () => { + const sectionOrder: DashboardSectionId[] = [ + 'tables', + 'requestAnalysis', + 'today', + 'costAnalysis', + ] + const commands = buildCommandPaletteCommands( + buildOptions({ + hasRequestSection: false, + hasTodaySection: false, + sectionOrder, + sectionVisibility: { + ...DEFAULT_APP_SETTINGS.sectionVisibility, + costAnalysis: false, + }, + }), + ) + + expect( + commands.filter((cmd) => cmd.id.startsWith('section-')).map((cmd) => cmd.testId), + ).toEqual(['command-section-tables']) + }) + + it('normalizes aliases and ranks matching commands for keyboard quick-select', () => { + const commands = buildCommandPaletteCommands(buildOptions()) + const settingsResults = filterCommandItems(commands, 'einstellungen offnen') + const visibleResults = getVisibleCommandItems(settingsResults) + + expect(normalizeSearchValue('Einstellungen öffnen')).toBe('einstellungen offnen') + expect(visibleResults[0]?.id).toBe('settings-open') + expect(filterCommandItems(commands, 'not-a-real-command')).toHaveLength(0) + }) + + it('wires dynamic provider and model commands to their callbacks', () => { + const onToggleProvider = vi.fn() + const onToggleModel = vi.fn() + const commands = buildCommandPaletteCommands( + buildOptions({ + selectedProviders: ['OpenAI'], + selectedModels: ['GPT-5.4'], + onToggleProvider, + onToggleModel, + }), + ) + + const providerCommand = commands.find((cmd) => cmd.id === 'provider-OpenAI') + const modelCommand = commands.find((cmd) => cmd.id === 'model-GPT-5.4') + + expect(providerCommand?.label).toBe('Clear providers: OpenAI') + providerCommand?.action() + expect(onToggleProvider).toHaveBeenCalledWith('OpenAI') + + expect(modelCommand?.label).toBe('Clear models: GPT-5.4') + modelCommand?.action() + expect(onToggleModel).toHaveBeenCalledWith('GPT-5.4') + }) +}) diff --git 
a/tests/unit/playwright-config.test.ts b/tests/unit/playwright-config.test.ts index e94d0e5..0cfc78c 100644 --- a/tests/unit/playwright-config.test.ts +++ b/tests/unit/playwright-config.test.ts @@ -48,5 +48,5 @@ describe('playwright config', () => { expect(ciConfig.webServer).toBeUndefined() expect(packageJson.scripts['test:e2e:ci']).toBe('playwright test --workers=2') expect(packageJson.scripts['test:e2e:ci']).not.toContain('CI=1') - }) + }, 10_000) }) From 3a4285d14790e18cc05327ed5bb33cbf46121bdc Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Tue, 28 Apr 2026 23:07:35 +0200 Subject: [PATCH 06/42] Improve runtime and dashboard coverage --- .../dashboard-sections-rendering.test.tsx | 274 ++++++++++++++++ tests/frontend/lazy-dashboard-charts.test.tsx | 309 ++++++++++++++++++ tests/integration/app-runtime.test.ts | 134 ++++++++ .../data-runtime-persistence.test.ts | 76 +++++ tests/unit/data-runtime-contract.test.ts | 273 ++++++++++++++++ tests/unit/http-router-mutations.test.ts | 206 +++++++++++- 6 files changed, 1263 insertions(+), 9 deletions(-) create mode 100644 tests/frontend/dashboard-sections-rendering.test.tsx create mode 100644 tests/frontend/lazy-dashboard-charts.test.tsx create mode 100644 tests/integration/app-runtime.test.ts create mode 100644 tests/integration/data-runtime-persistence.test.ts create mode 100644 tests/unit/data-runtime-contract.test.ts diff --git a/tests/frontend/dashboard-sections-rendering.test.tsx b/tests/frontend/dashboard-sections-rendering.test.tsx new file mode 100644 index 0000000..1f57122 --- /dev/null +++ b/tests/frontend/dashboard-sections-rendering.test.tsx @@ -0,0 +1,274 @@ +// @vitest-environment jsdom + +import type { ReactNode } from 'react' +import { fireEvent, render, screen } from '@testing-library/react' +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { DashboardSections } from '@/components/dashboard/DashboardSections' +import { DEFAULT_APP_SETTINGS } from '@/lib/app-settings' +import { initI18n 
} from '@/lib/i18n' +import type { DashboardSectionId } from '@/types' +import { createDashboardSectionsViewModel } from './dashboard-controller-test-helpers' + +const dashboardMotionMocks = vi.hoisted(() => ({ + cancelPreloads: vi.fn(), + scheduleDashboardPreloads: vi.fn(), +})) + +dashboardMotionMocks.scheduleDashboardPreloads.mockImplementation(() => ({ + cancel: dashboardMotionMocks.cancelPreloads, +})) + +vi.mock('@/components/dashboard/DashboardMotion', () => ({ + AnimatedDashboardSection: ({ + children, + eager, + id, + onPreload, + placeholderClassName, + }: { + children: ReactNode + eager?: boolean + id: string + onPreload?: () => void | Promise + placeholderClassName?: string + }) => ( +
+ {children} +
+ ), + scheduleDashboardPreloads: dashboardMotionMocks.scheduleDashboardPreloads, +})) + +vi.mock('@/components/ui/error-boundary', () => ({ + ErrorBoundary: ({ children }: { children: ReactNode }) => <>{children}, +})) + +vi.mock('@/components/ui/skeleton', () => ({ + ChartCardSkeleton: ({ className }: { className?: string }) => ( +
+ ), +})) + +vi.mock('@/components/ui/section-header', () => ({ + SectionHeader: ({ title }: { title: string }) =>

{title}

, +})) + +vi.mock('@/components/ui/expandable-card', () => ({ + ExpandableCard: ({ children, title }: { children: ReactNode; title: string }) => ( +
{children}
+ ), +})) + +vi.mock('@/components/cards/PrimaryMetrics', () => ({ + PrimaryMetrics: () =>
, +})) +vi.mock('@/components/cards/SecondaryMetrics', () => ({ + SecondaryMetrics: () =>
, +})) +vi.mock('@/components/cards/TodayMetrics', () => ({ + TodayMetrics: () =>
, +})) +vi.mock('@/components/cards/MonthMetrics', () => ({ + MonthMetrics: () =>
, +})) +vi.mock('@/components/features/heatmap/HeatmapCalendar', () => ({ + HeatmapCalendar: ({ metric }: { metric: string }) =>
, +})) +vi.mock('@/components/features/insights/UsageInsights', () => ({ + UsageInsights: () =>
, +})) +vi.mock('@/components/features/risk/ConcentrationRisk', () => ({ + ConcentrationRisk: () =>
, +})) + +vi.mock('@/components/features/forecast/CostForecast', () => ({ + CostForecast: ({ onExpand }: { onExpand: () => void }) => ( + + ), +})) +vi.mock('@/components/features/forecast/ProviderCostForecast', () => ({ + ProviderCostForecast: ({ onExpand }: { onExpand: () => void }) => ( + + ), +})) +vi.mock('@/components/features/forecast/ForecastZoomDialog', () => ({ + ForecastZoomDialog: ({ open }: { open: boolean }) => ( +
{String(open)}
+ ), +})) +vi.mock('@/components/features/cache-roi/CacheROI', () => ({ + CacheROI: () =>
, +})) +vi.mock('@/components/features/limits/ProviderLimitsSection', () => ({ + ProviderLimitsSection: () =>
, +})) +vi.mock('@/components/features/request-quality/RequestQuality', () => ({ + RequestQuality: () =>
, +})) +vi.mock('@/components/features/comparison/PeriodComparison', () => ({ + PeriodComparison: () =>
, +})) +vi.mock('@/components/features/anomaly/AnomalyDetection', () => ({ + AnomalyDetection: () =>
, +})) + +vi.mock('@/components/charts/CostOverTime', () => ({ + CostOverTime: () =>
, +})) +vi.mock('@/components/charts/CostByModel', () => ({ + CostByModel: () =>
, +})) +vi.mock('@/components/charts/CostByModelOverTime', () => ({ + CostByModelOverTime: () =>
, +})) +vi.mock('@/components/charts/CumulativeCost', () => ({ + CumulativeCost: () =>
, +})) +vi.mock('@/components/charts/CumulativeCostPerProvider', () => ({ + CumulativeCostPerProvider: () =>
, +})) +vi.mock('@/components/charts/CostByWeekday', () => ({ + CostByWeekday: () =>
, +})) +vi.mock('@/components/charts/TokenEfficiency', () => ({ + TokenEfficiency: () =>
, +})) +vi.mock('@/components/charts/ModelMix', () => ({ + ModelMix: () =>
, +})) +vi.mock('@/components/charts/TokensOverTime', () => ({ + TokensOverTime: () =>
, +})) +vi.mock('@/components/charts/TokenTypes', () => ({ + TokenTypes: () =>
, +})) +vi.mock('@/components/charts/RequestsOverTime', () => ({ + RequestsOverTime: () =>
, +})) +vi.mock('@/components/charts/RequestCacheHitRateByModel', () => ({ + RequestCacheHitRateByModel: () =>
, +})) +vi.mock('@/components/charts/DistributionAnalysis', () => ({ + DistributionAnalysis: () =>
, +})) +vi.mock('@/components/charts/CorrelationAnalysis', () => ({ + CorrelationAnalysis: () =>
, +})) + +vi.mock('@/components/tables/ModelEfficiency', () => ({ + ModelEfficiency: () =>
, +})) +vi.mock('@/components/tables/ProviderEfficiency', () => ({ + ProviderEfficiency: () =>
, +})) +vi.mock('@/components/tables/RecentDays', () => ({ + RecentDays: () =>
, +})) + +function createVisibility(overrides: Partial> = {}) { + return { + ...DEFAULT_APP_SETTINGS.sectionVisibility, + ...overrides, + } +} + +describe('DashboardSections rendering contracts', () => { + beforeEach(async () => { + await initI18n('en') + dashboardMotionMocks.cancelPreloads.mockClear() + dashboardMotionMocks.scheduleDashboardPreloads.mockClear() + }) + + it('renders configured sections in order and skips sections without required data', () => { + const viewModel = createDashboardSectionsViewModel({ + layout: { + sectionOrder: [ + 'metrics', + 'today', + 'currentMonth', + 'requestAnalysis', + 'costAnalysis', + 'tokenAnalysis', + ], + sectionVisibility: createVisibility(), + }, + overview: { + hasCurrentMonthData: false, + todayData: null, + }, + requestAnalysis: { + metrics: { + ...createDashboardSectionsViewModel().requestAnalysis.metrics, + hasRequestData: false, + }, + }, + }) + + render() + + expect(screen.getByTestId('section-metrics')).toHaveAttribute('data-eager', 'true') + expect(screen.queryByTestId('section-today')).not.toBeInTheDocument() + expect(screen.queryByTestId('section-current-month')).not.toBeInTheDocument() + expect(screen.queryByTestId('section-request-analysis')).not.toBeInTheDocument() + expect(Array.from(document.querySelectorAll('section')).map((section) => section.id)).toEqual([ + 'metrics', + 'charts', + 'token-analysis', + ]) + }) + + it('schedules warmup preloads for visible lazy sections and cancels them on unmount', () => { + const viewModel = createDashboardSectionsViewModel({ + layout: { + sectionOrder: ['forecastCache', 'requestAnalysis', 'limits'], + sectionVisibility: createVisibility(), + }, + requestAnalysis: { + metrics: { + ...createDashboardSectionsViewModel().requestAnalysis.metrics, + hasRequestData: false, + }, + }, + }) + + const { unmount } = render() + + expect(dashboardMotionMocks.scheduleDashboardPreloads).toHaveBeenCalledTimes(1) + 
expect(dashboardMotionMocks.scheduleDashboardPreloads.mock.calls[0]?.[0]).toHaveLength(2) + expect(screen.getByTestId('section-forecast-cache')).toHaveAttribute('data-preload', 'true') + expect(screen.getByTestId('section-limits')).toHaveAttribute('data-preload', 'true') + + unmount() + + expect(dashboardMotionMocks.cancelPreloads).toHaveBeenCalledTimes(1) + }) + + it('opens the forecast zoom dialog from lazy forecast cards', async () => { + const viewModel = createDashboardSectionsViewModel({ + layout: { + sectionOrder: ['forecastCache'], + sectionVisibility: createVisibility(), + }, + }) + + render() + + // Forecast cards cross a React.lazy boundary in DashboardSections, even with mocked chunks. + await screen.findByTestId('cost-forecast-expand') + expect(screen.getByTestId('forecast-dialog-open')).toHaveTextContent('false') + + fireEvent.click(screen.getByTestId('cost-forecast-expand')) + + expect(screen.getByTestId('forecast-dialog-open')).toHaveTextContent('true') + }) +}) diff --git a/tests/frontend/lazy-dashboard-charts.test.tsx b/tests/frontend/lazy-dashboard-charts.test.tsx new file mode 100644 index 0000000..c7dbf1c --- /dev/null +++ b/tests/frontend/lazy-dashboard-charts.test.tsx @@ -0,0 +1,309 @@ +// @vitest-environment jsdom + +import type { ReactNode } from 'react' +import { fireEvent, screen } from '@testing-library/react' +import { beforeAll, describe, expect, it, vi } from 'vitest' +import { CostByWeekday } from '@/components/charts/CostByWeekday' +import { ModelMix } from '@/components/charts/ModelMix' +import { TokensOverTime } from '@/components/charts/TokensOverTime' +import { PeriodComparison } from '@/components/features/comparison/PeriodComparison' +import { initI18n } from '@/lib/i18n' +import type { DailyUsage, TokenChartDataPoint, WeekdayData } from '@/types' +import { MockSvgContainer, MockSvgGroup } from '../recharts-test-utils' +import { renderWithTooltip } from '../test-utils' + +vi.mock('@/components/charts/ChartCard', () => ({ + 
ChartCard: ({ + children, + expandedExtra, + subtitle, + summary, + title, + }: { + children: ReactNode | ((expanded: boolean) => ReactNode) + expandedExtra?: ReactNode + subtitle?: string + summary?: ReactNode + title: string + }) => ( +
+

{title}

+ {subtitle ?

{subtitle}

: null} + {summary ?
{summary}
: null} +
+ {typeof children === 'function' ? children(false) : children} +
+ {expandedExtra ?
{expandedExtra}
: null} +
+ ), + ChartAnimationAware: ({ children }: { children: (active: boolean) => ReactNode }) => ( + <>{children(false)} + ), + ChartReveal: ({ children }: { children: ReactNode }) => <>{children}, +})) + +vi.mock('@/lib/model-color-context', () => ({ + useModelColorHelpers: () => ({ + getModelColor: (model: string) => + model.includes('Claude') ? 'rgb(251 146 60)' : 'rgb(16 185 129)', + }), +})) + +vi.mock('recharts', () => ({ + ResponsiveContainer: ({ children }: { children: ReactNode }) => ( + {children} + ), + AreaChart: ({ children, data }: { children: ReactNode; data?: unknown }) => ( + + {children} + + ), + BarChart: ({ + children, + data, + onMouseLeave, + onMouseMove, + }: { + children: ReactNode + data?: unknown + onMouseLeave?: () => void + onMouseMove?: (state: { activeTooltipIndex: number }) => void + }) => ( + onMouseLeave?.()} + onMouseMove={() => onMouseMove?.({ activeTooltipIndex: 1 })} + > + {children} + + ), + ComposedChart: ({ + children, + data, + onClick, + }: { + children: ReactNode + data?: unknown + onClick?: (state: { activePayload?: Array<{ payload?: { date?: string } }> }) => void + }) => { + const chartData = Array.isArray(data) ? 
data : [] + const firstPoint = chartData[0] as { date?: string } | undefined + return ( + onClick?.({ activePayload: [{ payload: firstPoint }] })} + > + {children} + + ) + }, + Area: ({ dataKey, fill, name }: { dataKey?: string; fill?: string; name?: string }) => ( + + ), + Bar: ({ children, dataKey, name }: { children?: ReactNode; dataKey?: string; name?: string }) => ( + + {children} + + ), + CartesianGrid: () => null, + Cell: ({ fill }: { fill?: string }) => ( + + ), + Line: ({ + dataKey, + name, + strokeDasharray, + }: { + dataKey?: string + name?: string + strokeDasharray?: string + }) => ( + + ), + Tooltip: () => null, + XAxis: () => null, + YAxis: () => null, +})) + +function createDailyUsage( + date: string, + { + claudeCost, + gptCost, + }: { + claudeCost: number + gptCost: number + }, +): DailyUsage { + const totalCost = claudeCost + gptCost + return { + date, + inputTokens: 100, + outputTokens: 50, + cacheCreationTokens: 10, + cacheReadTokens: 40, + thinkingTokens: 5, + totalTokens: 205, + totalCost, + requestCount: 4, + modelsUsed: ['Claude Sonnet 4.5', 'GPT-5.4'], + modelBreakdowns: [ + { + modelName: 'Claude Sonnet 4.5', + inputTokens: 50, + outputTokens: 25, + cacheCreationTokens: 5, + cacheReadTokens: 20, + thinkingTokens: 2, + cost: claudeCost, + requestCount: 2, + }, + { + modelName: 'GPT-5.4', + inputTokens: 50, + outputTokens: 25, + cacheCreationTokens: 5, + cacheReadTokens: 20, + thinkingTokens: 3, + cost: gptCost, + requestCount: 2, + }, + ], + } +} + +const weekdayData: WeekdayData[] = [ + { day: 'Mo', cost: 3 }, + { day: 'Di', cost: 9 }, + { day: 'Mi', cost: 6 }, + { day: 'Do', cost: 4 }, + { day: 'Fr', cost: 7 }, + { day: 'Sa', cost: 2 }, + { day: 'So', cost: 5 }, +] + +const tokenData: TokenChartDataPoint[] = [ + { + date: '2026-04-01', + Input: 100, + Output: 50, + 'Cache Write': 20, + 'Cache Read': 60, + Thinking: 10, + totalTokens: 240, + tokenMA7: 240, + inputMA7: 100, + outputMA7: 50, + cacheWriteMA7: 20, + cacheReadMA7: 60, + 
thinkingMA7: 10, + }, + { + date: '2026-04-02', + Input: 120, + Output: 70, + 'Cache Write': 30, + 'Cache Read': 80, + Thinking: 20, + totalTokens: 320, + tokenMA7: 280, + inputMA7: 110, + outputMA7: 60, + cacheWriteMA7: 25, + cacheReadMA7: 70, + thinkingMA7: 15, + }, +] + +describe('lazy dashboard charts', () => { + beforeAll(async () => { + await initI18n('en') + }) + + it('renders CostByWeekday peak and low bars without a browser-only chart dependency', () => { + renderWithTooltip() + + expect(screen.getByText('Cost by weekday')).toBeInTheDocument() + expect(screen.getByText('Peak: Di · Low: Sa · Weekend 19%')).toBeInTheDocument() + expect(screen.getByTestId('chart-bar')).toHaveAttribute('data-name', 'Avg cost') + + const fills = screen.getAllByTestId('bar-cell').map((cell) => cell.getAttribute('data-fill')) + expect(fills).toHaveLength(7) + expect(fills.some((fill) => fill?.includes('weekdayPeak'))).toBe(true) + expect(fills.some((fill) => fill?.includes('weekdayLow'))).toBe(true) + }) + + it('renders ModelMix for enough model history and skips underspecified input', () => { + const modelMixData = [ + createDailyUsage('2026-04-01', { claudeCost: 3, gptCost: 7 }), + createDailyUsage('2026-04-02', { claudeCost: 5, gptCost: 5 }), + createDailyUsage('2026-04-03', { claudeCost: 8, gptCost: 2 }), + ] + const view = renderWithTooltip() + + expect(view.container).toBeEmptyDOMElement() + view.unmount() + + renderWithTooltip() + + expect(screen.getByText('Model mix')).toBeInTheDocument() + expect(screen.getByText('Cost share by model over time')).toBeInTheDocument() + expect(screen.getAllByTestId('chart-area')).toHaveLength(2) + }) + + it('renders token totals, moving averages, and drilldown clicks in TokensOverTime', () => { + const onClickDay = vi.fn() + + renderWithTooltip() + + expect(screen.getByText('Tokens over time')).toBeInTheDocument() + expect(screen.getByText('Cache tokens')).toBeInTheDocument() + expect(screen.getByText('Input / Output 
tokens')).toBeInTheDocument() + expect(screen.getByText('Thinking tokens')).toBeInTheDocument() + expect(screen.getByText('Total tokens (all types)')).toBeInTheDocument() + expect(screen.getAllByTestId('chart-area')).toHaveLength(6) + expect(screen.getAllByTestId('chart-line')).toHaveLength(6) + + fireEvent.click(screen.getAllByTestId('composed-chart')[0]) + + expect(onClickDay).toHaveBeenCalledWith('2026-04-01') + }) + + it('renders PeriodComparison empty and populated states with preset switching', () => { + const comparisonData = Array.from({ length: 14 }, (_, index) => { + const day = String(index + 1).padStart(2, '0') + return createDailyUsage(`2026-04-${day}`, { + claudeCost: index + 1, + gptCost: index + 2, + }) + }) + const view = renderWithTooltip() + + expect(screen.getByText('Not enough data for a comparison')).toBeInTheDocument() + expect(screen.getByText('At least 7 days required (currently: 3)')).toBeInTheDocument() + view.unmount() + + renderWithTooltip() + + expect(screen.getByText('Period comparison')).toBeInTheDocument() + expect(screen.getByText('Last week')).toBeInTheDocument() + expect(screen.getByText('This week')).toBeInTheDocument() + + fireEvent.click(screen.getByRole('button', { name: 'Month' })) + + expect(screen.getByText('Last month')).toBeInTheDocument() + expect(screen.getByText('This month')).toBeInTheDocument() + }) +}) diff --git a/tests/integration/app-runtime.test.ts b/tests/integration/app-runtime.test.ts new file mode 100644 index 0000000..5bb63fe --- /dev/null +++ b/tests/integration/app-runtime.test.ts @@ -0,0 +1,134 @@ +import { EventEmitter } from 'node:events' +import { promises as fsPromises } from 'node:fs' +import { createRequire } from 'node:module' +import { tmpdir } from 'node:os' +import path from 'node:path' +import { describe, expect, it, vi } from 'vitest' + +const require = createRequire(import.meta.url) +const { createAppRuntime } = require('../../server/app-runtime.js') as { + createAppRuntime: (options: 
Record) => { + server: EventEmitter + } +} + +const REQUEST_TIMEOUT_MS = Number(process.env.TEST_TIMEOUT_MS || 5000) + +class MockRequest extends EventEmitter { + method = 'GET' + url: string + headers: Record + socket = { localAddress: '127.0.0.1' } + + constructor(url: string, headers: Record) { + super() + this.url = url + this.headers = headers + } +} + +class MockResponse extends EventEmitter { + status = 0 + headers: Record = {} + body = '' + headersSent = false + + writeHead(status: number, headers: Record) { + this.status = status + this.headers = headers + this.headersSent = true + } + + end(body?: string | Buffer) { + this.body = Buffer.isBuffer(body) ? body.toString('utf8') : (body ?? '') + this.emit('finish') + } +} + +async function emitServerRequest( + server: EventEmitter, + url: string, + headers: Record, +) { + const request = new MockRequest(url, headers) + const response = new MockResponse() + const finished = new Promise((resolve, reject) => { + const cleanup = () => { + clearTimeout(timeout) + response.off('error', onError) + response.off('finish', onFinish) + } + const onError = (error: Error) => { + cleanup() + reject(error) + } + const onFinish = () => { + cleanup() + resolve(response) + } + const timeout = setTimeout(() => { + response.off('error', onError) + response.off('finish', onFinish) + reject(new Error(`Timed out waiting for ${url}`)) + }, REQUEST_TIMEOUT_MS) + + response.once('error', onError) + response.once('finish', onFinish) + }) + + server.emit('request', request, response) + return finished +} + +describe('app runtime wiring', () => { + it('composes the real server runtime with isolated paths, API prefix, and auth', async () => { + const runtimeRoot = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-app-runtime-')) + const authToken = 'test-local-auth-token-with-enough-entropy' + const processObject = { + ...process, + argv: ['node', 'server.js', '--no-open'], + env: { + ...process.env, + API_PREFIX: '/custom-api', + CI: 
'1', + HOST: '127.0.0.1', + NO_OPEN_BROWSER: '1', + PORT: '4217', + TTDASH_CACHE_DIR: path.join(runtimeRoot, 'cache'), + TTDASH_CONFIG_DIR: path.join(runtimeRoot, 'config'), + TTDASH_DATA_DIR: path.join(runtimeRoot, 'data'), + TTDASH_INSTANCE_ID: 'app-runtime-contract', + TTDASH_LOCAL_AUTH_TOKEN: authToken, + }, + exit: vi.fn(), + kill: vi.fn(() => true), + on: vi.fn(), + pid: 42170, + platform: process.platform, + stdout: { isTTY: false }, + } + + try { + const runtime = createAppRuntime({ + entrypointPath: path.join(runtimeRoot, 'server.js'), + processObject, + }) + + const response = await emitServerRequest(runtime.server, '/custom-api/runtime', { + authorization: `Bearer ${authToken}`, + host: '127.0.0.1:4217', + }) + + expect(response.status).toBe(200) + expect(JSON.parse(response.body)).toEqual({ + id: 'app-runtime-contract', + mode: 'foreground', + port: null, + url: null, + }) + expect(processObject.exit).not.toHaveBeenCalled() + } finally { + await fsPromises.rm(runtimeRoot, { recursive: true, force: true }) + } + }) +}) diff --git a/tests/integration/data-runtime-persistence.test.ts b/tests/integration/data-runtime-persistence.test.ts new file mode 100644 index 0000000..2e3c687 --- /dev/null +++ b/tests/integration/data-runtime-persistence.test.ts @@ -0,0 +1,76 @@ +import * as fs from 'node:fs' +import { createRequire } from 'node:module' +import { tmpdir } from 'node:os' +import path from 'node:path' +import { describe, expect, it, vi } from 'vitest' + +const require = createRequire(import.meta.url) +const fsPromises = fs.promises +const { createDataRuntime } = require('../../server/data-runtime.js') as { + createDataRuntime: (options: Record) => { + ensureAppDirs: () => void + paths: { + dataFile: string + settingsFile: string + } + readData: () => unknown + readSettings: () => unknown + readSettingsForWrite: () => { cliAutoLoadActive: boolean } + writeSettings: (settings: unknown) => Promise + } +} + +function createRuntime(runtimeRoot: string) { + 
return createDataRuntime({ + fs, + fsPromises, + os: { homedir: () => runtimeRoot }, + path, + processObject: { + env: { + TTDASH_CACHE_DIR: path.join(runtimeRoot, 'cache'), + TTDASH_CONFIG_DIR: path.join(runtimeRoot, 'config'), + TTDASH_DATA_DIR: path.join(runtimeRoot, 'data'), + }, + kill: vi.fn(() => true), + pid: process.pid, + platform: 'linux', + }, + normalizeIncomingData: (value: unknown) => value, + runtimeInstanceId: 'data-runtime-persistence', + appDirName: 'TTDash', + appDirNameLinux: 'ttdash', + legacyDataFile: path.join(runtimeRoot, 'legacy-data.json'), + settingsBackupKind: 'ttdash-settings-backup', + usageBackupKind: 'ttdash-usage-backup', + isWindows: false, + secureDirMode: 0o700, + secureFileMode: 0o600, + fileMutationLockTimeoutMs: 80, + fileMutationLockStaleMs: 10, + }) +} + +describe('data runtime persistence', () => { + it('recovers default settings for writes while keeping corrupted persisted state visible', async () => { + const runtimeRoot = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-data-runtime-')) + const runtime = createRuntime(runtimeRoot) + + try { + runtime.ensureAppDirs() + await fsPromises.writeFile(runtime.paths.settingsFile, '{broken settings') + await fsPromises.writeFile(runtime.paths.dataFile, '{broken usage') + + expect(() => runtime.readSettings()).toThrow('Settings file is unreadable or corrupted.') + expect(() => runtime.readData()).toThrow('Usage data file is unreadable or corrupted.') + const recoveredSettings = runtime.readSettingsForWrite() + expect(recoveredSettings).toMatchObject({ cliAutoLoadActive: false }) + + await runtime.writeSettings(recoveredSettings) + + expect(runtime.readSettings()).toMatchObject({ cliAutoLoadActive: false }) + } finally { + await fsPromises.rm(runtimeRoot, { recursive: true, force: true }) + } + }) +}) diff --git a/tests/unit/data-runtime-contract.test.ts b/tests/unit/data-runtime-contract.test.ts new file mode 100644 index 0000000..34d3f6a --- /dev/null +++ 
b/tests/unit/data-runtime-contract.test.ts @@ -0,0 +1,273 @@ +import { promises as fsPromises } from 'node:fs' +import { createRequire } from 'node:module' +import path from 'node:path' +import { describe, expect, it, vi } from 'vitest' + +const require = createRequire(import.meta.url) +// createDataRuntime expects the real Node fs surface; these contract tests avoid host writes. +const fs = require('node:fs') +const { createDataRuntime } = require('../../server/data-runtime.js') as { + createDataRuntime: (options: Record) => { + appPaths: { + dataDir: string + configDir: string + cacheDir: string + } + paths: { + dataFile: string + settingsFile: string + npxCacheDir: string + } + ensureAppDirs: (extraDirs?: string[]) => void + extractSettingsImportPayload: (payload: unknown) => unknown + extractUsageImportPayload: (payload: unknown) => unknown + isPersistedStateError: (error: unknown, kind?: string) => boolean + mergeUsageData: ( + currentData: unknown, + importedData: { + daily: Array> + totals: Record + }, + ) => { + data: { + daily: Array> + totals: Record + } + summary: { + importedDays: number + addedDays: number + unchangedDays: number + conflictingDays: number + totalDays: number + } + } + readData: () => unknown + readSettings: () => { cliAutoLoadActive: boolean } + readSettingsForWrite: () => { cliAutoLoadActive: boolean } + writeData: (data: unknown) => Promise + writeSettings: (settings: unknown) => Promise + } +} + +function createRuntime({ + env = {}, + getCliAutoLoadActive = () => false, + homeDir = '/Users/tester', + isWindows = false, + pathModule = path, + platform = 'darwin', +}: { + env?: Record + getCliAutoLoadActive?: () => boolean + homeDir?: string + isWindows?: boolean + pathModule?: typeof path + platform?: string +} = {}) { + return createDataRuntime({ + fs, + fsPromises, + os: { homedir: () => homeDir }, + path: pathModule, + processObject: { + env, + kill: vi.fn(() => true), + pid: 7319, + platform, + }, + normalizeIncomingData: (value: 
unknown) => value, + runtimeInstanceId: 'data-runtime-contract', + appDirName: 'TTDash', + appDirNameLinux: 'ttdash', + legacyDataFile: pathModule.join(homeDir, 'legacy-data.json'), + settingsBackupKind: 'ttdash-settings-backup', + usageBackupKind: 'ttdash-usage-backup', + isWindows, + secureDirMode: 0o700, + secureFileMode: 0o600, + fileMutationLockTimeoutMs: 80, + fileMutationLockStaleMs: 10, + getCliAutoLoadActive, + }) +} + +describe('data runtime contracts', () => { + it('prefers explicit data, config, and cache directories over platform defaults', () => { + const runtime = createRuntime({ + env: { + TTDASH_CACHE_DIR: '/tmp/ttdash/cache', + TTDASH_CONFIG_DIR: '/tmp/ttdash/config', + TTDASH_DATA_DIR: '/tmp/ttdash/data', + }, + getCliAutoLoadActive: () => true, + platform: 'linux', + }) + + expect(runtime.appPaths).toEqual({ + cacheDir: '/tmp/ttdash/cache', + configDir: '/tmp/ttdash/config', + dataDir: '/tmp/ttdash/data', + }) + expect(runtime.paths).toMatchObject({ + dataFile: '/tmp/ttdash/data/data.json', + settingsFile: '/tmp/ttdash/config/settings.json', + npxCacheDir: '/tmp/ttdash/cache/npx-cache', + }) + expect(runtime.readSettings()).toMatchObject({ cliAutoLoadActive: true }) + }) + + it('resolves macOS, Windows, and XDG platform paths without touching the host profile', () => { + const darwinRuntime = createRuntime({ + homeDir: '/Users/alex', + platform: 'darwin', + }) + const windowsRuntime = createRuntime({ + env: { + APPDATA: 'C:\\Users\\Alex\\AppData\\Roaming', + LOCALAPPDATA: 'C:\\Users\\Alex\\AppData\\Local', + }, + homeDir: 'C:\\Users\\Alex', + isWindows: true, + pathModule: path.win32, + platform: 'win32', + }) + const linuxRuntime = createRuntime({ + env: { + XDG_CACHE_HOME: '/home/alex/.cache-root', + XDG_CONFIG_HOME: '/home/alex/.config-root', + XDG_DATA_HOME: '/home/alex/.data-root', + }, + homeDir: '/home/alex', + platform: 'linux', + }) + + expect(darwinRuntime.appPaths).toEqual({ + dataDir: '/Users/alex/Library/Application Support/TTDash', 
+ configDir: '/Users/alex/Library/Application Support/TTDash', + cacheDir: '/Users/alex/Library/Caches/TTDash', + }) + expect(windowsRuntime.appPaths).toEqual({ + dataDir: 'C:\\Users\\Alex\\AppData\\Local\\TTDash', + configDir: 'C:\\Users\\Alex\\AppData\\Roaming\\TTDash', + cacheDir: 'C:\\Users\\Alex\\AppData\\Local\\TTDash\\Cache', + }) + expect(linuxRuntime.appPaths).toEqual({ + dataDir: '/home/alex/.data-root/ttdash', + configDir: '/home/alex/.config-root/ttdash', + cacheDir: '/home/alex/.cache-root/ttdash', + }) + }) + + it('validates backup kinds before importing settings or usage payloads', () => { + const runtime = createRuntime() + + expect( + runtime.extractSettingsImportPayload({ + kind: 'ttdash-settings-backup', + settings: { language: 'de' }, + }), + ).toEqual({ language: 'de' }) + expect( + runtime.extractUsageImportPayload({ + data: { daily: [] }, + kind: 'ttdash-usage-backup', + }), + ).toEqual({ daily: [] }) + expect(() => + runtime.extractSettingsImportPayload({ data: {}, kind: 'ttdash-usage-backup' }), + ).toThrow('This is a data backup file, not a settings file.') + expect(() => + runtime.extractUsageImportPayload({ kind: 'ttdash-settings-backup', settings: {} }), + ).toThrow('This is a settings backup file, not a data file.') + }) + + it('merges imported usage data without overwriting conflicts or duplicating equivalent days', () => { + const runtime = createRuntime() + const currentDay = { + date: '2026-04-01', + inputTokens: 10, + outputTokens: 5, + cacheCreationTokens: 0, + cacheReadTokens: 2, + thinkingTokens: 1, + totalTokens: 18, + totalCost: 0.25, + requestCount: 3, + modelsUsed: ['GPT-5.4', 'Claude'], + modelBreakdowns: [ + { + modelName: 'Claude', + inputTokens: 5, + outputTokens: 3, + cacheCreationTokens: 0, + cacheReadTokens: 1, + thinkingTokens: 0, + cost: 0.1, + requestCount: 1, + }, + { + modelName: 'GPT-5.4', + inputTokens: 5, + outputTokens: 2, + cacheCreationTokens: 0, + cacheReadTokens: 1, + thinkingTokens: 1, + cost: 0.15, + 
requestCount: 2, + }, + ], + } + const equivalentDay = { + ...currentDay, + modelsUsed: ['Claude', 'GPT-5.4'], + modelBreakdowns: [...currentDay.modelBreakdowns].reverse(), + } + const conflictingDay = { + ...currentDay, + totalCost: 9, + } + const newDay = { + ...currentDay, + date: '2026-04-02', + totalCost: 0.75, + totalTokens: 20, + requestCount: 4, + } + + const equivalentMerge = runtime.mergeUsageData( + { daily: [currentDay], totals: { totalCost: 0.25 } }, + { + daily: [equivalentDay, newDay], + totals: { totalCost: 1 }, + }, + ) + const conflictingMerge = runtime.mergeUsageData( + { daily: [currentDay], totals: { totalCost: 0.25 } }, + { + daily: [conflictingDay], + totals: { totalCost: 9 }, + }, + ) + + expect(equivalentMerge.summary).toEqual({ + importedDays: 2, + addedDays: 1, + unchangedDays: 1, + conflictingDays: 0, + totalDays: 2, + }) + expect(equivalentMerge.data.daily.map((day) => day.date)).toEqual(['2026-04-01', '2026-04-02']) + expect(equivalentMerge.data.totals).toMatchObject({ + totalCost: 1, + totalTokens: 38, + requestCount: 7, + }) + expect(conflictingMerge.summary).toMatchObject({ + addedDays: 0, + unchangedDays: 0, + conflictingDays: 1, + totalDays: 1, + }) + expect(conflictingMerge.data.daily[0]?.totalCost).toBe(0.25) + }) +}) diff --git a/tests/unit/http-router-mutations.test.ts b/tests/unit/http-router-mutations.test.ts index 6c94a4b..f773023 100644 --- a/tests/unit/http-router-mutations.test.ts +++ b/tests/unit/http-router-mutations.test.ts @@ -14,10 +14,10 @@ const { createHttpRouter } = require('../../server/http-router.js') as { class MockResponse { status = 0 - headers: Record = {} + headers: Record = {} body = '' - writeHead(status: number, headers: Record) { + writeHead(status: number, headers: Record) { this.status = status this.headers = headers } @@ -30,11 +30,24 @@ class MockResponse { function createRouter({ autoImportRuntimeOverrides = {}, dataRuntimeOverrides = {}, + generatePdfReport = vi.fn(), + getRuntimeSnapshot = 
vi.fn(() => ({ + id: 'runtime-1', + mode: 'foreground', + port: 3000, + url: 'http://127.0.0.1:3000', + })), + httpUtilsOverrides = {}, readBody = vi.fn(async () => ({})), + remoteAuthOverrides = {}, }: { autoImportRuntimeOverrides?: Record dataRuntimeOverrides?: Record + generatePdfReport?: () => Promise<{ buffer: Buffer; filename: string }> + getRuntimeSnapshot?: () => unknown + httpUtilsOverrides?: Record readBody?: () => Promise + remoteAuthOverrides?: Record } = {}) { const dataRuntime = { extractSettingsImportPayload: vi.fn( @@ -84,8 +97,16 @@ function createRouter({ staticRoot: '/app/dist', securityHeaders: { 'X-Test-Security': '1' }, httpUtils: { - json: (res: MockResponse, status: number, payload: unknown) => { - res.writeHead(status, { 'Content-Type': 'application/json; charset=utf-8' }) + json: ( + res: MockResponse, + status: number, + payload: unknown, + headers: Record = {}, + ) => { + res.writeHead(status, { + 'Content-Type': 'application/json; charset=utf-8', + ...headers, + }) res.end(JSON.stringify(payload)) }, readBody, @@ -95,13 +116,23 @@ function createRouter({ : pathname.startsWith('/api/') ? 
pathname.slice('/api'.length) : null, - sendBuffer: vi.fn(), + sendBuffer: ( + res: MockResponse, + status: number, + headers: Record, + buffer: Buffer, + ) => { + res.writeHead(status, headers) + res.end(buffer) + }, validateMutationRequest: vi.fn(() => null), validateRequestHost: vi.fn(() => null), + ...httpUtilsOverrides, }, remoteAuth: { resolveBootstrapResponse: () => null, validateApiRequest: () => null, + ...remoteAuthOverrides, }, dataRuntime, autoImportRuntime: { @@ -113,14 +144,14 @@ function createRouter({ })), ...autoImportRuntimeOverrides, }, - generatePdfReport: vi.fn(), - getRuntimeSnapshot: vi.fn(), + generatePdfReport, + getRuntimeSnapshot, }) - return { dataRuntime, readBody, router } + return { dataRuntime, generatePdfReport, getRuntimeSnapshot, readBody, router } } -async function request( +async function requestRaw( router: ReturnType['router'], url: string, method: string, @@ -130,6 +161,15 @@ async function request( { url, method, headers: { 'content-type': 'application/json' } }, res, ) + return { res } +} + +async function request( + router: ReturnType['router'], + url: string, + method: string, +) { + const { res } = await requestRaw(router, url, method) return { res, body: JSON.parse(res.body) } } @@ -332,4 +372,152 @@ describe('HTTP router mutation errors', () => { detail: 'registry unavailable', }) }) + + it('rejects untrusted hosts before API auth is evaluated', async () => { + const validateApiRequest = vi.fn(() => ({ + status: 401, + message: 'Authentication required', + })) + const { router } = createRouter({ + httpUtilsOverrides: { + validateRequestHost: vi.fn(() => ({ + status: 403, + message: 'Untrusted host header', + })), + }, + remoteAuthOverrides: { + validateApiRequest, + }, + }) + + const { res, body } = await request(router, '/api/runtime', 'GET') + + expect(res.status).toBe(403) + expect(body).toEqual({ message: 'Untrusted host header' }) + expect(validateApiRequest).not.toHaveBeenCalled() + }) + + it('forwards API 
authentication failures with challenge headers', async () => { + const { router } = createRouter({ + remoteAuthOverrides: { + validateApiRequest: vi.fn(() => ({ + headers: { 'WWW-Authenticate': 'Bearer realm="TTDash API"' }, + status: 401, + message: 'Authentication required', + })), + }, + }) + + const { res, body } = await request(router, '/api/runtime', 'GET') + + expect(res.status).toBe(401) + expect(res.headers['WWW-Authenticate']).toBe('Bearer realm="TTDash API"') + expect(body).toEqual({ message: 'Authentication required' }) + }) + + it('serves runtime snapshots only through GET', async () => { + const snapshot = { + id: 'runtime-2', + mode: 'background', + port: 3020, + url: 'http://localhost:3020', + } + const { getRuntimeSnapshot, router } = createRouter({ + getRuntimeSnapshot: vi.fn(() => snapshot), + }) + + const runtimeResponse = await request(router, '/api/runtime', 'GET') + const rejectedMethod = await request(router, '/api/runtime', 'POST') + + expect(runtimeResponse.res.status).toBe(200) + expect(runtimeResponse.body).toEqual(snapshot) + expect(getRuntimeSnapshot).toHaveBeenCalledTimes(1) + expect(rejectedMethod.res.status).toBe(405) + expect(rejectedMethod.body).toEqual({ message: 'Method Not Allowed' }) + }) + + it('keeps unknown API endpoints distinct from stale API prefixes', async () => { + const { router } = createRouter({ + httpUtilsOverrides: { + // Simulate a configured API prefix with no handler separately from a stale /api URL. + resolveApiPath: (pathname: string) => (pathname === '/custom/unknown' ? 
'/unknown' : null), + }, + }) + + const unknownEndpoint = await request(router, '/custom/unknown', 'GET') + const stalePrefix = await request(router, '/api/not-configured', 'GET') + + expect(unknownEndpoint.res.status).toBe(404) + expect(unknownEndpoint.body).toEqual({ message: 'API endpoint not found' }) + expect(stalePrefix.res.status).toBe(404) + expect(stalePrefix.body).toEqual({ message: 'Not Found' }) + }) + + it('rejects PDF reports without usage data before reading the request body', async () => { + const readBody = vi.fn(async () => ({ title: 'Usage' })) + const { router } = createRouter({ + dataRuntimeOverrides: { + readData: vi.fn(() => null), + }, + readBody, + }) + + const { res, body } = await request(router, '/api/report/pdf', 'POST') + + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'No data available for the report.' }) + expect(readBody).not.toHaveBeenCalled() + }) + + it('maps PDF body and generator failures to client or service errors', async () => { + const usageData = { daily: [{ date: '2026-04-27' }], totals: { totalCost: 1 } } + const oversizedRouter = createRouter({ + dataRuntimeOverrides: { + readData: vi.fn(() => usageData), + }, + readBody: vi.fn(async () => { + throw Object.assign(new Error('too large'), { code: 'PAYLOAD_TOO_LARGE' }) + }), + }).router + const typstMissingRouter = createRouter({ + dataRuntimeOverrides: { + readData: vi.fn(() => usageData), + }, + generatePdfReport: vi.fn(async () => { + throw Object.assign(new Error('Typst not found'), { code: 'TYPST_MISSING' }) + }), + readBody: vi.fn(async () => ({ locale: 'de-CH' })), + }).router + + const oversized = await request(oversizedRouter, '/api/report/pdf', 'POST') + const typstMissing = await request(typstMissingRouter, '/api/report/pdf', 'POST') + + expect(oversized.res.status).toBe(413) + expect(oversized.body).toEqual({ message: 'Report request too large' }) + expect(typstMissing.res.status).toBe(503) + expect(typstMissing.body).toEqual({ message: 
'Typst not found' }) + }) + + it('sends generated PDF buffers with attachment headers', async () => { + const usageData = { daily: [{ date: '2026-04-27' }], totals: { totalCost: 1 } } + const generatePdfReport = vi.fn(async () => ({ + buffer: Buffer.from('%PDF-1.4'), + filename: 'ttdash-report.pdf', + })) + const { router } = createRouter({ + dataRuntimeOverrides: { + readData: vi.fn(() => usageData), + }, + generatePdfReport, + readBody: vi.fn(async () => ({ locale: 'de-CH' })), + }) + + const { res } = await requestRaw(router, '/api/report/pdf', 'POST') + + expect(res.status).toBe(200) + expect(res.headers['Content-Type']).toBe('application/pdf') + expect(res.headers['Content-Disposition']).toBe('attachment; filename="ttdash-report.pdf"') + expect(res.body).toBe('%PDF-1.4') + expect(generatePdfReport).toHaveBeenCalledWith(usageData.daily, { locale: 'de-CH' }) + }) }) From b5cac6a9363f294cc9fa83cbbde27c8807d7ccd2 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Wed, 29 Apr 2026 07:38:33 +0200 Subject: [PATCH 07/42] Split CI test pipeline --- .github/workflows/ci.yml | 155 ++++++++++++++++-- .gitignore | 1 + docs/architecture.md | 4 +- docs/review/test-review.md | 11 ++ docs/testing.md | 14 +- package.json | 29 ++-- tests/frontend/auto-import-modal.test.tsx | 2 +- tests/frontend/chart-card.test.tsx | 2 +- tests/frontend/dashboard-error-state.test.tsx | 2 +- .../drill-down-modal-regressions.test.tsx | 2 +- tests/frontend/expandable-card.test.tsx | 2 +- tests/frontend/filter-bar-presets.test.tsx | 2 +- tests/frontend/header-links.test.tsx | 2 +- tests/frontend/info-heading.test.tsx | 4 +- .../frontend/provider-cost-forecast.test.tsx | 2 +- tests/frontend/requests-over-time.test.tsx | 2 +- tests/frontend/toast.test.tsx | 4 +- tests/unit/test-pipeline-scripts.test.ts | 57 +++++++ tests/unit/vitest-coverage-config.test.ts | 14 ++ vitest.config.ts | 1 + 20 files changed, 267 insertions(+), 45 deletions(-) create mode 100644 tests/unit/test-pipeline-scripts.test.ts diff 
--git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fc596d0..f0ea648 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,9 +14,39 @@ concurrency: cancel-in-progress: true jobs: - test: + static: runs-on: ubuntu-latest - timeout-minutes: 20 + timeout-minutes: 12 + + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up Node.js + uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0 + with: + node-version: 24 + cache: npm + + - name: Install dependencies + run: npm ci --ignore-scripts + + - name: Run static checks + run: npm run test:static + + vitest: + name: Vitest (${{ matrix.project }}) + runs-on: ubuntu-latest + timeout-minutes: 15 + strategy: + fail-fast: false + matrix: + project: + - architecture + - unit + - frontend + - integration + - integration-background steps: - name: Check out repository @@ -31,33 +61,125 @@ jobs: - name: Install dependencies run: npm ci --ignore-scripts - - name: Check formatting - run: npm run format:check + - name: Run Vitest project + run: npm run test:vitest:${{ matrix.project }} - - name: Run ESLint - run: npm run lint + - name: Upload Vitest report + if: always() + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 + with: + name: test-reports-vitest-${{ matrix.project }} + if-no-files-found: ignore + path: test-results/vitest-${{ matrix.project }}.junit.xml - - name: Run docstring lint - run: npm run lint:docstrings + coverage: + runs-on: ubuntu-latest + timeout-minutes: 20 - - name: Run dependency graph checks - run: npm run check:deps + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - - name: Run TypeScript checks - run: ./node_modules/.bin/tsc --noEmit + - name: Set up Node.js + uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0 + with: + node-version: 24 + cache: npm - - 
name: Run architecture tests - run: npm run test:architecture + - name: Install dependencies + run: npm ci --ignore-scripts - name: Run unit and integration tests with coverage - run: npm run test:unit:coverage + run: npm run test:vitest:coverage + + - name: Upload coverage reports + if: always() + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 + with: + name: coverage-reports + if-no-files-found: ignore + path: | + coverage/ + test-results/vitest-coverage.junit.xml + + build: + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up Node.js + uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0 + with: + node-version: 24 + cache: npm + + - name: Install dependencies + run: npm ci --ignore-scripts - name: Build production bundle run: npm run build:app + - name: Upload production bundle + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 + with: + name: production-dist + if-no-files-found: error + path: dist/ + + package-smoke: + runs-on: ubuntu-latest + timeout-minutes: 10 + needs: build + + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up Node.js + uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0 + with: + node-version: 24 + cache: npm + + - name: Install dependencies + run: npm ci --ignore-scripts + + - name: Download production bundle + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 + with: + name: production-dist + path: dist + - name: Verify packed npm artifact run: npm run verify:package + e2e: + runs-on: ubuntu-latest + timeout-minutes: 20 + needs: build + + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up Node.js + uses: 
actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0 + with: + node-version: 24 + cache: npm + + - name: Install dependencies + run: npm ci --ignore-scripts + + - name: Download production bundle + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 + with: + name: production-dist + path: dist + - name: Install Playwright browser run: npx playwright install --with-deps chromium @@ -68,10 +190,9 @@ jobs: if: always() uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: - name: test-reports + name: test-reports-e2e if-no-files-found: ignore path: | - coverage/ playwright-report/ test-results/ diff --git a/.gitignore b/.gitignore index 1fc5dc2..d6f51bc 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ node_modules/ dist/ data.json .DS_Store +.cache/ .playwright-mcp/ .tmp-playwright/ .tmp-smoke-*/ diff --git a/docs/architecture.md b/docs/architecture.md index c0886c5..c244503 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -200,10 +200,12 @@ Reason: the repo has stable layer boundaries, but it is not structured around st - dependency graph validation: `npm run check:deps` - dependency graph visualization: `npm run deps:graph` +- static quality gate: `npm run test:static` +- all Vitest projects without coverage: `npm run test:vitest` - architecture tests only: `npm run test:architecture` - main release-style local gate: `npm run verify:full` -Both `ci.yml` and `release.yml` run `check:deps` and `test:architecture` explicitly so dependency and architecture violations show up as separate CI failures instead of being hidden inside a larger local gate. +`ci.yml` runs static checks, architecture tests, Vitest projects, coverage, build, package smoke, and Playwright as separate DAG jobs so independent failures are visible without waiting for one serial full-gate job. 
`release.yml` still runs `check:deps` and `test:architecture` explicitly during the release path so dependency and architecture violations are not hidden inside a larger local gate. ## Contributor Rules diff --git a/docs/review/test-review.md b/docs/review/test-review.md index 05a99c5..af5ee69 100644 --- a/docs/review/test-review.md +++ b/docs/review/test-review.md @@ -121,6 +121,10 @@ Das ist robust, aber nicht maximal performant: Akzeptanzkriterium: Full-CI-Walltime sinkt deutlich, ohne Testumfang zu reduzieren. Jeder Job laedt seine eigenen Reports hoch und blockiert nicht durch gemeinsame Output-Pfade. +**Umsetzungsstand Phase 5:** `ci.yml` ist jetzt als DAG geschnitten: `static`, Vitest-Projektmatrix, +dedizierter `coverage`-Job und `build` laufen unabhaengig; `package-smoke` und `e2e` verwenden das +einmal erzeugte `production-dist`-Artifact parallel. + ### H-03 - Command-Palette-E2E mischt zu viele Testarten **Referenzen:** `tests/e2e/command-palette.spec.ts`, @@ -288,6 +292,10 @@ werden. verwenden. - Prettier Cache und TypeScript incremental fuer lokale Gates nutzen. +**Umsetzungsstand Phase 5:** `test:static` nutzt einen einzigen `lint`-Lauf fuer den regulaeren Gate, +waehrend `lint:docstrings` als gezielter Diagnosebefehl erhalten bleibt. Prettier, ESLint und +TypeScript schreiben lokale Cache-Dateien unter `.cache/`. + ### M-06 - Package-Smoke ist wichtig, aber gehoert in einen eigenen CI-Job **Referenzen:** `scripts/verify-package.js`, `package.json` @@ -305,6 +313,9 @@ Job wie Unit, Integration und E2E blockieren. - `package-smoke` und `e2e` nutzen dieses Artifact parallel. - Package-Smoke bleibt im Full-Gate, aber nicht als serieller Nachklapp nach allen Tests. +**Umsetzungsstand Phase 5:** Der CI-`build`-Job laedt `production-dist` hoch; `package-smoke` und +`e2e` laden dieses Artifact herunter und laufen nur noch mit `needs: build`. 
+ ### M-07 - Timing-Regressions sind sichtbar, aber nicht budgetiert `test:timings` listet Slow-Suites und Slow-Tests, erzwingt aber kein Budget und vergleicht nicht gegen diff --git a/docs/testing.md b/docs/testing.md index 5e1cbcc..017dd3a 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -37,6 +37,9 @@ Architecture constraints are documented separately in [`docs/architecture.md`](. - `matchMedia` - `ResizeObserver` - default `IntersectionObserver` +- The `frontend` Vitest project owns the shared `30s` jsdom timeout because coverage instrumentation + can make a few otherwise-synchronous render tests exceed Vitest's default `5s` timeout under load. + Do not add per-test timeout overrides for routine frontend render tests. - Only override `IntersectionObserver` locally when the test explicitly verifies reveal or visibility behavior. - Only call `initI18n(...)` inside a test file when locale switching is itself part of the assertion. @@ -97,7 +100,7 @@ For `tests/architecture`, prefer the shared source graph helper for simple file, ## Coverage Scope -`npm run test:unit:coverage` reports product-runtime coverage. The configured coverage scope intentionally includes frontend runtime modules, the local server runtime, shared runtime contracts, and `usage-normalizer.js` instead of only the historically high-signal frontend subset. +`npm run test:vitest:coverage` reports product-runtime coverage. `npm run test:unit:coverage` is kept as the underlying compatibility command. The configured coverage scope intentionally includes frontend runtime modules, the local server runtime, shared runtime contracts, and `usage-normalizer.js` instead of only the historically high-signal frontend subset. The coverage and timing commands use explicit `dot` and `junit` Vitest reporters. Keep those reporters on both scripts so non-interactive gates emit compact progress and do not depend on silent reporter paths. 
They intentionally write separate JUnit files, `test-results/vitest-coverage.junit.xml` and `test-results/vitest-timings.junit.xml`, so coverage and timing diagnostics do not contend for the same report path. @@ -131,11 +134,14 @@ Prioritize targeted branch coverage in runtime-heavy modules before adding anoth - Required pre-PR gate: run `npm run verify:full` before opening a PR to ensure all tests and checks pass. - Faster inner-loop gate: `npm run verify` +- Static gate only: `npm run test:static` +- All Vitest projects without coverage: `npm run test:vitest` - Architecture tests only: `npm run test:architecture` - Dependency graph gate: `npm run check:deps` -- Coverage-only unit/integration gate: `npm run test:unit:coverage` -- Playwright only: `PLAYWRIGHT_TEST_PORT=3016 npm run test:e2e` +- Coverage-only unit/integration gate: `npm run test:vitest:coverage` +- Playwright only, with a fresh app build: `PLAYWRIGHT_TEST_PORT=3016 npm run test:e2e:parallel` - CI-style Playwright smoke: `npm run test:e2e:ci` +- Serial local mirror of the CI gate: `npm run verify:ci` ## Architecture Guardrails @@ -149,6 +155,8 @@ Do not run `test:timings` in parallel with another Vitest command that writes th ## CI Notes - Keep workflow test paths aligned with the split test structure. +- The main CI workflow is intentionally a DAG: `static`, Vitest project matrix, `coverage`, and `build` can run independently; `package-smoke` and `e2e` depend only on the `production-dist` artifact from `build`. +- Keep CI report artifacts job-scoped so parallel jobs do not overwrite each other. Vitest project jobs upload `test-reports-vitest-`, coverage uploads `coverage-reports`, and Playwright uploads `test-reports-e2e`. - Do not point CI jobs at deleted catch-all files such as old monolithic `server-helpers` or frontend regression suites. 
- Windows smoke coverage should stay focused on the small, platform-relevant helper suites rather than full subprocess-heavy integration runs unless the workflow explicitly targets Windows process behavior. diff --git a/package.json b/package.json index 796a7cf..3f09915 100644 --- a/package.json +++ b/package.json @@ -18,37 +18,44 @@ "dev": "vite", "build:app": "vite build", "build": "npm run format:check && npm run lint && npm run build:app", - "check": "npm run format:check && npm run lint && npm run lint:docstrings && npm run check:deps && tsc --noEmit", + "check": "npm run test:static", "check:deps": "dependency-cruiser --config .dependency-cruiser.cjs src shared server server.js usage-normalizer.js", - "format": "prettier . --write", - "format:check": "prettier . --check", - "lint": "eslint .", - "lint:docstrings": "eslint src server.js usage-normalizer.js shared --ext .ts,.tsx,.js,.d.ts", - "lint:fix": "eslint . --fix", + "format": "prettier . --write --cache --cache-location .cache/prettier/write --cache-strategy content", + "format:check": "prettier . --check --cache --cache-location .cache/prettier/check --cache-strategy content", + "lint": "eslint . --cache --cache-location .cache/eslint/full --cache-strategy content", + "lint:docstrings": "eslint src server.js usage-normalizer.js shared --ext .ts,.tsx,.js,.d.ts --cache --cache-location .cache/eslint/docstrings --cache-strategy content", + "lint:fix": "eslint . 
--fix --cache --cache-location .cache/eslint/full --cache-strategy content", "preview": "vite preview", "start": "node server.js", "start:test-server": "node scripts/start-test-server.js", - "test": "npm run test:architecture && npm run test:unit", + "test": "npm run test:vitest", "test:architecture": "vitest run --project architecture --outputFile.junit=./test-results/vitest-architecture.junit.xml", + "test:static": "npm run format:check && npm run lint && npm run check:deps && npm run typecheck", "test:unit": "vitest run --project unit --project frontend --project integration --project integration-background --outputFile.junit=./test-results/vitest-main.junit.xml", "test:unit:watch": "vitest --project unit --project frontend --project integration --project integration-background", + "test:vitest": "npm run test:architecture && npm run test:unit", + "test:vitest:architecture": "npm run test:architecture", + "test:vitest:coverage": "npm run test:unit:coverage", "test:vitest:unit": "vitest run --project unit --outputFile.junit=./test-results/vitest-unit.junit.xml", "test:vitest:frontend": "vitest run --project frontend --outputFile.junit=./test-results/vitest-frontend.junit.xml", "test:vitest:integration": "vitest run --project integration --outputFile.junit=./test-results/vitest-integration.junit.xml", "test:vitest:integration-background": "vitest run --project integration-background --outputFile.junit=./test-results/vitest-integration-background.junit.xml", "test:unit:coverage": "vitest run --coverage --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-coverage.junit.xml", "test:timings": "vitest run --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-timings.junit.xml && node scripts/report-test-timings.js ./test-results/vitest-timings.junit.xml", 
- "test:e2e": "npm run build:app && playwright test", + "test:e2e": "npm run test:e2e:parallel", + "test:e2e:parallel": "npm run build:app && playwright test", "test:e2e:ci": "playwright test --workers=2", "test:all": "npm run verify:full", + "typecheck": "tsc --noEmit --incremental --tsBuildInfoFile .cache/tsc/tsconfig.tsbuildinfo", "docs:screenshots": "node scripts/capture-readme-screenshots.js", "deps:graph": "dependency-cruiser --config .dependency-cruiser.cjs -T archi src shared server server.js usage-normalizer.js", "pack:dry-run": "npm pack --dry-run", - "verify": "npm run check && npm run test:architecture && npm run test:unit && npm run build:app && npm run verify:package", + "verify": "npm run test:static && npm run test:vitest && npm run build:app && npm run verify:package", "verify:package": "node scripts/verify-package.js", "verify:registry-install": "node scripts/verify-registry-install.js", - "verify:release": "npm run check && npm run test:architecture && npm run test:unit:coverage && npm run build:app && npm run verify:package", - "verify:full": "npm run verify:release && npm run test:e2e:ci", + "verify:release": "npm run test:static && npm run test:architecture && npm run test:vitest:coverage && npm run build:app && npm run verify:package", + "verify:ci": "npm run verify:release && npm run test:e2e:ci", + "verify:full": "npm run verify:ci", "prepare": "npm run build:app" }, "files": [ diff --git a/tests/frontend/auto-import-modal.test.tsx b/tests/frontend/auto-import-modal.test.tsx index 74641b4..d09698b 100644 --- a/tests/frontend/auto-import-modal.test.tsx +++ b/tests/frontend/auto-import-modal.test.tsx @@ -31,7 +31,7 @@ describe('AutoImportModal', () => { expect(closeMock).toHaveBeenCalledTimes(1) expect(onOpenChange).toHaveBeenCalledWith(false) - }, 15_000) + }) it('shows the concrete auto-import error message in the status area', () => { autoImportMocks.startAutoImport.mockImplementation((callbacks) => { diff --git 
a/tests/frontend/chart-card.test.tsx b/tests/frontend/chart-card.test.tsx index 4a13fba..78cec1f 100644 --- a/tests/frontend/chart-card.test.tsx +++ b/tests/frontend/chart-card.test.tsx @@ -29,7 +29,7 @@ describe('ChartCard', () => { expect(screen.getByText('Total')).toBeInTheDocument() expect(screen.getByText('Data points')).toBeInTheDocument() - }, 15_000) + }) it('reveals the expand control for keyboard focus on desktop', () => { renderWithAppProviders( diff --git a/tests/frontend/dashboard-error-state.test.tsx b/tests/frontend/dashboard-error-state.test.tsx index 0edbbd5..b84ff40 100644 --- a/tests/frontend/dashboard-error-state.test.tsx +++ b/tests/frontend/dashboard-error-state.test.tsx @@ -137,7 +137,7 @@ describe('Dashboard fatal load state', () => { await waitFor(() => expect(apiMocks.deleteSettings).toHaveBeenCalledTimes(1)) expect(await screen.findByText('Settings reset')).toBeInTheDocument() - }, 15_000) + }) it('renders a fatal usage error state with a delete action for corrupted stored data', async () => { const mutateAsync = vi.fn().mockResolvedValue(undefined) diff --git a/tests/frontend/drill-down-modal-regressions.test.tsx b/tests/frontend/drill-down-modal-regressions.test.tsx index 917496c..663ba12 100644 --- a/tests/frontend/drill-down-modal-regressions.test.tsx +++ b/tests/frontend/drill-down-modal-regressions.test.tsx @@ -51,7 +51,7 @@ describe('DrillDownModal regressions', () => { expect(screen.getAllByText('–').length).toBeGreaterThan(0) expect(screen.getByTestId('drilldown-token-distribution')).toBeInTheDocument() expect(screen.queryByTestId('drilldown-token-distribution-input')).not.toBeInTheDocument() - }, 15_000) + }) it('uses the canonical token sum instead of a stale day.totalTokens value', () => { const day: DailyUsage = { diff --git a/tests/frontend/expandable-card.test.tsx b/tests/frontend/expandable-card.test.tsx index 70ef72b..5f4b00e 100644 --- a/tests/frontend/expandable-card.test.tsx +++ 
b/tests/frontend/expandable-card.test.tsx @@ -29,7 +29,7 @@ describe('ExpandableCard', () => { 'Erweiterte Kartenansicht mit zusätzlichen Kennzahlen und vollständigem Inhalt.', ), ).toBeInTheDocument() - }, 15_000) + }) it('delegates expand handling to an external callback without opening its own dialog', () => { const onExpand = vi.fn() diff --git a/tests/frontend/filter-bar-presets.test.tsx b/tests/frontend/filter-bar-presets.test.tsx index a2bfe0a..c916abc 100644 --- a/tests/frontend/filter-bar-presets.test.tsx +++ b/tests/frontend/filter-bar-presets.test.tsx @@ -55,7 +55,7 @@ describe('FilterBar preset and chip states', () => { ) expect(screen.getByRole('button', { name: 'All' })).not.toHaveClass('bg-primary') - }, 15_000) + }) it('exposes pressed state for preset, provider, and model toggles', () => { renderFilterBar({ diff --git a/tests/frontend/header-links.test.tsx b/tests/frontend/header-links.test.tsx index 12781b7..f101a60 100644 --- a/tests/frontend/header-links.test.tsx +++ b/tests/frontend/header-links.test.tsx @@ -48,7 +48,7 @@ describe('Header external links', () => { expect(versionLink).toHaveAttribute('href', NPM_PACKAGE_URL) expect(versionLink).toHaveAttribute('target', '_blank') expect(versionLink).toHaveAttribute('rel', 'noopener noreferrer') - }, 15000) + }) it('shows npm, GitHub, and GitHub issues links in the help panel', () => { render() diff --git a/tests/frontend/info-heading.test.tsx b/tests/frontend/info-heading.test.tsx index 55913f7..12ac80d 100644 --- a/tests/frontend/info-heading.test.tsx +++ b/tests/frontend/info-heading.test.tsx @@ -64,7 +64,7 @@ describe('Info heading semantics', () => { const infoButton = screen.getByRole('button', { name: 'Show info' }) expect(infoButton).toBeInTheDocument() expect(infoButton).toHaveClass('focus-visible:ring-2') - }, 15_000) + }) it('keeps feature card titles semantically separate from the info button', () => { render( @@ -75,5 +75,5 @@ describe('Info heading semantics', () => { 
expect(screen.getByRole('heading', { name: 'Request quality' })).toBeInTheDocument() expect(screen.getByRole('button', { name: 'Show info' })).toBeInTheDocument() - }, 15_000) + }) }) diff --git a/tests/frontend/provider-cost-forecast.test.tsx b/tests/frontend/provider-cost-forecast.test.tsx index 9fbf031..387b140 100644 --- a/tests/frontend/provider-cost-forecast.test.tsx +++ b/tests/frontend/provider-cost-forecast.test.tsx @@ -241,7 +241,7 @@ describe('ProviderCostForecast', () => { .filter((entry) => entry.getAttribute('data-key')?.endsWith('Lower')) .every((entry) => entry.getAttribute('data-legend-type') === 'none'), ).toBe(true) - }, 15_000) + }) it('only renders visible providers from the filtered dataset', () => { const openAiOnlyData: DailyUsage[] = [ diff --git a/tests/frontend/requests-over-time.test.tsx b/tests/frontend/requests-over-time.test.tsx index 40e7b3b..66ec148 100644 --- a/tests/frontend/requests-over-time.test.tsx +++ b/tests/frontend/requests-over-time.test.tsx @@ -162,5 +162,5 @@ describe('RequestsOverTime', () => { expect(lineNames).not.toEqual( expect.arrayContaining(['Unused Model', 'Unused Model 7-day avg']), ) - }, 15_000) + }) }) diff --git a/tests/frontend/toast.test.tsx b/tests/frontend/toast.test.tsx index b80b0cc..f8167a3 100644 --- a/tests/frontend/toast.test.tsx +++ b/tests/frontend/toast.test.tsx @@ -39,7 +39,7 @@ describe('ToastProvider', () => { fireEvent.click(screen.getByRole('button', { name: 'Close' })) expect(screen.queryByRole('status')).not.toBeInTheDocument() - }, 15_000) + }) it('announces error toasts as alerts', () => { render( @@ -51,5 +51,5 @@ describe('ToastProvider', () => { fireEvent.click(screen.getByRole('button', { name: 'Trigger toast' })) expect(screen.getByRole('alert')).toHaveTextContent('Saved successfully') - }, 15_000) + }) }) diff --git a/tests/unit/test-pipeline-scripts.test.ts b/tests/unit/test-pipeline-scripts.test.ts new file mode 100644 index 0000000..e690792 --- /dev/null +++ 
b/tests/unit/test-pipeline-scripts.test.ts @@ -0,0 +1,57 @@ +import { readFile } from 'node:fs/promises' +import { describe, expect, it } from 'vitest' +import packageJson from '../../package.json' + +const scripts = packageJson.scripts as Record + +describe('test pipeline scripts', () => { + it('exposes local gates that map to the CI test layers', () => { + expect(scripts.check).toBe('npm run test:static') + expect(scripts.test).toBe('npm run test:vitest') + expect(scripts['test:vitest']).toBe('npm run test:architecture && npm run test:unit') + expect(scripts['test:vitest:coverage']).toBe('npm run test:unit:coverage') + expect(scripts['test:e2e']).toBe('npm run test:e2e:parallel') + expect(scripts['verify:ci']).toBe('npm run verify:release && npm run test:e2e:ci') + expect(scripts['verify:full']).toBe('npm run verify:ci') + }) + + it('keeps the static gate cached and avoids a duplicate docstring lint pass', () => { + expect(scripts['test:static']).toBe( + 'npm run format:check && npm run lint && npm run check:deps && npm run typecheck', + ) + expect(scripts['test:static']).not.toContain('lint:docstrings') + expect(scripts.lint).toContain('--cache-location .cache/eslint/full') + expect(scripts.lint).toContain('--cache-strategy content') + expect(scripts['lint:docstrings']).toContain('--cache-location .cache/eslint/docstrings') + expect(scripts.format).toContain('--cache-location .cache/prettier/write') + expect(scripts['format:check']).toContain('--cache-location .cache/prettier/check') + expect(scripts.typecheck).toContain('--tsBuildInfoFile .cache/tsc/tsconfig.tsbuildinfo') + }) + + it('keeps CI split into parallel jobs that share one production bundle artifact', async () => { + const workflow = await readFile('.github/workflows/ci.yml', 'utf8') + + for (const job of [ + 'static', + 'vitest', + 'coverage', + 'build', + 'package-smoke', + 'e2e', + 'windows-smoke', + ]) { + expect(workflow).toContain(`\n ${job}:`) + } + + expect(workflow).toContain('run: npm run 
test:static') + expect(workflow).toContain('run: npm run test:vitest:${{ matrix.project }}') + expect(workflow).toContain('run: npm run test:vitest:coverage') + expect(workflow).toContain('name: production-dist') + expect(workflow).toContain('package-smoke:') + expect(workflow).toContain('e2e:') + expect(workflow).toContain('needs: build') + expect(workflow).toContain( + 'uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131', + ) + }) +}) diff --git a/tests/unit/vitest-coverage-config.test.ts b/tests/unit/vitest-coverage-config.test.ts index 197fd87..55aadf7 100644 --- a/tests/unit/vitest-coverage-config.test.ts +++ b/tests/unit/vitest-coverage-config.test.ts @@ -23,6 +23,7 @@ type ResolvedVitestConfig = { environment?: string name?: string setupFiles?: string[] + testTimeout?: number } }> } @@ -106,4 +107,17 @@ describe('vitest coverage configuration', () => { expect(project.test?.setupFiles).toEqual(expectedSetupByProject.get(name ?? '')) } }) + + it('centralizes the longer jsdom timeout on the frontend project only', async () => { + const config = await resolveVitestConfig() + const projects = config.test?.projects ?? 
[] + const frontendProject = projects.find((project) => project.test?.name === 'frontend') + const nonFrontendProjects = projects.filter((project) => project.test?.name !== 'frontend') + + expect(frontendProject?.test?.testTimeout).toBe(30_000) + + for (const project of nonFrontendProjects) { + expect(project.test?.testTimeout).toBeUndefined() + } + }) }) diff --git a/vitest.config.ts b/vitest.config.ts index c93ea9f..cc6560e 100644 --- a/vitest.config.ts +++ b/vitest.config.ts @@ -82,6 +82,7 @@ export default defineConfig(async () => { environment: 'jsdom', setupFiles: ['./vitest.setup.node.ts', './vitest.setup.frontend.ts'], maxWorkers: '50%', + testTimeout: 30_000, sequence: { groupOrder: 2, }, From 117cfeb1687980f7dda8f71d60350b5fc694cb50 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Wed, 29 Apr 2026 07:45:18 +0200 Subject: [PATCH 08/42] Fix CodeRabbit test robustness issues --- src/components/charts/CostByWeekday.tsx | 2 +- tests/e2e/command-palette.spec.ts | 2 ++ tests/frontend/lazy-dashboard-charts.test.tsx | 10 +++++----- tests/integration/app-runtime.test.ts | 16 ++++++++++++++-- tests/unit/test-pipeline-scripts.test.ts | 4 +--- 5 files changed, 23 insertions(+), 11 deletions(-) diff --git a/src/components/charts/CostByWeekday.tsx b/src/components/charts/CostByWeekday.tsx index 8b21f2b..b29300f 100644 --- a/src/components/charts/CostByWeekday.tsx +++ b/src/components/charts/CostByWeekday.tsx @@ -33,7 +33,7 @@ export function CostByWeekday({ data }: CostByWeekdayProps) { const peakIndex = data.findIndex((d) => d.cost === maxCost) const lowIndex = data.findIndex((d) => d.cost === minCost) const weekendCost = data - .filter((entry) => entry.day === 'Sa' || entry.day === 'So') + .filter((_, index) => index === 5 || index === 6) .reduce((sum, entry) => sum + entry.cost, 0) const weekTotal = data.reduce((sum, entry) => sum + entry.cost, 0) diff --git a/tests/e2e/command-palette.spec.ts b/tests/e2e/command-palette.spec.ts index 6777f43..a39112e 100644 --- 
a/tests/e2e/command-palette.spec.ts +++ b/tests/e2e/command-palette.spec.ts @@ -64,6 +64,8 @@ test('executes a report action command from the command palette', async ({ page const pdfReport = await mockPdfReport(page) const downloadPromise = page.waitForEvent('download') + await page.getByTestId('language-switcher-de').click() + await expect(page.locator('html')).toHaveAttribute('lang', 'de') await runPaletteCommand(page, 'command-report') const download = await downloadPromise diff --git a/tests/frontend/lazy-dashboard-charts.test.tsx b/tests/frontend/lazy-dashboard-charts.test.tsx index c7dbf1c..2797e67 100644 --- a/tests/frontend/lazy-dashboard-charts.test.tsx +++ b/tests/frontend/lazy-dashboard-charts.test.tsx @@ -186,12 +186,12 @@ function createDailyUsage( const weekdayData: WeekdayData[] = [ { day: 'Mo', cost: 3 }, - { day: 'Di', cost: 9 }, - { day: 'Mi', cost: 6 }, - { day: 'Do', cost: 4 }, + { day: 'Tu', cost: 9 }, + { day: 'We', cost: 6 }, + { day: 'Th', cost: 4 }, { day: 'Fr', cost: 7 }, { day: 'Sa', cost: 2 }, - { day: 'So', cost: 5 }, + { day: 'Su', cost: 5 }, ] const tokenData: TokenChartDataPoint[] = [ @@ -236,7 +236,7 @@ describe('lazy dashboard charts', () => { renderWithTooltip() expect(screen.getByText('Cost by weekday')).toBeInTheDocument() - expect(screen.getByText('Peak: Di · Low: Sa · Weekend 19%')).toBeInTheDocument() + expect(screen.getByText('Peak: Tu · Low: Sa · Weekend 19%')).toBeInTheDocument() expect(screen.getByTestId('chart-bar')).toHaveAttribute('data-name', 'Avg cost') const fills = screen.getAllByTestId('bar-cell').map((cell) => cell.getAttribute('data-fill')) diff --git a/tests/integration/app-runtime.test.ts b/tests/integration/app-runtime.test.ts index 5bb63fe..95c8c44 100644 --- a/tests/integration/app-runtime.test.ts +++ b/tests/integration/app-runtime.test.ts @@ -40,9 +40,17 @@ class MockResponse extends EventEmitter { } end(body?: string | Buffer) { - this.body = Buffer.isBuffer(body) ? body.toString('utf8') : (body ?? 
'') + if (body !== undefined) { + this.write(body) + } this.emit('finish') } + + write(chunk: string | Buffer) { + this.body += Buffer.isBuffer(chunk) ? chunk.toString('utf8') : chunk + this.headersSent = true + return true + } } async function emitServerRequest( @@ -84,6 +92,10 @@ describe('app runtime wiring', () => { it('composes the real server runtime with isolated paths, API prefix, and auth', async () => { const runtimeRoot = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-app-runtime-')) const authToken = 'test-local-auth-token-with-enough-entropy' + const stdout = Object.assign(Object.create(process.stdout), { + isTTY: false, + write: vi.fn(() => true), + }) const processObject = { ...process, argv: ['node', 'server.js', '--no-open'], @@ -105,7 +117,7 @@ describe('app runtime wiring', () => { on: vi.fn(), pid: 42170, platform: process.platform, - stdout: { isTTY: false }, + stdout, } try { diff --git a/tests/unit/test-pipeline-scripts.test.ts b/tests/unit/test-pipeline-scripts.test.ts index e690792..98b54c4 100644 --- a/tests/unit/test-pipeline-scripts.test.ts +++ b/tests/unit/test-pipeline-scripts.test.ts @@ -50,8 +50,6 @@ describe('test pipeline scripts', () => { expect(workflow).toContain('package-smoke:') expect(workflow).toContain('e2e:') expect(workflow).toContain('needs: build') - expect(workflow).toContain( - 'uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131', - ) + expect(workflow).toContain('uses: actions/download-artifact@') }) }) From e9d54fa8f4e3f80fa612242ed55fcf9f156444ea Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Wed, 29 Apr 2026 08:20:06 +0200 Subject: [PATCH 09/42] Add test timing budgets --- .github/workflows/ci.yml | 7 + docs/review/test-review.md | 6 + docs/testing.md | 15 +- package.json | 1 + scripts/report-test-timings.js | 180 ++++++++++++++++++++--- tests/unit/report-test-timings.test.ts | 110 ++++++++++++++ tests/unit/test-pipeline-scripts.test.ts | 5 + 7 files changed, 306 insertions(+), 18 
deletions(-) create mode 100644 tests/unit/report-test-timings.test.ts diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f0ea648..b692529 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -64,6 +64,13 @@ jobs: - name: Run Vitest project run: npm run test:vitest:${{ matrix.project }} + - name: Check Vitest timing budget + run: | + node scripts/report-test-timings.js \ + test-results/vitest-${{ matrix.project }}.junit.xml \ + --max-suite-seconds=20 \ + --max-test-seconds=12 + - name: Upload Vitest report if: always() uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 diff --git a/docs/review/test-review.md b/docs/review/test-review.md index af5ee69..1c57134 100644 --- a/docs/review/test-review.md +++ b/docs/review/test-review.md @@ -330,6 +330,12 @@ eine Baseline. - Separates Ranking nach Projekt. - Trendvergleich gegen eine gespeicherte Baseline in CI-Artefakten. +**Umsetzungsstand Phase 6:** `scripts/report-test-timings.js` wertet JUnit-Reports jetzt mit +Warn- und Hard-Budgets aus. Lokal bleibt `test:timings` diagnostisch; `test:timings:budget` nutzt +einen separaten JUnit-Output und scheitert bei Suiten > `20s` oder Tests > `12s`. Die CI-Matrix +wendet denselben Hard-Budget-Check auf die bereits erzeugten Vitest-Projektreports an, ohne eine +zusaetzliche Vitest-Runde zu starten. + ### N-01 - macOS-Sandbox kann Playwright-False-Negatives erzeugen Ein sandboxed Playwright-Lauf scheiterte mit Chromium diff --git a/docs/testing.md b/docs/testing.md index 017dd3a..d17f390 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -148,15 +148,28 @@ Prioritize targeted branch coverage in runtime-heavy modules before adding anoth - Keep hook files under `src/hooks/` reachable from the frontend app entrypoint; `npm run test:architecture` fails on unused production hooks so dead hook helpers do not silently remain at `0%` coverage. 
- Timing diagnostics: `npm run test:timings` -`npm run test:timings` generates a fresh Vitest JUnit report and prints the slowest suites and tests. Use it after larger test additions or refactors to catch new hotspots early. +`npm run test:timings` generates a fresh Vitest JUnit report, prints the slowest suites and tests, and +lists warning-level timing budget entries. Use it after larger test additions or refactors to catch +new hotspots early. + +`npm run test:timings:budget` runs the same Vitest project set with a separate JUnit output and fails +when a suite exceeds `20s` or an individual test exceeds `12s`. These hard limits are intentionally +above the current baseline so they catch pathological regressions without making local and CI runs +fragile under normal CPU variance. CI applies the same hard budget to each Vitest matrix job by +evaluating the JUnit report already produced by that job, so the guard does not add another full +Vitest pass. Do not run `test:timings` in parallel with another Vitest command that writes the same JUnit file. +`test:timings:budget` uses `test-results/vitest-timing-budget.junit.xml` so it can be run separately +from the diagnostic report when needed. ## CI Notes - Keep workflow test paths aligned with the split test structure. - The main CI workflow is intentionally a DAG: `static`, Vitest project matrix, `coverage`, and `build` can run independently; `package-smoke` and `e2e` depend only on the `production-dist` artifact from `build`. - Keep CI report artifacts job-scoped so parallel jobs do not overwrite each other. Vitest project jobs upload `test-reports-vitest-`, coverage uploads `coverage-reports`, and Playwright uploads `test-reports-e2e`. +- Each Vitest matrix job evaluates its own JUnit report with `scripts/report-test-timings.js` and the + same `20s` suite / `12s` test hard budget used by `npm run test:timings:budget`. 
- Do not point CI jobs at deleted catch-all files such as old monolithic `server-helpers` or frontend regression suites. - Windows smoke coverage should stay focused on the small, platform-relevant helper suites rather than full subprocess-heavy integration runs unless the workflow explicitly targets Windows process behavior. diff --git a/package.json b/package.json index 3f09915..7c95041 100644 --- a/package.json +++ b/package.json @@ -42,6 +42,7 @@ "test:vitest:integration-background": "vitest run --project integration-background --outputFile.junit=./test-results/vitest-integration-background.junit.xml", "test:unit:coverage": "vitest run --coverage --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-coverage.junit.xml", "test:timings": "vitest run --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-timings.junit.xml && node scripts/report-test-timings.js ./test-results/vitest-timings.junit.xml", + "test:timings:budget": "vitest run --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-timing-budget.junit.xml && node scripts/report-test-timings.js ./test-results/vitest-timing-budget.junit.xml --max-suite-seconds=20 --max-test-seconds=12", "test:e2e": "npm run test:e2e:parallel", "test:e2e:parallel": "npm run build:app && playwright test", "test:e2e:ci": "playwright test --workers=2", diff --git a/scripts/report-test-timings.js b/scripts/report-test-timings.js index 99a686f..92a5093 100644 --- a/scripts/report-test-timings.js +++ b/scripts/report-test-timings.js @@ -5,6 +5,8 @@ const path = require('path'); const ROOT = path.resolve(__dirname, '..'); const DEFAULT_JUNIT_PATH = path.join(ROOT, 'test-results', 'vitest-timings.junit.xml'); +const 
DEFAULT_WARN_SUITE_SECONDS = 2; +const DEFAULT_WARN_TEST_SECONDS = 0.5; function parseAttributes(tag) { const attributes = {}; @@ -40,6 +42,65 @@ function parseJUnit(xml) { return { suites, cases }; } +function parsePositiveNumber(value, optionName) { + const parsed = Number(value); + + if (!Number.isFinite(parsed) || parsed <= 0) { + throw new Error(`${optionName} must be a positive number.`); + } + + return parsed; +} + +function parseArgs(argv) { + const options = { + junitPath: DEFAULT_JUNIT_PATH, + maxSuiteSeconds: null, + maxTestSeconds: null, + warnSuiteSeconds: DEFAULT_WARN_SUITE_SECONDS, + warnTestSeconds: DEFAULT_WARN_TEST_SECONDS, + }; + let positionalPath = null; + + for (const arg of argv) { + if (arg.startsWith('--max-suite-seconds=')) { + options.maxSuiteSeconds = parsePositiveNumber( + arg.slice('--max-suite-seconds='.length), + '--max-suite-seconds', + ); + } else if (arg.startsWith('--max-test-seconds=')) { + options.maxTestSeconds = parsePositiveNumber( + arg.slice('--max-test-seconds='.length), + '--max-test-seconds', + ); + } else if (arg.startsWith('--warn-suite-seconds=')) { + options.warnSuiteSeconds = parsePositiveNumber( + arg.slice('--warn-suite-seconds='.length), + '--warn-suite-seconds', + ); + } else if (arg.startsWith('--warn-test-seconds=')) { + options.warnTestSeconds = parsePositiveNumber( + arg.slice('--warn-test-seconds='.length), + '--warn-test-seconds', + ); + } else if (arg === '--help' || arg === '-h') { + options.help = true; + } else if (arg.startsWith('--')) { + throw new Error(`Unknown option: ${arg}`); + } else if (!positionalPath) { + positionalPath = path.resolve(process.cwd(), arg); + } else { + throw new Error(`Unexpected argument: ${arg}`); + } + } + + if (positionalPath) { + options.junitPath = positionalPath; + } + + return options; +} + function formatSeconds(value) { return `${value.toFixed(3)}s`; } @@ -59,38 +120,123 @@ function printRanking(title, rows, formatRow) { process.stdout.write('\n'); } -function 
main() { - const junitPath = process.argv[2] - ? path.resolve(process.cwd(), process.argv[2]) - : DEFAULT_JUNIT_PATH; - - if (!fs.existsSync(junitPath)) { - process.stderr.write(`JUnit report not found: ${junitPath}\n`); - process.stderr.write('Run vitest with the junit reporter first.\n'); - process.exitCode = 1; - return; +function filterTimedRows(rows, threshold) { + if (threshold === null || threshold === undefined) { + return []; } - const xml = fs.readFileSync(junitPath, 'utf8'); - const { suites, cases } = parseJUnit(xml); + return rows + .filter((row) => Number.isFinite(row.time) && row.time > threshold) + .sort((left, right) => right.time - left.time); +} - const slowSuites = suites +function buildTimingReport(junit, options = {}) { + const slowSuites = junit.suites .filter((suite) => Number.isFinite(suite.time)) .sort((left, right) => right.time - left.time) .slice(0, 10); - const slowCases = cases + const slowCases = junit.cases .filter((testCase) => Number.isFinite(testCase.time)) .sort((left, right) => right.time - left.time) .slice(0, 15); - printRanking('Slowest suites', slowSuites, (suite) => { + return { + slowSuites, + slowCases, + suiteWarnings: filterTimedRows(junit.suites, options.warnSuiteSeconds), + testWarnings: filterTimedRows(junit.cases, options.warnTestSeconds), + suiteFailures: filterTimedRows(junit.suites, options.maxSuiteSeconds), + testFailures: filterTimedRows(junit.cases, options.maxTestSeconds), + }; +} + +function printBudgetRows(title, rows, formatRow) { + if (rows.length === 0) { + return; + } + + printRanking(title, rows, formatRow); +} + +function printUsage(stdout) { + stdout.write(`Usage: node scripts/report-test-timings.js [junit-file] [options] + +Options: + --warn-suite-seconds=N Print suites slower than N seconds. Default: ${DEFAULT_WARN_SUITE_SECONDS} + --warn-test-seconds=N Print tests slower than N seconds. Default: ${DEFAULT_WARN_TEST_SECONDS} + --max-suite-seconds=N Fail when any suite is slower than N seconds. 
+ --max-test-seconds=N Fail when any test is slower than N seconds. +`); +} + +function run(argv = process.argv.slice(2), streams = process) { + let options; + + try { + options = parseArgs(argv); + } catch (error) { + streams.stderr.write(`${error instanceof Error ? error.message : String(error)}\n`); + return 1; + } + + if (options.help) { + printUsage(streams.stdout); + return 0; + } + + if (!fs.existsSync(options.junitPath)) { + streams.stderr.write(`JUnit report not found: ${options.junitPath}\n`); + streams.stderr.write('Run vitest with the junit reporter first.\n'); + return 1; + } + + const xml = fs.readFileSync(options.junitPath, 'utf8'); + const report = buildTimingReport(parseJUnit(xml), options); + + printRanking('Slowest suites', report.slowSuites, (suite) => { + return `${formatSeconds(suite.time)} | ${suite.name}`; + }); + + printRanking('Slowest tests', report.slowCases, (testCase) => { + return `${formatSeconds(testCase.time)} | ${testCase.suite} | ${testCase.name}`; + }); + + printBudgetRows('Suites over warning budget', report.suiteWarnings, (suite) => { + return `${formatSeconds(suite.time)} | ${suite.name}`; + }); + + printBudgetRows('Tests over warning budget', report.testWarnings, (testCase) => { + return `${formatSeconds(testCase.time)} | ${testCase.suite} | ${testCase.name}`; + }); + + printBudgetRows('Suites over hard budget', report.suiteFailures, (suite) => { return `${formatSeconds(suite.time)} | ${suite.name}`; }); - printRanking('Slowest tests', slowCases, (testCase) => { + printBudgetRows('Tests over hard budget', report.testFailures, (testCase) => { return `${formatSeconds(testCase.time)} | ${testCase.suite} | ${testCase.name}`; }); + + if (report.suiteFailures.length > 0 || report.testFailures.length > 0) { + streams.stderr.write('Test timing budget exceeded.\n'); + return 1; + } + + return 0; +} + +function main() { + process.exitCode = run(); +} + +if (require.main === module) { + main(); } -main(); +module.exports = { + 
buildTimingReport, + parseArgs, + parseJUnit, + run, +}; diff --git a/tests/unit/report-test-timings.test.ts b/tests/unit/report-test-timings.test.ts new file mode 100644 index 0000000..3cee06e --- /dev/null +++ b/tests/unit/report-test-timings.test.ts @@ -0,0 +1,110 @@ +import { createRequire } from 'node:module' +import { describe, expect, it } from 'vitest' + +const require = createRequire(import.meta.url) + +type TimingRow = { + name: string + suite?: string + time: number +} + +type TimingReport = { + slowCases: TimingRow[] + slowSuites: TimingRow[] + suiteFailures: TimingRow[] + suiteWarnings: TimingRow[] + testFailures: TimingRow[] + testWarnings: TimingRow[] +} + +type TimingScript = { + buildTimingReport: ( + junit: { cases: TimingRow[]; suites: TimingRow[] }, + options?: { + maxSuiteSeconds?: number + maxTestSeconds?: number + warnSuiteSeconds?: number + warnTestSeconds?: number + }, + ) => TimingReport + parseArgs: (argv: string[]) => { + junitPath: string + maxSuiteSeconds: number | null + maxTestSeconds: number | null + warnSuiteSeconds: number + warnTestSeconds: number + } + parseJUnit: (xml: string) => { cases: TimingRow[]; suites: TimingRow[] } +} + +const timingScript = require('../../scripts/report-test-timings.js') as TimingScript + +describe('test timing report budgets', () => { + it('parses Vitest JUnit suites and test cases', () => { + const report = timingScript.parseJUnit(` + + + + + + + `) + + expect(report.suites).toEqual([{ name: 'unit', time: 1.25 }]) + expect(report.cases).toEqual([ + { suite: 'tests/unit/example.test.ts', name: 'fast path', time: 0.04 }, + { suite: 'tests/unit/example.test.ts', name: 'slow path', time: 0.71 }, + ]) + }) + + it('separates warning budgets from hard timing failures', () => { + const report = timingScript.buildTimingReport( + { + suites: [ + { name: 'fast suite', time: 0.2 }, + { name: 'slow suite', time: 3 }, + { name: 'failed suite budget', time: 21 }, + ], + cases: [ + { suite: 'a.test.ts', name: 'fast 
case', time: 0.1 }, + { suite: 'b.test.ts', name: 'slow case', time: 0.8 }, + { suite: 'c.test.ts', name: 'failed case budget', time: 12.5 }, + ], + }, + { + warnSuiteSeconds: 2, + warnTestSeconds: 0.5, + maxSuiteSeconds: 20, + maxTestSeconds: 12, + }, + ) + + expect(report.suiteWarnings.map((suite) => suite.name)).toEqual([ + 'failed suite budget', + 'slow suite', + ]) + expect(report.testWarnings.map((testCase) => testCase.name)).toEqual([ + 'failed case budget', + 'slow case', + ]) + expect(report.suiteFailures.map((suite) => suite.name)).toEqual(['failed suite budget']) + expect(report.testFailures.map((testCase) => testCase.name)).toEqual(['failed case budget']) + }) + + it('parses configurable timing budgets from CLI arguments', () => { + const options = timingScript.parseArgs([ + './test-results/custom.junit.xml', + '--warn-suite-seconds=1.5', + '--warn-test-seconds=0.25', + '--max-suite-seconds=20', + '--max-test-seconds=12', + ]) + + expect(options.junitPath).toMatch(/test-results\/custom\.junit\.xml$/) + expect(options.warnSuiteSeconds).toBe(1.5) + expect(options.warnTestSeconds).toBe(0.25) + expect(options.maxSuiteSeconds).toBe(20) + expect(options.maxTestSeconds).toBe(12) + }) +}) diff --git a/tests/unit/test-pipeline-scripts.test.ts b/tests/unit/test-pipeline-scripts.test.ts index 98b54c4..ab9a37a 100644 --- a/tests/unit/test-pipeline-scripts.test.ts +++ b/tests/unit/test-pipeline-scripts.test.ts @@ -11,6 +11,8 @@ describe('test pipeline scripts', () => { expect(scripts['test:vitest']).toBe('npm run test:architecture && npm run test:unit') expect(scripts['test:vitest:coverage']).toBe('npm run test:unit:coverage') expect(scripts['test:e2e']).toBe('npm run test:e2e:parallel') + expect(scripts['test:timings:budget']).toContain('--max-suite-seconds=20') + expect(scripts['test:timings:budget']).toContain('--max-test-seconds=12') expect(scripts['verify:ci']).toBe('npm run verify:release && npm run test:e2e:ci') expect(scripts['verify:full']).toBe('npm run 
verify:ci') }) @@ -45,6 +47,9 @@ describe('test pipeline scripts', () => { expect(workflow).toContain('run: npm run test:static') expect(workflow).toContain('run: npm run test:vitest:${{ matrix.project }}') + expect(workflow).toContain('node scripts/report-test-timings.js') + expect(workflow).toContain('--max-suite-seconds=20') + expect(workflow).toContain('--max-test-seconds=12') expect(workflow).toContain('run: npm run test:vitest:coverage') expect(workflow).toContain('name: production-dist') expect(workflow).toContain('package-smoke:') From 944a04961bc83b9c17c4b1a8a9f96a393563e992 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Thu, 30 Apr 2026 17:32:27 +0200 Subject: [PATCH 10/42] Add project test timing benchmark --- docs/testing.md | 13 + package.json | 2 + scripts/run-vitest-project-timings.js | 276 ++++++++++++++++++ tests/unit/run-vitest-project-timings.test.ts | 151 ++++++++++ tests/unit/test-pipeline-scripts.test.ts | 4 + 5 files changed, 446 insertions(+) create mode 100644 scripts/run-vitest-project-timings.js create mode 100644 tests/unit/run-vitest-project-timings.test.ts diff --git a/docs/testing.md b/docs/testing.md index d17f390..422dd31 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -139,6 +139,8 @@ Prioritize targeted branch coverage in runtime-heavy modules before adding anoth - Architecture tests only: `npm run test:architecture` - Dependency graph gate: `npm run check:deps` - Coverage-only unit/integration gate: `npm run test:vitest:coverage` +- Per-project timing budget pass: `npm run test:timings:projects` +- Three-run timing benchmark: `npm run test:timings:benchmark` - Playwright only, with a fresh app build: `PLAYWRIGHT_TEST_PORT=3016 npm run test:e2e:parallel` - CI-style Playwright smoke: `npm run test:e2e:ci` - Serial local mirror of the CI gate: `npm run verify:ci` @@ -163,6 +165,17 @@ Do not run `test:timings` in parallel with another Vitest command that writes th `test:timings:budget` uses 
`test-results/vitest-timing-budget.junit.xml` so it can be run separately from the diagnostic report when needed. +`npm run test:timings:projects` runs each Vitest project separately, writes project-scoped JUnit files +such as `test-results/vitest-frontend.timing.junit.xml`, and checks the same `20s` suite / `12s` +test hard budget after each project. Use it when you need to identify whether a regression is in +unit, frontend, integration, or background tests without adding another monolithic timing report. + +`npm run test:timings:benchmark` repeats the project timing pass three times and prints median and +worst observed duration per project. Use those repeated measurements before changing worker counts +or moving tests between layers; a single run is not enough to separate real improvements from cache, +CPU, or I/O variance. Repeated runs write `timing-run-N` JUnit files so their reports never overwrite +the normal project, coverage, or diagnostic reports. + ## CI Notes - Keep workflow test paths aligned with the split test structure. 
diff --git a/package.json b/package.json index 7c95041..2508ff0 100644 --- a/package.json +++ b/package.json @@ -43,6 +43,8 @@ "test:unit:coverage": "vitest run --coverage --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-coverage.junit.xml", "test:timings": "vitest run --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-timings.junit.xml && node scripts/report-test-timings.js ./test-results/vitest-timings.junit.xml", "test:timings:budget": "vitest run --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-timing-budget.junit.xml && node scripts/report-test-timings.js ./test-results/vitest-timing-budget.junit.xml --max-suite-seconds=20 --max-test-seconds=12", + "test:timings:projects": "node scripts/run-vitest-project-timings.js", + "test:timings:benchmark": "node scripts/run-vitest-project-timings.js --repeat=3", "test:e2e": "npm run test:e2e:parallel", "test:e2e:parallel": "npm run build:app && playwright test", "test:e2e:ci": "playwright test --workers=2", diff --git a/scripts/run-vitest-project-timings.js b/scripts/run-vitest-project-timings.js new file mode 100644 index 0000000..efa4c05 --- /dev/null +++ b/scripts/run-vitest-project-timings.js @@ -0,0 +1,276 @@ +#!/usr/bin/env node + +const fs = require('fs'); +const path = require('path'); +const { spawnSync } = require('child_process'); + +const ROOT = path.resolve(__dirname, '..'); +const DEFAULT_REPORT_DIR = path.join(ROOT, 'test-results'); +const DEFAULT_PROJECTS = [ + 'architecture', + 'unit', + 'frontend', + 'integration', + 'integration-background', +]; +const DEFAULT_MAX_SUITE_SECONDS = 20; +const DEFAULT_MAX_TEST_SECONDS = 12; + +function parsePositiveInteger(value, optionName) 
{ + const parsed = Number(value); + + if (!Number.isInteger(parsed) || parsed <= 0) { + throw new Error(`${optionName} must be a positive integer.`); + } + + return parsed; +} + +function parsePositiveNumber(value, optionName) { + const parsed = Number(value); + + if (!Number.isFinite(parsed) || parsed <= 0) { + throw new Error(`${optionName} must be a positive number.`); + } + + return parsed; +} + +function parseProjects(value) { + const projects = value + .split(',') + .map((project) => project.trim()) + .filter(Boolean); + + if (projects.length === 0) { + throw new Error('--projects must list at least one Vitest project.'); + } + + const unknownProjects = projects.filter((project) => !DEFAULT_PROJECTS.includes(project)); + + if (unknownProjects.length > 0) { + throw new Error(`Unknown Vitest project: ${unknownProjects.join(', ')}`); + } + + return projects; +} + +function parseArgs(argv) { + const options = { + dryRun: false, + help: false, + maxSuiteSeconds: DEFAULT_MAX_SUITE_SECONDS, + maxTestSeconds: DEFAULT_MAX_TEST_SECONDS, + projects: DEFAULT_PROJECTS, + repeat: 1, + reportDir: DEFAULT_REPORT_DIR, + }; + + for (const arg of argv) { + if (arg === '--dry-run') { + options.dryRun = true; + } else if (arg === '--help' || arg === '-h') { + options.help = true; + } else if (arg.startsWith('--max-suite-seconds=')) { + options.maxSuiteSeconds = parsePositiveNumber( + arg.slice('--max-suite-seconds='.length), + '--max-suite-seconds', + ); + } else if (arg.startsWith('--max-test-seconds=')) { + options.maxTestSeconds = parsePositiveNumber( + arg.slice('--max-test-seconds='.length), + '--max-test-seconds', + ); + } else if (arg.startsWith('--projects=')) { + options.projects = parseProjects(arg.slice('--projects='.length)); + } else if (arg.startsWith('--repeat=')) { + options.repeat = parsePositiveInteger(arg.slice('--repeat='.length), '--repeat'); + } else if (arg.startsWith('--report-dir=')) { + options.reportDir = path.resolve(process.cwd(), 
arg.slice('--report-dir='.length)); + } else { + throw new Error(`Unknown option: ${arg}`); + } + } + + return options; +} + +function getReportPath(project, iteration, repeat, reportDir) { + const suffix = repeat === 1 ? 'timing' : `timing-run-${iteration}`; + return path.join(reportDir, `vitest-${project}.${suffix}.junit.xml`); +} + +function buildProjectRuns(options) { + const runs = []; + + for (let iteration = 1; iteration <= options.repeat; iteration += 1) { + for (const project of options.projects) { + runs.push({ + iteration, + project, + reportPath: getReportPath(project, iteration, options.repeat, options.reportDir), + }); + } + } + + return runs; +} + +function buildVitestCommand(run) { + return { + command: 'npx', + args: [ + 'vitest', + 'run', + '--project', + run.project, + '--reporter=dot', + '--reporter=junit', + `--outputFile.junit=${run.reportPath}`, + ], + }; +} + +function buildBudgetCommand(run, options) { + return { + command: process.execPath, + args: [ + path.join(ROOT, 'scripts', 'report-test-timings.js'), + run.reportPath, + `--max-suite-seconds=${options.maxSuiteSeconds}`, + `--max-test-seconds=${options.maxTestSeconds}`, + ], + }; +} + +function median(values) { + if (values.length === 0) { + return 0; + } + + const sorted = [...values].sort((left, right) => left - right); + const middle = Math.floor(sorted.length / 2); + + if (sorted.length % 2 === 1) { + return sorted[middle]; + } + + return (sorted[middle - 1] + sorted[middle]) / 2; +} + +function formatSeconds(ms) { + return `${(ms / 1000).toFixed(2)}s`; +} + +function printUsage(stdout) { + stdout.write(`Usage: node scripts/run-vitest-project-timings.js [options] + +Options: + --projects=a,b Comma-separated Vitest projects. Default: ${DEFAULT_PROJECTS.join(',')} + --repeat=N Run each project N times. Default: 1 + --report-dir=PATH Directory for per-run JUnit reports. Default: test-results + --max-suite-seconds=N Fail when any suite is slower than N seconds. 
Default: ${DEFAULT_MAX_SUITE_SECONDS} + --max-test-seconds=N Fail when any test is slower than N seconds. Default: ${DEFAULT_MAX_TEST_SECONDS} + --dry-run Print commands without running them. +`); +} + +function runCommand(command, args, spawnSyncImpl) { + return spawnSyncImpl(command, args, { + cwd: ROOT, + env: process.env, + stdio: 'inherit', + }); +} + +function run(argv = process.argv.slice(2), streams = process, spawnSyncImpl = spawnSync) { + let options; + + try { + options = parseArgs(argv); + } catch (error) { + streams.stderr.write(`${error instanceof Error ? error.message : String(error)}\n`); + return 1; + } + + if (options.help) { + printUsage(streams.stdout); + return 0; + } + + fs.mkdirSync(options.reportDir, { recursive: true }); + + const results = new Map(); + + for (const projectRun of buildProjectRuns(options)) { + const vitestCommand = buildVitestCommand(projectRun); + const budgetCommand = buildBudgetCommand(projectRun, options); + + streams.stdout.write( + `\n[${projectRun.project}] run ${projectRun.iteration}/${options.repeat} -> ${path.relative( + ROOT, + projectRun.reportPath, + )}\n`, + ); + + if (options.dryRun) { + streams.stdout.write(` ${vitestCommand.command} ${vitestCommand.args.join(' ')}\n`); + streams.stdout.write(` ${budgetCommand.command} ${budgetCommand.args.join(' ')}\n`); + continue; + } + + const startedAt = Date.now(); + const testResult = runCommand(vitestCommand.command, vitestCommand.args, spawnSyncImpl); + const durationMs = Date.now() - startedAt; + + if (testResult.status !== 0) { + return testResult.status || 1; + } + + const budgetResult = runCommand(budgetCommand.command, budgetCommand.args, spawnSyncImpl); + + if (budgetResult.status !== 0) { + return budgetResult.status || 1; + } + + const projectResults = results.get(projectRun.project) || []; + projectResults.push(durationMs); + results.set(projectRun.project, projectResults); + } + + if (options.dryRun) { + return 0; + } + + streams.stdout.write('\nVitest 
project timing summary\n'); + + for (const project of options.projects) { + const projectResults = results.get(project) || []; + const worst = projectResults.length > 0 ? Math.max(...projectResults) : 0; + streams.stdout.write( + ` ${project}: runs=${projectResults.length} median=${formatSeconds( + median(projectResults), + )} worst=${formatSeconds(worst)}\n`, + ); + } + + return 0; +} + +function main() { + process.exitCode = run(); +} + +if (require.main === module) { + main(); +} + +module.exports = { + buildBudgetCommand, + buildProjectRuns, + buildVitestCommand, + getReportPath, + median, + parseArgs, + run, +}; diff --git a/tests/unit/run-vitest-project-timings.test.ts b/tests/unit/run-vitest-project-timings.test.ts new file mode 100644 index 0000000..3aaacab --- /dev/null +++ b/tests/unit/run-vitest-project-timings.test.ts @@ -0,0 +1,151 @@ +import { createRequire } from 'node:module' +import path from 'node:path' +import { describe, expect, it, vi } from 'vitest' + +const require = createRequire(import.meta.url) + +type ProjectRun = { + iteration: number + project: string + reportPath: string +} + +type RunnerOptions = { + dryRun: boolean + maxSuiteSeconds: number + maxTestSeconds: number + projects: string[] + repeat: number + reportDir: string +} + +type RunnerScript = { + buildBudgetCommand: ( + run: ProjectRun, + options: Pick, + ) => { command: string; args: string[] } + buildProjectRuns: ( + options: Pick, + ) => ProjectRun[] + buildVitestCommand: (run: ProjectRun) => { command: string; args: string[] } + getReportPath: (project: string, iteration: number, repeat: number, reportDir: string) => string + median: (values: number[]) => number + parseArgs: (argv: string[]) => RunnerOptions + run: ( + argv: string[], + streams: { + stdout: { write: (message: string) => void } + stderr: { write: (message: string) => void } + }, + spawnSyncImpl: (command: string, args: string[], options: unknown) => { status: number | null }, + ) => number +} + +const 
timingRunner = require('../../scripts/run-vitest-project-timings.js') as RunnerScript + +describe('Vitest project timing runner', () => { + it('parses repeatable project timing options', () => { + const options = timingRunner.parseArgs([ + '--projects=unit,frontend', + '--repeat=3', + '--report-dir=.cache/test-timings', + '--max-suite-seconds=15', + '--max-test-seconds=8', + ]) + + expect(options.projects).toEqual(['unit', 'frontend']) + expect(options.repeat).toBe(3) + expect(options.reportDir).toMatch(/\.cache\/test-timings$/) + expect(options.maxSuiteSeconds).toBe(15) + expect(options.maxTestSeconds).toBe(8) + }) + + it('uses unique JUnit reports for each project and benchmark repeat', () => { + const reportDir = path.resolve('test-results') + const runs = timingRunner.buildProjectRuns({ + projects: ['unit', 'frontend'], + repeat: 2, + reportDir, + }) + + expect(runs).toEqual([ + { + iteration: 1, + project: 'unit', + reportPath: path.join(reportDir, 'vitest-unit.timing-run-1.junit.xml'), + }, + { + iteration: 1, + project: 'frontend', + reportPath: path.join(reportDir, 'vitest-frontend.timing-run-1.junit.xml'), + }, + { + iteration: 2, + project: 'unit', + reportPath: path.join(reportDir, 'vitest-unit.timing-run-2.junit.xml'), + }, + { + iteration: 2, + project: 'frontend', + reportPath: path.join(reportDir, 'vitest-frontend.timing-run-2.junit.xml'), + }, + ]) + + expect(timingRunner.getReportPath('unit', 1, 1, reportDir)).toBe( + path.join(reportDir, 'vitest-unit.timing.junit.xml'), + ) + }) + + it('builds Vitest and budget commands with isolated report paths', () => { + const projectRun = { + iteration: 1, + project: 'frontend', + reportPath: path.resolve('test-results/vitest-frontend.timing.junit.xml'), + } + + expect(timingRunner.buildVitestCommand(projectRun)).toEqual({ + command: 'npx', + args: [ + 'vitest', + 'run', + '--project', + 'frontend', + '--reporter=dot', + '--reporter=junit', + `--outputFile.junit=${projectRun.reportPath}`, + ], + }) + + const 
budgetCommand = timingRunner.buildBudgetCommand(projectRun, { + maxSuiteSeconds: 20, + maxTestSeconds: 12, + }) + + expect(budgetCommand.command).toBe(process.execPath) + expect(budgetCommand.args).toContain(projectRun.reportPath) + expect(budgetCommand.args).toContain('--max-suite-seconds=20') + expect(budgetCommand.args).toContain('--max-test-seconds=12') + }) + + it('prints dry-run commands without spawning Vitest', () => { + const stdout = { write: vi.fn() } + const stderr = { write: vi.fn() } + const spawnSyncImpl = vi.fn(() => ({ status: 0 })) + + const status = timingRunner.run( + ['--projects=unit', '--repeat=2', '--dry-run'], + { stdout, stderr }, + spawnSyncImpl, + ) + + expect(status).toBe(0) + expect(spawnSyncImpl).not.toHaveBeenCalled() + expect(stdout.write.mock.calls.join('\n')).toContain('vitest-unit.timing-run-1.junit.xml') + expect(stdout.write.mock.calls.join('\n')).toContain('vitest-unit.timing-run-2.junit.xml') + }) + + it('calculates median values for repeated timings', () => { + expect(timingRunner.median([3000, 1000, 2000])).toBe(2000) + expect(timingRunner.median([1000, 3000])).toBe(2000) + }) +}) diff --git a/tests/unit/test-pipeline-scripts.test.ts b/tests/unit/test-pipeline-scripts.test.ts index ab9a37a..d310401 100644 --- a/tests/unit/test-pipeline-scripts.test.ts +++ b/tests/unit/test-pipeline-scripts.test.ts @@ -13,6 +13,10 @@ describe('test pipeline scripts', () => { expect(scripts['test:e2e']).toBe('npm run test:e2e:parallel') expect(scripts['test:timings:budget']).toContain('--max-suite-seconds=20') expect(scripts['test:timings:budget']).toContain('--max-test-seconds=12') + expect(scripts['test:timings:projects']).toBe('node scripts/run-vitest-project-timings.js') + expect(scripts['test:timings:benchmark']).toBe( + 'node scripts/run-vitest-project-timings.js --repeat=3', + ) expect(scripts['verify:ci']).toBe('npm run verify:release && npm run test:e2e:ci') expect(scripts['verify:full']).toBe('npm run verify:ci') }) From 
2932697455bfee9453fec26ba26b9fea5db43b44 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Thu, 30 Apr 2026 17:47:53 +0200 Subject: [PATCH 11/42] Tune frontend test parallelism --- docs/testing.md | 5 +++++ tests/unit/vitest-coverage-config.test.ts | 11 +++++++++++ vitest.config.ts | 2 +- 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/docs/testing.md b/docs/testing.md index 422dd31..355e49f 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -176,6 +176,11 @@ or moving tests between layers; a single run is not enough to separate real impr CPU, or I/O variance. Repeated runs write `timing-run-N` JUnit files so their reports never overwrite the normal project, coverage, or diagnostic reports. +The frontend Vitest project intentionally uses `maxWorkers: '80%'`. Local Phase-2 measurements showed +that the old `50%` setting left jsdom work under-parallelized, while `100%` saturated the machine and +made imports, setup, tests, and environment time much worse. Re-benchmark with +`npm run test:timings:benchmark -- --projects=frontend` before changing that worker cap. + ## CI Notes - Keep workflow test paths aligned with the split test structure. diff --git a/tests/unit/vitest-coverage-config.test.ts b/tests/unit/vitest-coverage-config.test.ts index 55aadf7..28dc76d 100644 --- a/tests/unit/vitest-coverage-config.test.ts +++ b/tests/unit/vitest-coverage-config.test.ts @@ -21,6 +21,7 @@ type ResolvedVitestConfig = { projects?: Array<{ test?: { environment?: string + maxWorkers?: number | string name?: string setupFiles?: string[] testTimeout?: number @@ -120,4 +121,14 @@ describe('vitest coverage configuration', () => { expect(project.test?.testTimeout).toBeUndefined() } }) + + it('keeps jsdom parallelism high but below full CPU saturation', async () => { + const config = await resolveVitestConfig() + const projects = config.test?.projects ?? 
[] + const frontendProject = projects.find((project) => project.test?.name === 'frontend') + const unitProject = projects.find((project) => project.test?.name === 'unit') + + expect(frontendProject?.test?.maxWorkers).toBe('80%') + expect(unitProject?.test?.maxWorkers).toBe('80%') + }) }) diff --git a/vitest.config.ts b/vitest.config.ts index cc6560e..875945d 100644 --- a/vitest.config.ts +++ b/vitest.config.ts @@ -81,7 +81,7 @@ export default defineConfig(async () => { include: ['tests/frontend/**/*.test.{ts,tsx}'], environment: 'jsdom', setupFiles: ['./vitest.setup.node.ts', './vitest.setup.frontend.ts'], - maxWorkers: '50%', + maxWorkers: '80%', testTimeout: 30_000, sequence: { groupOrder: 2, From c4882aebb33644817472196787cabf8c81fb1dfb Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Thu, 30 Apr 2026 17:52:53 +0200 Subject: [PATCH 12/42] Move toktrack cache tests to fake spawn --- tests/unit/server-helpers-runner-core.test.ts | 161 +++++++++++++++++- .../server-helpers-runner-process.test.ts | 121 ------------- tests/unit/server-helpers.shared.ts | 1 + 3 files changed, 161 insertions(+), 122 deletions(-) diff --git a/tests/unit/server-helpers-runner-core.test.ts b/tests/unit/server-helpers-runner-core.test.ts index e202ed0..033cc61 100644 --- a/tests/unit/server-helpers-runner-core.test.ts +++ b/tests/unit/server-helpers-runner-core.test.ts @@ -1,7 +1,8 @@ -import { afterEach, describe, expect, it } from 'vitest' +import { afterEach, describe, expect, it, vi } from 'vitest' import { EventEmitter, TOKTRACK_VERSION, + createAutoImportRuntime, getExecutableName, getLocalToktrackDisplayCommand, getToktrackLatestLookupTimeoutMs, @@ -12,6 +13,76 @@ import { toAutoImportRunnerResolutionError, } from './server-helpers.shared' +type FakeSpawnOutcome = { + code?: number + hang?: boolean + stderr?: string + stdout?: string +} + +class FakeChildProcess extends EventEmitter { + stdout = new EventEmitter() + stderr = new EventEmitter() + exitCode: number | null = null + + 
kill(signal: string) { + if (this.exitCode !== null) { + return + } + + this.exitCode = signal === 'SIGKILL' ? 137 : 143 + queueMicrotask(() => this.emit('close', this.exitCode)) + } +} + +function createSpawnSequence(outcomes: FakeSpawnOutcome[]) { + const pendingOutcomes = [...outcomes] + + return vi.fn(() => { + const child = new FakeChildProcess() + const outcome = pendingOutcomes.shift() ?? { code: 0, stdout: '' } + + if (!outcome.hang) { + queueMicrotask(() => { + if (outcome.stdout) child.stdout.emit('data', Buffer.from(outcome.stdout)) + if (outcome.stderr) child.stderr.emit('data', Buffer.from(outcome.stderr)) + child.exitCode = outcome.code ?? 0 + child.emit('close', child.exitCode) + }) + } + + return child + }) +} + +function createRuntimeWithSpawn(spawnImpl: ReturnType) { + return createAutoImportRuntime({ + fs: { existsSync: () => false }, + processObject: { env: {} }, + spawnCrossPlatform: spawnImpl, + normalizeIncomingData: (input: unknown) => input, + withSettingsAndDataMutationLock: async (operation: () => Promise) => operation(), + writeData: () => undefined, + updateDataLoadState: async () => undefined, + toktrackPackageName: 'toktrack', + toktrackPackageSpec: `toktrack@${TOKTRACK_VERSION}`, + toktrackVersion: TOKTRACK_VERSION, + toktrackLocalBin: '/missing/toktrack', + npxCacheDir: '/tmp/ttdash-test-npx-cache', + isWindows: false, + processTerminationGraceMs: 1000, + toktrackLocalRunnerProbeTimeoutMs: 7000, + toktrackLocalRunnerVersionCheckTimeoutMs: 7000, + toktrackLocalRunnerImportTimeoutMs: 60000, + toktrackPackageRunnerProbeTimeoutMs: 45000, + toktrackPackageRunnerVersionCheckTimeoutMs: 45000, + toktrackPackageRunnerImportTimeoutMs: 60000, + toktrackLatestLookupTimeoutMs: 15000, + toktrackLatestCacheSuccessTtlMs: 5 * 60 * 1000, + toktrackLatestCacheFailureTtlMs: 60 * 1000, + }) +} + afterEach(() => { resetServerHelperTestState() }) @@ -63,6 +134,94 @@ describe('server helper utilities: toktrack runner core behavior', () => { 
expect(getToktrackLatestLookupTimeoutMs()).toBe(15000) }) + it('returns a structured warning when the latest toktrack version lookup times out', async () => { + vi.useFakeTimers() + const runtime = createRuntimeWithSpawn(createSpawnSequence([{ hang: true }])) + + try { + const statusPromise = runtime.lookupLatestToktrackVersion(50) + await vi.advanceTimersByTimeAsync(50) + + await expect(statusPromise).resolves.toMatchObject({ + configuredVersion: TOKTRACK_VERSION, + latestVersion: null, + isLatest: null, + lookupStatus: 'failed', + message: expect.stringContaining('Command timed out'), + }) + } finally { + vi.useRealTimers() + } + }) + + it('reuses a cached successful latest-version lookup until the TTL expires', async () => { + const spawnImpl = createSpawnSequence([ + { stdout: `${TOKTRACK_VERSION}\n` }, + { stdout: '9.9.9\n' }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl) + const nowSpy = vi.spyOn(Date, 'now') + + try { + nowSpy.mockReturnValue(1_000_000) + const firstStatus = await runtime.lookupLatestToktrackVersion() + nowSpy.mockReturnValue(1_000_000) + const secondStatus = await runtime.lookupLatestToktrackVersion() + nowSpy.mockReturnValue(1_000_000 + 5 * 60 * 1000 + 1) + const thirdStatus = await runtime.lookupLatestToktrackVersion() + + expect(firstStatus).toMatchObject({ + configuredVersion: TOKTRACK_VERSION, + latestVersion: TOKTRACK_VERSION, + isLatest: true, + lookupStatus: 'ok', + }) + expect(secondStatus).toEqual(firstStatus) + expect(thirdStatus).toMatchObject({ + latestVersion: '9.9.9', + isLatest: false, + lookupStatus: 'ok', + }) + expect(spawnImpl).toHaveBeenCalledTimes(2) + } finally { + nowSpy.mockRestore() + } + }) + + it('reuses a cached failed latest-version lookup until the failure TTL expires', async () => { + const spawnImpl = createSpawnSequence([ + { code: 1, stderr: 'lookup failed\n' }, + { code: 1, stderr: 'lookup still failed\n' }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl) + const nowSpy = 
vi.spyOn(Date, 'now') + + try { + nowSpy.mockReturnValue(2_000_000) + const firstStatus = await runtime.lookupLatestToktrackVersion() + nowSpy.mockReturnValue(2_000_000) + const secondStatus = await runtime.lookupLatestToktrackVersion() + nowSpy.mockReturnValue(2_000_000 + 60 * 1000 + 1) + const thirdStatus = await runtime.lookupLatestToktrackVersion() + + expect(firstStatus).toMatchObject({ + configuredVersion: TOKTRACK_VERSION, + latestVersion: null, + isLatest: null, + lookupStatus: 'failed', + message: 'lookup failed', + }) + expect(secondStatus).toEqual(firstStatus) + expect(thirdStatus).toMatchObject({ + lookupStatus: 'failed', + message: 'lookup still failed', + }) + expect(spawnImpl).toHaveBeenCalledTimes(2) + } finally { + nowSpy.mockRestore() + } + }) + it('waits for timed-out toktrack commands to exit before rejecting', async () => { class FakeChild extends EventEmitter { stdout = new EventEmitter() diff --git a/tests/unit/server-helpers-runner-process.test.ts b/tests/unit/server-helpers-runner-process.test.ts index e6ae978..61b7335 100644 --- a/tests/unit/server-helpers-runner-process.test.ts +++ b/tests/unit/server-helpers-runner-process.test.ts @@ -45,36 +45,6 @@ describe('server helper utilities: toktrack runner process integration', () => { } }) - it.runIf(process.platform !== 'win32')( - 'returns a structured warning when the latest toktrack version lookup times out', - async () => { - const tempDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-toktrack-timeout-')) - const originalPath = process.env.PATH - const nodePath = JSON.stringify(process.execPath) - process.env.PATH = tempDir - - try { - await writeExecutableScript( - tempDir, - 'npm', - `exec ${nodePath} -e "setTimeout(() => {}, 1000)"`, - ) - - const status = await lookupLatestToktrackVersion(50) - expect(status).toMatchObject({ - configuredVersion: TOKTRACK_VERSION, - latestVersion: null, - isLatest: null, - lookupStatus: 'failed', - }) - 
expect(status.message).toContain('Command timed out') - } finally { - process.env.PATH = originalPath - await fsPromises.rm(tempDir, { recursive: true, force: true }) - } - }, - ) - it.runIf(process.platform !== 'win32')( 'falls back to npx when bunx exists but cannot execute toktrack', async () => { @@ -166,97 +136,6 @@ describe('server helper utilities: toktrack runner process integration', () => { }, ) - it.runIf(process.platform !== 'win32')( - 'reuses a cached successful latest-version lookup until the TTL expires', - async () => { - const tempDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-latest-cache-success-')) - const countFile = path.join(tempDir, 'count.txt') - const originalPath = process.env.PATH - const nowSpy = vi.spyOn(Date, 'now') - process.env.PATH = tempDir - - try { - await writeExecutableScript( - tempDir, - 'npm', - `echo hit >> ${JSON.stringify(countFile)}\necho "${TOKTRACK_VERSION}"`, - ) - - nowSpy.mockReturnValue(1_000_000) - const firstStatus = await lookupLatestToktrackVersion() - nowSpy.mockReturnValue(1_000_000) - const secondStatus = await lookupLatestToktrackVersion() - nowSpy.mockReturnValue(1_000_000 + 5 * 60 * 1000 + 1) - const thirdStatus = await lookupLatestToktrackVersion() - - expect(firstStatus).toMatchObject({ - configuredVersion: TOKTRACK_VERSION, - latestVersion: TOKTRACK_VERSION, - isLatest: true, - lookupStatus: 'ok', - }) - expect(secondStatus).toEqual(firstStatus) - expect(thirdStatus).toEqual(firstStatus) - - const invocations = (await fsPromises.readFile(countFile, 'utf8')) - .trim() - .split('\n') - .filter(Boolean) - expect(invocations).toHaveLength(2) - } finally { - nowSpy.mockRestore() - process.env.PATH = originalPath - await fsPromises.rm(tempDir, { recursive: true, force: true }) - } - }, - ) - - it.runIf(process.platform !== 'win32')( - 'reuses a cached failed latest-version lookup until the failure TTL expires', - async () => { - const tempDir = await fsPromises.mkdtemp(path.join(tmpdir(), 
'ttdash-latest-cache-failure-')) - const countFile = path.join(tempDir, 'count.txt') - const originalPath = process.env.PATH - const nowSpy = vi.spyOn(Date, 'now') - process.env.PATH = tempDir - - try { - await writeExecutableScript( - tempDir, - 'npm', - `echo hit >> ${JSON.stringify(countFile)}\necho "lookup failed" >&2\nexit 1`, - ) - - nowSpy.mockReturnValue(2_000_000) - const firstStatus = await lookupLatestToktrackVersion() - nowSpy.mockReturnValue(2_000_000) - const secondStatus = await lookupLatestToktrackVersion() - nowSpy.mockReturnValue(2_000_000 + 60 * 1000 + 1) - const thirdStatus = await lookupLatestToktrackVersion() - - expect(firstStatus).toMatchObject({ - configuredVersion: TOKTRACK_VERSION, - latestVersion: null, - isLatest: null, - lookupStatus: 'failed', - message: 'lookup failed', - }) - expect(secondStatus).toEqual(firstStatus) - expect(thirdStatus).toEqual(firstStatus) - - const invocations = (await fsPromises.readFile(countFile, 'utf8')) - .trim() - .split('\n') - .filter(Boolean) - expect(invocations).toHaveLength(2) - } finally { - nowSpy.mockRestore() - process.env.PATH = originalPath - await fsPromises.rm(tempDir, { recursive: true, force: true }) - } - }, - ) - it.runIf(process.platform === 'win32')( 'checks npx on Windows without emitting DEP0190 warnings', async () => { diff --git a/tests/unit/server-helpers.shared.ts b/tests/unit/server-helpers.shared.ts index 21c32eb..4d92d0d 100644 --- a/tests/unit/server-helpers.shared.ts +++ b/tests/unit/server-helpers.shared.ts @@ -206,6 +206,7 @@ export { EventEmitter, TOKTRACK_VERSION, commandExists, + createAutoImportRuntime, existsSync, fork, fsPromises, From 44c073db5d0dc91fc67ff0123c82713217cb54ac Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Thu, 30 Apr 2026 17:59:30 +0200 Subject: [PATCH 13/42] Cover process runtime edge cases --- tests/unit/background-runtime.test.ts | 82 +++++++++++++++++++++++++++ tests/unit/process-utils.test.ts | 46 ++++++++++++++- 2 files changed, 126 
insertions(+), 2 deletions(-) diff --git a/tests/unit/background-runtime.test.ts b/tests/unit/background-runtime.test.ts index f94cebf..897e9c0 100644 --- a/tests/unit/background-runtime.test.ts +++ b/tests/unit/background-runtime.test.ts @@ -53,6 +53,7 @@ const { createBackgroundRuntime } = require('../../server/background-runtime.js' pruneBackgroundInstances: () => Promise< Array<{ id: string; pid: number; port: number; url: string; startedAt: string }> > + runStopCommand: () => Promise startInBackground: () => Promise } } @@ -163,6 +164,87 @@ describe('background runtime', () => { ) }) + it('reports permission denied when the stop command cannot signal an owned instance', async () => { + const registryEntries = [ + { + id: 'owned-instance', + pid: 101, + port: 3101, + url: 'http://127.0.0.1:3101', + startedAt: '2026-04-01T08:00:00.000Z', + }, + ] + const fsMock = { + readFileSync: vi.fn(() => JSON.stringify(registryEntries)), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + } + const processObject = { + ...process, + env: {}, + execPath: process.execPath, + exitCode: 0, + kill: vi.fn(() => { + throw Object.assign(new Error('permission denied'), { code: 'EPERM' }) + }), + } as unknown as NodeJS.Process + const errorSpy = vi.spyOn(console, 'error').mockImplementation(() => undefined) + const runtime = createBackgroundRuntime({ + fs: fsMock, + path, + processObject, + fetchImpl: vi.fn(async () => ({ + ok: true, + json: async () => ({ id: 'owned-instance', port: 3101 }), + })), + spawnImpl: vi.fn(), + readlinePromises: {} as typeof readlinePromisesModule, + entrypointPath: '/tmp/server.js', + appPaths: { + configDir: '/tmp/ttdash-config', + cacheDir: '/tmp/ttdash-cache', + }, + ensureAppDirs: vi.fn(), + ensureDir: vi.fn(), + writeJsonAtomic: vi.fn(), + normalizeIsoTimestamp: (value: string) => new Date(value).toISOString(), + bindHost: '127.0.0.1', + apiPrefix: '/api', + runtimeInstance: { + id: 'runtime-id', + pid: 999, + startedAt: 
'2026-04-01T07:00:00.000Z', + }, + normalizedCliArgs: [], + cliOptions: { + noOpen: true, + }, + forceOpenBrowser: false, + isWindows: false, + secureDirMode: 0o700, + secureFileMode: 0o600, + backgroundStartTimeoutMs: 15_000, + backgroundInstancesLockTimeoutMs: 5_000, + backgroundInstancesLockStaleMs: 10_000, + sleep: async () => {}, + isProcessRunning: () => true, + formatDateTime: (value: string) => value, + }) + + try { + await runtime.runStopCommand() + + expect(processObject.kill).toHaveBeenCalledWith(101, 'SIGTERM') + expect(errorSpy).toHaveBeenCalledWith( + 'Could not stop TTDash background server (permission denied): http://127.0.0.1:3101 (PID 101)', + ) + expect(processObject.exitCode).toBe(1) + } finally { + errorSpy.mockRestore() + } + }) + it('reports the background log path when startup fails before the log can be read', async () => { const readFileSync = vi.fn(() => { throw Object.assign(new Error('missing'), { code: 'ENOENT' }) diff --git a/tests/unit/process-utils.test.ts b/tests/unit/process-utils.test.ts index 4643651..2153185 100644 --- a/tests/unit/process-utils.test.ts +++ b/tests/unit/process-utils.test.ts @@ -1,9 +1,11 @@ import { createRequire } from 'node:module' -import { describe, expect, it } from 'vitest' +import { describe, expect, it, vi } from 'vitest' const require = createRequire(import.meta.url) -const { formatDateTime } = require('../../server/process-utils.js') as { +const { formatDateTime, isProcessRunning, sleep } = require('../../server/process-utils.js') as { formatDateTime: (value: string | number | Date, locale?: string) => string + isProcessRunning: (pid: number, processObject?: Pick) => boolean + sleep: (durationMs: number) => Promise } describe('process utilities', () => { @@ -11,4 +13,44 @@ describe('process utilities', () => { expect(formatDateTime('2026-04-27T12:00:00Z', 'en-US')).toContain('4/27/26') expect(formatDateTime('not-a-date')).toBe('') }) + + it('detects running, missing, and permission-protected 
processes', () => { + expect(isProcessRunning(0)).toBe(false) + expect(isProcessRunning(-1)).toBe(false) + expect(isProcessRunning(123, { kill: vi.fn() })).toBe(true) + expect( + isProcessRunning(123, { + kill: vi.fn(() => { + throw Object.assign(new Error('missing'), { code: 'ESRCH' }) + }), + }), + ).toBe(false) + expect( + isProcessRunning(123, { + kill: vi.fn(() => { + throw Object.assign(new Error('permission denied'), { code: 'EPERM' }) + }), + }), + ).toBe(true) + }) + + it('resolves sleep after the requested timer duration', async () => { + vi.useFakeTimers() + + try { + let settled = false + const sleepPromise = sleep(250).then(() => { + settled = true + }) + + await vi.advanceTimersByTimeAsync(249) + expect(settled).toBe(false) + + await vi.advanceTimersByTimeAsync(1) + await sleepPromise + expect(settled).toBe(true) + } finally { + vi.useRealTimers() + } + }) }) From 67db75975f94ffd9498e4cbd9e274ada365e8ee4 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Thu, 30 Apr 2026 18:04:53 +0200 Subject: [PATCH 14/42] Add parallel verification gate --- docs/testing.md | 11 ++ package.json | 2 + scripts/run-parallel-gate.js | 162 +++++++++++++++++++ tests/e2e/dashboard-forecast-filters.spec.ts | 22 ++- tests/unit/run-parallel-gate.test.ts | 88 ++++++++++ tests/unit/test-pipeline-scripts.test.ts | 2 + 6 files changed, 274 insertions(+), 13 deletions(-) create mode 100644 scripts/run-parallel-gate.js create mode 100644 tests/unit/run-parallel-gate.test.ts diff --git a/docs/testing.md b/docs/testing.md index 355e49f..d6d02fe 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -144,6 +144,8 @@ Prioritize targeted branch coverage in runtime-heavy modules before adding anoth - Playwright only, with a fresh app build: `PLAYWRIGHT_TEST_PORT=3016 npm run test:e2e:parallel` - CI-style Playwright smoke: `npm run test:e2e:ci` - Serial local mirror of the CI gate: `npm run verify:ci` +- Optional parallel local gate without Playwright: `npm run verify:parallel` +- Optional 
parallel full local gate with Playwright after build: `PLAYWRIGHT_TEST_PORT=3016 npm run verify:full:parallel` ## Architecture Guardrails @@ -181,6 +183,15 @@ that the old `50%` setting left jsdom work under-parallelized, while `100%` satu made imports, setup, tests, and environment time much worse. Re-benchmark with `npm run test:timings:benchmark -- --projects=frontend` before changing that worker cap. +`npm run verify:parallel` is an optional local fast path. It overlaps the static gate, API +integration tests, and `build:app`, then runs the high-contention suites in separate waves: unit, +frontend/jsdom, architecture, and background-process integration. That keeps the internally +parallel Vitest projects from over-subscribing the same cores and keeps real background servers away +from the jsdom/build storm. `verify:package` runs after those waves succeed. +`npm run verify:full:parallel` adds `test:e2e:ci` to the final wave. The canonical serial gates stay +unchanged; use the parallel gates for local feedback when the machine has enough CPU and the +per-project JUnit report paths must stay isolated. + ## CI Notes - Keep workflow test paths aligned with the split test structure. 
diff --git a/package.json b/package.json index 2508ff0..cd0afe9 100644 --- a/package.json +++ b/package.json @@ -59,6 +59,8 @@ "verify:release": "npm run test:static && npm run test:architecture && npm run test:vitest:coverage && npm run build:app && npm run verify:package", "verify:ci": "npm run verify:release && npm run test:e2e:ci", "verify:full": "npm run verify:ci", + "verify:parallel": "node scripts/run-parallel-gate.js", + "verify:full:parallel": "node scripts/run-parallel-gate.js --e2e", "prepare": "npm run build:app" }, "files": [ diff --git a/scripts/run-parallel-gate.js b/scripts/run-parallel-gate.js new file mode 100644 index 0000000..6de7589 --- /dev/null +++ b/scripts/run-parallel-gate.js @@ -0,0 +1,162 @@ +#!/usr/bin/env node + +const { spawn } = require('child_process'); + +const npmCommand = process.platform === 'win32' ? 'npm.cmd' : 'npm'; + +function createTask(name, script) { + return { + name, + command: npmCommand, + args: ['run', script], + }; +} + +function createParallelGatePlan({ e2e = false } = {}) { + const buildAndApiGroup = [ + createTask('static', 'test:static'), + createTask('integration', 'test:vitest:integration'), + createTask('build', 'build:app'), + ]; + const unitGroup = [createTask('unit', 'test:vitest:unit')]; + const frontendGroup = [createTask('frontend', 'test:vitest:frontend')]; + const architectureGroup = [createTask('architecture', 'test:vitest:architecture')]; + const backgroundGroup = [ + createTask('integration-background', 'test:vitest:integration-background'), + ]; + const finalGroup = [createTask('package-smoke', 'verify:package')]; + + if (e2e) { + finalGroup.push(createTask('e2e', 'test:e2e:ci')); + } + + return [ + buildAndApiGroup, + unitGroup, + frontendGroup, + architectureGroup, + backgroundGroup, + finalGroup, + ]; +} + +function parseArgs(argv) { + const options = { + dryRun: false, + e2e: false, + help: false, + }; + + for (const arg of argv) { + if (arg === '--dry-run') { + options.dryRun = true; + } 
else if (arg === '--e2e') { + options.e2e = true; + } else if (arg === '--help' || arg === '-h') { + options.help = true; + } else { + throw new Error(`Unknown option: ${arg}`); + } + } + + return options; +} + +function printUsage(stdout) { + stdout.write(`Usage: node scripts/run-parallel-gate.js [options] + +Options: + --e2e Run the Playwright CI smoke after the build group succeeds. + --dry-run Print the task graph without running commands. +`); +} + +function prefixChunk(stream, taskName, chunk) { + for (const line of chunk.toString().split(/\r?\n/)) { + if (line.length > 0) { + stream.write(`[${taskName}] ${line}\n`); + } + } +} + +function runTask(task, spawnImpl = spawn, streams = process) { + return new Promise((resolve) => { + streams.stdout.write(`[${task.name}] starting ${task.command} ${task.args.join(' ')}\n`); + + const child = spawnImpl(task.command, task.args, { + cwd: process.cwd(), + env: process.env, + stdio: ['ignore', 'pipe', 'pipe'], + }); + + child.stdout.on('data', (chunk) => prefixChunk(streams.stdout, task.name, chunk)); + child.stderr.on('data', (chunk) => prefixChunk(streams.stderr, task.name, chunk)); + child.on('error', (error) => { + streams.stderr.write(`[${task.name}] failed to start: ${error.message}\n`); + resolve({ task, status: 1 }); + }); + child.on('close', (status) => { + streams.stdout.write(`[${task.name}] exited with ${status ?? 1}\n`); + resolve({ task, status: status ?? 1 }); + }); + }); +} + +async function runTaskGroup(tasks, spawnImpl, streams) { + const results = await Promise.all(tasks.map((task) => runTask(task, spawnImpl, streams))); + return results.every((result) => result.status === 0) ? 0 : 1; +} + +async function run(argv = process.argv.slice(2), streams = process, spawnImpl = spawn) { + let options; + + try { + options = parseArgs(argv); + } catch (error) { + streams.stderr.write(`${error instanceof Error ? 
error.message : String(error)}\n`); + return 1; + } + + if (options.help) { + printUsage(streams.stdout); + return 0; + } + + const plan = createParallelGatePlan({ e2e: options.e2e }); + + if (options.dryRun) { + plan.forEach((group, index) => { + streams.stdout.write(`group ${index + 1}\n`); + group.forEach((task) => { + streams.stdout.write(` ${task.name}: ${task.command} ${task.args.join(' ')}\n`); + }); + }); + return 0; + } + + for (const [index, group] of plan.entries()) { + streams.stdout.write(`\nparallel gate group ${index + 1}/${plan.length}\n`); + const status = await runTaskGroup(group, spawnImpl, streams); + if (status !== 0) { + return status; + } + } + + return 0; +} + +function main() { + run().then((status) => { + process.exitCode = status; + }); +} + +if (require.main === module) { + main(); +} + +module.exports = { + createParallelGatePlan, + parseArgs, + run, +}; diff --git a/tests/e2e/dashboard-forecast-filters.spec.ts b/tests/e2e/dashboard-forecast-filters.spec.ts index fe5b568..9ca0d19 100644 --- a/tests/e2e/dashboard-forecast-filters.spec.ts +++ b/tests/e2e/dashboard-forecast-filters.spec.ts @@ -97,21 +97,17 @@ test('exposes pressed filter state and supports keyboard date selection in the d const dateDialog = page.getByRole('dialog') await expect(dateDialog).toBeVisible() - const focusedDayBefore = await page.evaluate( - () => document.activeElement?.textContent?.trim() ?? '', - ) + const midMonthDay = dateDialog.locator('button[aria-pressed]').filter({ hasText: /^15$/ }) + await midMonthDay.focus() + await expect(midMonthDay).toBeFocused() + const focusedDayBefore = await midMonthDay.getAttribute('aria-label') + expect(focusedDayBefore ?? '').toMatch(/\d/) await page.keyboard.press('ArrowRight') - await page.waitForFunction( - (previous) => (document.activeElement?.textContent?.trim() ?? '') !== previous, - focusedDayBefore, - ) - - const focusedDayAfter = await page.evaluate( - () => document.activeElement?.textContent?.trim() ?? 
'', - ) - expect(focusedDayAfter).not.toBe(focusedDayBefore) - expect(focusedDayAfter).toMatch(/^\d+$/) + const focusedDay = dateDialog.locator('button[aria-pressed][tabindex="0"]') + await expect(focusedDay).not.toHaveAttribute('aria-label', focusedDayBefore ?? '') + await expect(focusedDay).toBeFocused() + await expect(focusedDay).toHaveAttribute('aria-label', /\d/) await page.keyboard.press('Enter') diff --git a/tests/unit/run-parallel-gate.test.ts b/tests/unit/run-parallel-gate.test.ts new file mode 100644 index 0000000..2f5a03b --- /dev/null +++ b/tests/unit/run-parallel-gate.test.ts @@ -0,0 +1,88 @@ +import { createRequire } from 'node:module' +import { EventEmitter } from 'node:events' +import { describe, expect, it, vi } from 'vitest' + +const require = createRequire(import.meta.url) + +type GateTask = { + args: string[] + command: string + name: string +} + +type ParallelGateScript = { + createParallelGatePlan: (options?: { e2e?: boolean }) => GateTask[][] + parseArgs: (argv: string[]) => { dryRun: boolean; e2e: boolean; help: boolean } + run: ( + argv: string[], + streams: { + stdout: { write: (message: string) => void } + stderr: { write: (message: string) => void } + }, + spawnImpl: (command: string, args: string[], options: unknown) => EventEmitter, + ) => Promise +} + +class FakeChild extends EventEmitter { + stdout = new EventEmitter() + stderr = new EventEmitter() +} + +const parallelGate = require('../../scripts/run-parallel-gate.js') as ParallelGateScript + +describe('parallel verification gate', () => { + it('keeps independent work parallel and isolates high-contention test suites', () => { + const plan = parallelGate.createParallelGatePlan({ e2e: true }) + + expect(plan.map((group) => group.map((task) => task.name))).toEqual([ + ['static', 'integration', 'build'], + ['unit'], + ['frontend'], + ['architecture'], + ['integration-background'], + ['package-smoke', 'e2e'], + ]) + expect(plan[2]?.find((task) => task.name === 
'frontend')?.args).toEqual([ + 'run', + 'test:vitest:frontend', + ]) + expect(plan[5]?.find((task) => task.name === 'e2e')?.args).toEqual(['run', 'test:e2e:ci']) + }) + + it('parses dry-run and e2e options', () => { + expect(parallelGate.parseArgs(['--dry-run', '--e2e'])).toEqual({ + dryRun: true, + e2e: true, + help: false, + }) + }) + + it('prints the task graph without spawning commands in dry-run mode', async () => { + const stdout = { write: vi.fn() } + const stderr = { write: vi.fn() } + const spawnImpl = vi.fn() + + const status = await parallelGate.run(['--dry-run', '--e2e'], { stdout, stderr }, spawnImpl) + + expect(status).toBe(0) + expect(spawnImpl).not.toHaveBeenCalled() + expect(stdout.write.mock.calls.join('\n')).toContain('group 1') + expect(stdout.write.mock.calls.join('\n')).toContain('frontend: npm run test:vitest:frontend') + expect(stdout.write.mock.calls.join('\n')).toContain('e2e: npm run test:e2e:ci') + }) + + it('stops after the first failing group', async () => { + const stdout = { write: vi.fn() } + const stderr = { write: vi.fn() } + const spawnImpl = vi.fn(() => { + const child = new FakeChild() + queueMicrotask(() => child.emit('close', 1)) + return child + }) + + const status = await parallelGate.run([], { stdout, stderr }, spawnImpl) + + expect(status).toBe(1) + expect(spawnImpl).toHaveBeenCalledTimes(3) + }) +}) diff --git a/tests/unit/test-pipeline-scripts.test.ts b/tests/unit/test-pipeline-scripts.test.ts index d310401..b5cf323 100644 --- a/tests/unit/test-pipeline-scripts.test.ts +++ b/tests/unit/test-pipeline-scripts.test.ts @@ -17,6 +17,8 @@ describe('test pipeline scripts', () => { expect(scripts['test:timings:benchmark']).toBe( 'node scripts/run-vitest-project-timings.js --repeat=3', ) + expect(scripts['verify:parallel']).toBe('node scripts/run-parallel-gate.js') + expect(scripts['verify:full:parallel']).toBe('node scripts/run-parallel-gate.js --e2e') expect(scripts['verify:ci']).toBe('npm run verify:release && npm run 
test:e2e:ci') expect(scripts['verify:full']).toBe('npm run verify:ci') }) From b7fbe80cbe1a11d34b9ce0880f0db1982f7b3224 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Thu, 30 Apr 2026 23:07:11 +0200 Subject: [PATCH 15/42] Stabilize background concurrency tests --- server/background-runtime.js | 2 +- .../server-background-concurrency.test.ts | 31 +- tests/integration/server-test-helpers.ts | 116 ++++++- tests/unit/background-runtime.test.ts | 313 ++++++++++++++++++ 4 files changed, 449 insertions(+), 13 deletions(-) diff --git a/server/background-runtime.js b/server/background-runtime.js index 7bee18a..f3cde11 100644 --- a/server/background-runtime.js +++ b/server/background-runtime.js @@ -299,7 +299,7 @@ function createBackgroundRuntime({ } function buildBackgroundLogFilePath() { - return path.join(backgroundLogDir, `server-${Date.now()}.log`); + return path.join(backgroundLogDir, `server-${Date.now()}-${processObject.pid}.log`); } function readBackgroundLogOutput(logFile) { diff --git a/tests/integration/server-background-concurrency.test.ts b/tests/integration/server-background-concurrency.test.ts index d6e45df..b97b682 100644 --- a/tests/integration/server-background-concurrency.test.ts +++ b/tests/integration/server-background-concurrency.test.ts @@ -4,6 +4,9 @@ import path from 'node:path' import { describe, expect, it } from 'vitest' import { createCliEnv, + formatBackgroundDiagnostics, + formatCliResult, + readBackgroundRegistry, runCli, stopAllBackgroundServers, waitForBackgroundRegistry, @@ -21,8 +24,22 @@ describe('local server background concurrent startup', () => { runCli(['--background', '--no-open'], { env: backgroundEnv }), ]) - expect(firstStart.code).toBe(0) - expect(secondStart.code).toBe(0) + expect( + firstStart.code, + [ + 'first concurrent background start failed', + formatCliResult(firstStart), + formatBackgroundDiagnostics(backgroundRoot), + ].join('\n\n'), + ).toBe(0) + expect( + secondStart.code, + [ + 'second concurrent background start 
failed', + formatCliResult(secondStart), + formatBackgroundDiagnostics(backgroundRoot), + ].join('\n\n'), + ).toBe(0) const registry = await waitForBackgroundRegistry( backgroundRoot, @@ -32,8 +49,14 @@ describe('local server background concurrent startup', () => { const [firstInstance, secondInstance] = registry await waitForUrlAvailable(firstInstance!.url) await waitForUrlAvailable(secondInstance!.url) - expect(registry).toHaveLength(2) - expect(new Set(registry.map((entry) => entry.url)).size).toBe(2) + expect( + readBackgroundRegistry(backgroundRoot), + formatBackgroundDiagnostics(backgroundRoot, 'registry changed after readiness checks'), + ).toHaveLength(2) + expect( + new Set(registry.map((entry) => entry.url)).size, + formatBackgroundDiagnostics(backgroundRoot, 'duplicate background registry URLs'), + ).toBe(2) } finally { await stopAllBackgroundServers(backgroundEnv, backgroundRoot) rmSync(backgroundRoot, { recursive: true, force: true }) diff --git a/tests/integration/server-test-helpers.ts b/tests/integration/server-test-helpers.ts index c59ff6b..e4baae1 100644 --- a/tests/integration/server-test-helpers.ts +++ b/tests/integration/server-test-helpers.ts @@ -4,6 +4,7 @@ import { existsSync, mkdirSync, mkdtempSync, + readdirSync, readFileSync, rmSync, statSync, @@ -48,6 +49,15 @@ export type LocalAuthSession = { createdAt: string } +export type CliResult = { + args: string[] + code: number | null + output: string + stdout: string + stderr: string + signal: NodeJS.Signals | null +} + const authHeadersByOrigin = new Map() const fetchProbeTimeoutMs = 1000 const fetchRequestTimeoutMs = 15_000 @@ -534,6 +544,18 @@ export function getCliDataDir(root: string) { return path.join(root, 'data', 'ttdash') } +export function getCliCacheDir(root: string) { + if (process.platform === 'darwin') { + return path.join(root, 'Library', 'Caches', 'TTDash') + } + + if (process.platform === 'win32') { + return path.join(root, 'AppData', 'Local', 'TTDash', 'Cache') + } + + 
return path.join(root, 'cache', 'ttdash') +} + export function getLocalAuthSessionPath(root: string) { return path.join(getCliConfigDir(root), 'session-auth.json') } @@ -717,6 +739,66 @@ export function tryReadBackgroundRegistry(root: string) { } } +function readBackgroundLogSnippets(root: string, entries: BackgroundRegistryEntry[]) { + const logFiles = new Set( + entries.map((entry) => entry.logFile).filter((logFile): logFile is string => Boolean(logFile)), + ) + const defaultLogDir = path.join(getCliCacheDir(root), 'background') + + try { + for (const fileName of readdirSync(defaultLogDir)) { + if (fileName.startsWith('server-') && fileName.endsWith('.log')) { + logFiles.add(path.join(defaultLogDir, fileName)) + } + } + } catch { + // Log files are optional diagnostics; missing log directories are expected + // when a background child exits before opening its log file. + } + + return [...logFiles].map((logFile) => { + let content = '' + try { + content = readFileSync(logFile, 'utf-8').trim() + } catch (error) { + content = `` + } + + return { + file: logFile, + content: content.slice(-4000), + } + }) +} + +export function formatCliResult(result: CliResult) { + return [ + `args: server.js ${result.args.join(' ')}`, + `code: ${result.code ?? 'null'}`, + `signal: ${result.signal ?? 'null'}`, + `stdout:\n${result.stdout.trim() || ''}`, + `stderr:\n${result.stderr.trim() || ''}`, + ].join('\n') +} + +export function formatBackgroundDiagnostics(root: string, label = 'background diagnostics') { + const registryPath = path.join(getCliConfigDir(root), 'background-instances.json') + const registry = tryReadBackgroundRegistry(root) + const logs = readBackgroundLogSnippets(root, registry) + + return [ + label, + `root: ${root}`, + `registry: ${registryPath}`, + `entries:\n${JSON.stringify(registry, null, 2)}`, + `logs:\n${ + logs.length + ? 
logs.map((log) => `--- ${log.file} ---\n${log.content || ''}`).join('\n') + : '' + }`, + ].join('\n') +} + export function writeBackgroundRegistry(root: string, entries: BackgroundRegistryEntry[]) { const normalizedEntries = normalizeBackgroundRegistryEntries(entries) if (normalizedEntries.length !== entries.length) { @@ -752,7 +834,10 @@ export async function waitForBackgroundRegistry( } throw new Error( - `Timed out waiting for background registry state: ${JSON.stringify(lastEntries, null, 2)}`, + [ + `Timed out waiting for background registry state: ${JSON.stringify(lastEntries, null, 2)}`, + formatBackgroundDiagnostics(root), + ].join('\n\n'), ) } @@ -781,14 +866,15 @@ export async function runCli( timeoutMs = cliCommandTimeoutMs, }: { env: NodeJS.ProcessEnv; input?: string; timeoutMs?: number }, ) { - return await new Promise<{ code: number | null; output: string }>((resolve, reject) => { + return await new Promise((resolve, reject) => { const cli = spawn(process.execPath, ['server.js', ...args], { cwd: process.cwd(), env, stdio: ['pipe', 'pipe', 'pipe'], }) - let cliOutput = '' + let cliStdout = '' + let cliStderr = '' let timedOut = false let forceKillId: ReturnType | null = null const timeoutId = setTimeout(() => { @@ -809,27 +895,41 @@ export async function runCli( } cli.stdout.on('data', (chunk) => { - cliOutput += chunk.toString() + cliStdout += chunk.toString() }) cli.stderr.on('data', (chunk) => { - cliOutput += chunk.toString() + cliStderr += chunk.toString() }) cli.on('error', (error) => { cleanup() reject(error) }) - cli.on('close', (code) => { + cli.on('close', (code, signal) => { cleanup() + const cliOutput = cliStdout + cliStderr if (timedOut) { reject( - new Error(`Timed out waiting for CLI command: server.js ${args.join(' ')}\n${cliOutput}`), + new Error( + [ + `Timed out waiting for CLI command: server.js ${args.join(' ')}`, + `stdout:\n${cliStdout.trim() || ''}`, + `stderr:\n${cliStderr.trim() || ''}`, + ].join('\n'), + ), ) return } - 
resolve({ code, output: cliOutput }) + resolve({ + args, + code, + output: cliOutput, + stdout: cliStdout, + stderr: cliStderr, + signal, + }) }) if (input) { diff --git a/tests/unit/background-runtime.test.ts b/tests/unit/background-runtime.test.ts index 897e9c0..4b99d29 100644 --- a/tests/unit/background-runtime.test.ts +++ b/tests/unit/background-runtime.test.ts @@ -11,6 +11,7 @@ const { createBackgroundRuntime } = require('../../server/background-runtime.js' mkdirSync: (dirPath: string, options?: unknown) => void chmodSync: (filePath: string, mode: number) => void rmSync: (targetPath: string, options?: unknown) => void + statSync?: (targetPath: string) => { mtimeMs: number } openSync?: (filePath: string, flags: string, mode: number) => number fchmodSync?: (fd: number, mode: number) => void closeSync?: (fd: number) => void @@ -50,6 +51,12 @@ const { createBackgroundRuntime } = require('../../server/background-runtime.js' isProcessRunning: (pid: number) => boolean formatDateTime: (value: string) => string }) => { + fetchRuntimeIdentity: ( + url: string, + requestApiPrefix?: string, + timeoutMs?: number, + requestAuthHeader?: string | null, + ) => Promise<{ id: string; port: number } | null> pruneBackgroundInstances: () => Promise< Array<{ id: string; pid: number; port: number; url: string; startedAt: string }> > @@ -58,7 +65,114 @@ const { createBackgroundRuntime } = require('../../server/background-runtime.js' } } +type BackgroundRuntimeOptions = Parameters[0] + +function createTestBackgroundRuntime(overrides: Partial = {}) { + const fsMock = { + readFileSync: vi.fn(() => '[]'), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + ...overrides.fs, + } + + return createBackgroundRuntime({ + fs: fsMock, + path, + processObject: { + ...process, + env: {}, + execPath: process.execPath, + } as NodeJS.Process, + fetchImpl: vi.fn(async () => ({ + ok: true, + json: async () => ({ id: 'runtime-id', port: 3101 }), + })), + spawnImpl: vi.fn(), + 
readlinePromises: {} as typeof readlinePromisesModule, + entrypointPath: '/tmp/server.js', + appPaths: { + configDir: '/tmp/ttdash-config', + cacheDir: '/tmp/ttdash-cache', + }, + ensureAppDirs: vi.fn(), + ensureDir: vi.fn(), + writeJsonAtomic: vi.fn(), + normalizeIsoTimestamp: (value: string) => new Date(value).toISOString(), + bindHost: '127.0.0.1', + apiPrefix: '/api', + runtimeInstance: { + id: 'runtime-id', + pid: 999, + startedAt: '2026-04-01T07:00:00.000Z', + }, + normalizedCliArgs: [], + cliOptions: { + noOpen: true, + }, + forceOpenBrowser: false, + isWindows: false, + secureDirMode: 0o700, + secureFileMode: 0o600, + backgroundStartTimeoutMs: 15_000, + backgroundInstancesLockTimeoutMs: 5_000, + backgroundInstancesLockStaleMs: 10_000, + sleep: async () => {}, + isProcessRunning: () => true, + formatDateTime: (value: string) => value, + ...overrides, + fs: fsMock, + }) +} + describe('background runtime', () => { + it('fetches runtime identity with the requested API prefix and auth header', async () => { + const fetchImpl = vi.fn(async () => ({ + ok: true, + json: async () => ({ id: 'remote-runtime', port: 3107 }), + })) + const runtime = createTestBackgroundRuntime({ + fetchImpl, + apiPrefix: '/default-api', + }) + + await expect( + runtime.fetchRuntimeIdentity( + 'http://127.0.0.1:3107', + '/custom-api/', + 250, + 'Bearer registry-token', + ), + ).resolves.toEqual({ id: 'remote-runtime', port: 3107 }) + + expect(fetchImpl).toHaveBeenCalledWith(new URL('http://127.0.0.1:3107/custom-api/runtime'), { + headers: { Authorization: 'Bearer registry-token' }, + signal: expect.any(AbortSignal), + }) + }) + + it('treats unavailable runtime identity responses as stale registry entries', async () => { + const fetchImpl = vi + .fn() + .mockResolvedValueOnce({ + ok: false, + json: async () => ({ id: 'ignored', port: 3101 }), + }) + .mockResolvedValueOnce({ + ok: true, + json: async () => null, + }) + .mockRejectedValueOnce(new Error('offline')) + const runtime = 
createTestBackgroundRuntime({ + fetchImpl, + }) + + await expect(runtime.fetchRuntimeIdentity('')).resolves.toBeNull() + await expect(runtime.fetchRuntimeIdentity('http://127.0.0.1:3101')).resolves.toBeNull() + await expect(runtime.fetchRuntimeIdentity('http://127.0.0.1:3102')).resolves.toBeNull() + await expect(runtime.fetchRuntimeIdentity('http://127.0.0.1:3103')).resolves.toBeNull() + }) + it('prunes stale instances without re-reading the registry snapshot', async () => { const registryEntries = [ { @@ -164,6 +278,102 @@ describe('background runtime', () => { ) }) + it('removes stale registry locks before reading background instances', async () => { + const dateNowSpy = vi.spyOn(Date, 'now').mockReturnValue(20_000) + const fsMock = { + readFileSync: vi.fn(() => '[]'), + mkdirSync: vi + .fn() + .mockImplementationOnce(() => { + throw Object.assign(new Error('lock exists'), { code: 'EEXIST' }) + }) + .mockImplementationOnce(() => undefined), + chmodSync: vi.fn(), + rmSync: vi.fn(), + statSync: vi.fn(() => ({ mtimeMs: 0 })), + } + const runtime = createTestBackgroundRuntime({ + fs: fsMock, + backgroundInstancesLockStaleMs: 10_000, + }) + + try { + await expect(runtime.pruneBackgroundInstances()).resolves.toEqual([]) + + expect(fsMock.statSync).toHaveBeenCalledWith( + path.join('/tmp/ttdash-config', 'background-instances.lock'), + ) + expect(fsMock.rmSync).toHaveBeenCalledWith( + path.join('/tmp/ttdash-config', 'background-instances.lock'), + { recursive: true, force: true }, + ) + expect(fsMock.mkdirSync).toHaveBeenCalledTimes(2) + } finally { + dateNowSpy.mockRestore() + } + }) + + it('fails fast with a clear error when the registry lock cannot be acquired', async () => { + let currentTime = 0 + const dateNowSpy = vi.spyOn(Date, 'now').mockImplementation(() => currentTime) + const fsMock = { + readFileSync: vi.fn(() => '[]'), + mkdirSync: vi.fn(() => { + throw Object.assign(new Error('lock exists'), { code: 'EEXIST' }) + }), + chmodSync: vi.fn(), + rmSync: 
vi.fn(), + statSync: vi.fn(() => ({ mtimeMs: currentTime })), + } + const sleep = vi.fn(async (durationMs: number) => { + currentTime += durationMs + }) + const runtime = createTestBackgroundRuntime({ + fs: fsMock, + backgroundInstancesLockTimeoutMs: 100, + backgroundInstancesLockStaleMs: 1_000, + sleep, + }) + + try { + await expect(runtime.pruneBackgroundInstances()).rejects.toThrow( + 'Could not acquire background registry lock.', + ) + + expect(sleep).toHaveBeenCalledWith(50) + expect(fsMock.rmSync).not.toHaveBeenCalled() + } finally { + dateNowSpy.mockRestore() + } + }) + + it('rewrites invalid registry payloads to an empty alive snapshot', async () => { + const invalidRegistry = [ + { + id: 'missing-port', + pid: 101, + url: 'http://127.0.0.1:3101', + startedAt: '2026-04-01T08:00:00.000Z', + }, + ] + const writeJsonAtomic = vi.fn() + const runtime = createTestBackgroundRuntime({ + fs: { + readFileSync: vi.fn(() => JSON.stringify(invalidRegistry)), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + }, + writeJsonAtomic, + }) + + await expect(runtime.pruneBackgroundInstances()).resolves.toEqual([]) + expect(writeJsonAtomic).toHaveBeenCalledWith( + path.join('/tmp/ttdash-config', 'background-instances.json'), + [], + ) + }) + it('reports permission denied when the stop command cannot signal an owned instance', async () => { const registryEntries = [ { @@ -245,6 +455,102 @@ describe('background runtime', () => { } }) + it('removes a registry entry when stop finds the owned process already gone', async () => { + const registryEntries = [ + { + id: 'owned-instance', + pid: 101, + port: 3101, + url: 'http://127.0.0.1:3101', + startedAt: '2026-04-01T08:00:00.000Z', + }, + ] + const writeJsonAtomic = vi.fn() + const processObject = { + ...process, + env: {}, + execPath: process.execPath, + exitCode: 0, + kill: vi.fn(() => { + throw Object.assign(new Error('already gone'), { code: 'ESRCH' }) + }), + } as unknown as NodeJS.Process + const logSpy = 
vi.spyOn(console, 'log').mockImplementation(() => undefined) + const runtime = createTestBackgroundRuntime({ + fs: { + readFileSync: vi.fn(() => JSON.stringify(registryEntries)), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + }, + processObject, + fetchImpl: vi.fn(async () => ({ + ok: true, + json: async () => ({ id: 'owned-instance', port: 3101 }), + })), + writeJsonAtomic, + }) + + try { + await runtime.runStopCommand() + + expect(writeJsonAtomic).toHaveBeenLastCalledWith( + path.join('/tmp/ttdash-config', 'background-instances.json'), + [], + ) + expect(logSpy).toHaveBeenCalledWith( + 'Instance was already stopped and was removed from the registry: http://127.0.0.1:3101 (PID 101)', + ) + expect(processObject.exitCode).toBe(0) + } finally { + logSpy.mockRestore() + } + }) + + it('passes the explicit browser-open decision to background children', async () => { + const spawnImpl = vi.fn(() => ({ + pid: 123, + unref: vi.fn(), + })) + const runtime = createTestBackgroundRuntime({ + fs: { + readFileSync: vi.fn(() => ''), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + openSync: vi.fn(() => 42), + fchmodSync: vi.fn(), + closeSync: vi.fn(), + }, + processObject: { + ...process, + env: {}, + execPath: '/usr/bin/node', + } as NodeJS.Process, + spawnImpl, + normalizedCliArgs: ['--background'], + cliOptions: { + noOpen: false, + }, + forceOpenBrowser: true, + isProcessRunning: () => false, + }) + + await expect(runtime.startInBackground()).rejects.toThrow( + 'Could not start TTDash as a background process.', + ) + expect(spawnImpl).toHaveBeenCalledWith( + '/usr/bin/node', + ['/tmp/server.js'], + expect.objectContaining({ + env: expect.objectContaining({ + TTDASH_BACKGROUND_CHILD: '1', + TTDASH_FORCE_OPEN_BROWSER: '1', + }), + }), + ) + }) + it('reports the background log path when startup fails before the log can be read', async () => { const readFileSync = vi.fn(() => { throw Object.assign(new Error('missing'), { code: 'ENOENT' }) @@ 
-312,6 +618,13 @@ describe('background runtime', () => { expect.stringContaining('/tmp/ttdash-cache/background/server-'), 'utf-8', ) + expect(fsMock.openSync).toHaveBeenCalledWith( + expect.stringMatching( + new RegExp(`/tmp/ttdash-cache/background/server-\\d+-${process.pid}\\.log$`), + ), + 'a', + 0o600, + ) expect(fsMock.closeSync).toHaveBeenCalledWith(42) }) }) From 165782244c30f85d0cc474d593ff6db8ac2b1e0f Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Thu, 30 Apr 2026 23:24:46 +0200 Subject: [PATCH 16/42] Move command palette contracts to unit tests --- .../command-palette-action-groups.test.tsx | 118 ------------------ .../dashboard-language-regressions.test.tsx | 61 ++++++++- tests/frontend/filter-bar-presets.test.tsx | 13 -- tests/unit/command-palette-commands.test.ts | 53 ++++++-- 4 files changed, 105 insertions(+), 140 deletions(-) delete mode 100644 tests/frontend/command-palette-action-groups.test.tsx diff --git a/tests/frontend/command-palette-action-groups.test.tsx b/tests/frontend/command-palette-action-groups.test.tsx deleted file mode 100644 index 2679b5b..0000000 --- a/tests/frontend/command-palette-action-groups.test.tsx +++ /dev/null @@ -1,118 +0,0 @@ -// @vitest-environment jsdom - -import { fireEvent, screen, within } from '@testing-library/react' -import type { ComponentProps } from 'react' -import { beforeEach, describe, expect, it, vi } from 'vitest' -import { CommandPalette } from '@/components/features/command-palette/CommandPalette' -import { DEFAULT_APP_SETTINGS } from '@/lib/app-settings' -import { initI18n } from '@/lib/i18n' -import { renderWithAppProviders } from '../test-utils' - -type CommandPaletteProps = ComponentProps - -function buildCommandPaletteProps( - overrides: Partial = {}, -): CommandPaletteProps { - const noop = () => {} - - return { - isDark: true, - availableProviders: [], - selectedProviders: [], - availableModels: [], - selectedModels: [], - hasTodaySection: false, - hasMonthSection: false, - hasRequestSection: 
false, - sectionVisibility: { ...DEFAULT_APP_SETTINGS.sectionVisibility }, - sectionOrder: [...DEFAULT_APP_SETTINGS.sectionOrder], - reportGenerating: false, - onToggleTheme: noop, - onExportCSV: noop, - onGenerateReport: noop, - onDelete: noop, - onUpload: noop, - onAutoImport: noop, - onOpenSettings: noop, - onScrollTo: noop, - onViewModeChange: noop, - onApplyPreset: noop, - onToggleProvider: noop, - onToggleModel: noop, - onClearProviders: noop, - onClearModels: noop, - onClearDateRange: noop, - onResetAll: noop, - onHelp: noop, - onLanguageChange: noop, - ...overrides, - } -} - -function renderCommandPalette(overrides: Partial = {}) { - return renderWithAppProviders() -} - -function openCommandPalette() { - fireEvent.keyDown(document, { key: 'k', metaKey: true }) -} - -function getCommandGroup(name: string) { - const group = screen.getByText(name).closest('[cmdk-group]') - expect(group).not.toBeNull() - return group as HTMLElement -} - -describe('CommandPalette action groups', () => { - beforeEach(async () => { - Element.prototype.scrollIntoView = vi.fn() - await initI18n('en') - }) - - it('keeps action command ids stable while separating daily, export, and maintenance groups', async () => { - renderCommandPalette() - - openCommandPalette() - expect(await screen.findByRole('dialog', { name: 'Command palette' })).toBeInTheDocument() - - const loadDataGroup = getCommandGroup('Load data') - expect(within(loadDataGroup).getByTestId('command-auto-import')).toBeInTheDocument() - expect(within(loadDataGroup).getByTestId('command-upload')).toBeInTheDocument() - - const exportsGroup = getCommandGroup('Exports') - expect(within(exportsGroup).getByTestId('command-csv')).toBeInTheDocument() - expect(within(exportsGroup).getByTestId('command-report')).toBeInTheDocument() - - const maintenanceGroup = getCommandGroup('Maintenance') - expect(within(maintenanceGroup).getByTestId('command-settings-open')).toBeInTheDocument() - 
expect(within(maintenanceGroup).getByTestId('command-delete')).toBeInTheDocument() - - for (const commandId of [ - 'command-auto-import', - 'command-upload', - 'command-csv', - 'command-report', - 'command-settings-open', - 'command-delete', - ]) { - expect(screen.getByTestId(commandId)).toBeInTheDocument() - } - }) - - it('localizes the command group labels', async () => { - const currentLanguage = document.documentElement.lang - - try { - await initI18n('de') - renderCommandPalette() - - openCommandPalette() - - expect(await screen.findByText('Daten laden')).toBeInTheDocument() - expect(screen.getByText('Exporte')).toBeInTheDocument() - expect(screen.getByText('Wartung')).toBeInTheDocument() - } finally { - await initI18n(currentLanguage || 'en') - } - }) -}) diff --git a/tests/frontend/dashboard-language-regressions.test.tsx b/tests/frontend/dashboard-language-regressions.test.tsx index 5298895..5ee2ca1 100644 --- a/tests/frontend/dashboard-language-regressions.test.tsx +++ b/tests/frontend/dashboard-language-regressions.test.tsx @@ -1,15 +1,20 @@ // @vitest-environment jsdom import { fireEvent, screen } from '@testing-library/react' -import { beforeAll, describe, expect, it } from 'vitest' +import type { ComponentProps } from 'react' +import { beforeAll, describe, expect, it, vi } from 'vitest' import { PrimaryMetrics } from '@/components/cards/PrimaryMetrics' import { ChartCard } from '@/components/charts/ChartCard' +import { CommandPalette } from '@/components/features/command-palette/CommandPalette' import { UsageInsights } from '@/components/features/insights/UsageInsights' import { Header } from '@/components/layout/Header' +import { DEFAULT_APP_SETTINGS } from '@/lib/app-settings' import { initI18n } from '@/lib/i18n' import type { DashboardMetrics } from '@/types' import { renderWithAppProviders } from '../test-utils' +type CommandPaletteProps = ComponentProps + const emptyMetrics: DashboardMetrics = { totalCost: 0, totalTokens: 0, @@ -45,8 +50,48 @@ 
const emptyMetrics: DashboardMetrics = { providerConcentrationIndex: 0, } +function buildCommandPaletteProps( + overrides: Partial = {}, +): CommandPaletteProps { + const noop = () => {} + + return { + isDark: true, + availableProviders: [], + selectedProviders: [], + availableModels: [], + selectedModels: [], + hasTodaySection: false, + hasMonthSection: false, + hasRequestSection: false, + sectionVisibility: { ...DEFAULT_APP_SETTINGS.sectionVisibility }, + sectionOrder: [...DEFAULT_APP_SETTINGS.sectionOrder], + reportGenerating: false, + onToggleTheme: noop, + onExportCSV: noop, + onGenerateReport: noop, + onDelete: noop, + onUpload: noop, + onAutoImport: noop, + onOpenSettings: noop, + onScrollTo: noop, + onViewModeChange: noop, + onApplyPreset: noop, + onToggleProvider: noop, + onToggleModel: noop, + onClearProviders: noop, + onClearModels: noop, + onClearDateRange: noop, + onResetAll: noop, + onHelp: noop, + onLanguageChange: noop, + ...overrides, + } +} + describe('Dashboard language regressions', () => { beforeAll(async () => { + Element.prototype.scrollIntoView = vi.fn() await initI18n('de') }) @@ -151,4 +196,18 @@ describe('Dashboard language regressions', () => { expect(document.body).not.toHaveTextContent('Quick Read') expect(document.body).not.toHaveTextContent('Streak') }) + + it('localizes command palette action groups while keeping representative command ids renderable', async () => { + renderWithAppProviders() + + fireEvent.keyDown(document, { key: 'k', metaKey: true }) + + expect(await screen.findByRole('dialog', { name: 'Command Palette' })).toBeInTheDocument() + expect(screen.getByText('Daten laden')).toBeInTheDocument() + expect(screen.getByText('Exporte')).toBeInTheDocument() + expect(screen.getByText('Wartung')).toBeInTheDocument() + expect(screen.getByTestId('command-auto-import')).toBeInTheDocument() + expect(screen.getByTestId('command-csv')).toBeInTheDocument() + expect(screen.getByTestId('command-settings-open')).toBeInTheDocument() + }) 
}) diff --git a/tests/frontend/filter-bar-presets.test.tsx b/tests/frontend/filter-bar-presets.test.tsx index c916abc..7eb9404 100644 --- a/tests/frontend/filter-bar-presets.test.tsx +++ b/tests/frontend/filter-bar-presets.test.tsx @@ -80,19 +80,6 @@ describe('FilterBar preset and chip states', () => { ) }) - it('renders quick presets in the shared display order', () => { - renderFilterBar() - - const presetLabels = screen - .getAllByRole('button') - .map((button) => button.textContent) - .filter((label): label is string => - ['7D', '30D', 'Month', 'Year', 'All'].includes(label ?? ''), - ) - - expect(presetLabels).toEqual(['7D', '30D', 'Month', 'Year', 'All']) - }) - it('marks unfiltered provider and model chips as included instead of selected', () => { renderFilterBar({ availableProviders: ['Anthropic', 'OpenAI'], diff --git a/tests/unit/command-palette-commands.test.ts b/tests/unit/command-palette-commands.test.ts index 9047ec9..162a44a 100644 --- a/tests/unit/command-palette-commands.test.ts +++ b/tests/unit/command-palette-commands.test.ts @@ -110,18 +110,22 @@ const translations: Record = { 'common.model': 'Model', } -function t(key: string, options?: Record) { - if (key === 'commandPalette.commands.goToSection.label') { - return `Go to ${String(options?.section)}` - } +function createTranslator(dictionary: Record) { + return (key: string, options?: Record) => { + if (key === 'commandPalette.commands.goToSection.label') { + return `Go to ${String(options?.section)}` + } - if (key === 'commandPalette.commands.goToSection.description') { - return `Scroll to ${String(options?.section)}` - } + if (key === 'commandPalette.commands.goToSection.description') { + return `Scroll to ${String(options?.section)}` + } - return translations[key] ?? key + return dictionary[key] ?? 
key + } } +const t = createTranslator(translations) + function buildOptions(overrides: Partial = {}) { const noop = vi.fn() @@ -195,6 +199,39 @@ describe('command palette command builder', () => { ).toEqual(['command-section-tables']) }) + it('keeps action command ids stable while separating load, export, and maintenance groups', () => { + const commands = buildCommandPaletteCommands(buildOptions({ hasTodaySection: false })) + const getCommandTestIdsForGroup = (group: string) => + commands.filter((cmd) => cmd.group === group).map((cmd) => cmd.testId ?? `command-${cmd.id}`) + + expect(getCommandTestIdsForGroup('Load data')).toEqual([ + 'command-auto-import', + 'command-upload', + ]) + expect(getCommandTestIdsForGroup('Exports')).toEqual(['command-csv', 'command-report']) + expect(getCommandTestIdsForGroup('Maintenance')).toEqual([ + 'command-settings-open', + 'command-delete', + ]) + }) + + it('localizes action command groups through the command builder contract', () => { + const commands = buildCommandPaletteCommands( + buildOptions({ + t: createTranslator({ + ...translations, + 'commandPalette.groups.loadData': 'Daten laden', + 'commandPalette.groups.exports': 'Exporte', + 'commandPalette.groups.maintenance': 'Wartung', + }), + }), + ) + + expect(commands.find((cmd) => cmd.id === 'auto-import')?.group).toBe('Daten laden') + expect(commands.find((cmd) => cmd.id === 'csv')?.group).toBe('Exporte') + expect(commands.find((cmd) => cmd.id === 'settings-open')?.group).toBe('Wartung') + }) + it('normalizes aliases and ranks matching commands for keyboard quick-select', () => { const commands = buildCommandPaletteCommands(buildOptions()) const settingsResults = filterCommandItems(commands, 'einstellungen offnen') From 7555c0e558f4a4529dab5d9ff2ed4575eba3e87a Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Thu, 30 Apr 2026 23:48:09 +0200 Subject: [PATCH 17/42] Harden process-adjacent test coverage --- tests/unit/dashboard-preferences.test.ts | 102 ++++++++++ 
tests/unit/server-helpers-file-locks.test.ts | 24 ++- tests/unit/server-helpers-runner-core.test.ts | 177 ++++++++++++++++-- 3 files changed, 278 insertions(+), 25 deletions(-) diff --git a/tests/unit/dashboard-preferences.test.ts b/tests/unit/dashboard-preferences.test.ts index 67d0579..649dd5e 100644 --- a/tests/unit/dashboard-preferences.test.ts +++ b/tests/unit/dashboard-preferences.test.ts @@ -1,11 +1,15 @@ import { describe, expect, it } from 'vitest' import dashboardPreferences from '../../shared/dashboard-preferences.json' import { + DEFAULT_DASHBOARD_FILTERS, DASHBOARD_DATE_PRESETS, DASHBOARD_QUICK_DATE_PRESETS, DASHBOARD_SECTION_DEFINITIONS, DASHBOARD_SECTION_DEFINITION_MAP, DASHBOARD_VIEW_MODES, + normalizeDashboardDefaultFilters, + normalizeDashboardSectionOrder, + normalizeDashboardSectionVisibility, parseDashboardPreferencesConfig, resolveDashboardActivePreset, resolveDashboardPresetRange, @@ -122,6 +126,96 @@ describe('dashboard preferences config', () => { ]) }) + it('rejects malformed custom preference shapes before publishing partial configs', () => { + expect(() => parseSharedDashboardPreferencesConfig(null)).toThrow('expected an object') + expect(() => + parseSharedDashboardPreferencesConfig( + { + datePresets: 'all', + viewModes: ['daily'], + sectionDefinitions: [{ id: 'overview', domId: 'overview', labelKey: 'overview' }], + }, + { + validDatePresets: ['all'], + validViewModes: ['daily'], + validSectionIds: ['overview'], + }, + ), + ).toThrow('"datePresets" must be an array') + expect(() => + parseSharedDashboardPreferencesConfig( + { + datePresets: ['all'], + viewModes: ['daily'], + sectionDefinitions: [null], + }, + { + validDatePresets: ['all'], + validViewModes: ['daily'], + validSectionIds: ['overview'], + }, + ), + ).toThrow('each "sectionDefinitions" entry must be an object') + expect(() => + parseSharedDashboardPreferencesConfig( + { + datePresets: ['all'], + viewModes: ['daily'], + sectionDefinitions: [{ id: 'overview', domId: '', 
labelKey: 'overview' }], + }, + { + validDatePresets: ['all'], + validViewModes: ['daily'], + validSectionIds: ['overview'], + }, + ), + ).toThrow('require a domId') + expect(() => + parseSharedDashboardPreferencesConfig( + { + datePresets: ['all'], + viewModes: ['daily'], + sectionDefinitions: [{ id: 'overview', domId: 'overview', labelKey: '' }], + }, + { + validDatePresets: ['all'], + validViewModes: ['daily'], + validSectionIds: ['overview'], + }, + ), + ).toThrow('require a labelKey') + }) + + it('normalizes persisted dashboard preference branches defensively', () => { + expect(normalizeDashboardDefaultFilters(null)).toEqual(DEFAULT_DASHBOARD_FILTERS) + expect( + normalizeDashboardDefaultFilters({ + viewMode: 'yearly', + datePreset: '30d', + providers: [' OpenAI ', 'OpenAI', '', 42], + models: [' Sonnet ', null, 'Sonnet'], + }), + ).toEqual({ + viewMode: 'yearly', + datePreset: '30d', + providers: ['OpenAI'], + models: ['Sonnet'], + }) + + const visibility = normalizeDashboardSectionVisibility({ + metrics: false, + today: 'false', + unknown: false, + }) + expect(visibility.metrics).toBe(false) + expect(visibility.today).toBe(true) + expect(visibility).not.toHaveProperty('unknown') + + const orderedSections = normalizeDashboardSectionOrder(['tables', 'metrics', 'tables', 7]) + expect(orderedSections.slice(0, 2)).toEqual(['tables', 'metrics']) + expect(new Set(orderedSections).size).toBe(DASHBOARD_SECTION_DEFINITIONS.length) + }) + it('resolves preset ranges through the same shared contract used by runtime consumers', () => { const referenceDate = new Date('2026-04-06T12:00:00Z') @@ -160,5 +254,13 @@ describe('dashboard preferences config', () => { startDate: monthRange.startDate, }), ).toBeNull() + expect(resolveDashboardActivePreset(null)).toBe('all') + expect( + resolveDashboardActivePreset({ + referenceDate, + startDate: '2026-04-01', + endDate: '2026-04-05', + }), + ).toBeNull() }) }) diff --git a/tests/unit/server-helpers-file-locks.test.ts 
b/tests/unit/server-helpers-file-locks.test.ts index ec25d92..a882b66 100644 --- a/tests/unit/server-helpers-file-locks.test.ts +++ b/tests/unit/server-helpers-file-locks.test.ts @@ -354,7 +354,13 @@ dataRuntime.withFileMutationLock(filePath, async () => { if (typeof process.send === 'function') { process.send('locked') } - await new Promise((resolve) => setTimeout(resolve, 250)) + await new Promise((resolve) => { + process.once('message', (message) => { + if (message === 'release') { + resolve() + } + }) + }) }).then(() => process.exit(0)).catch((error) => { console.error(error) process.exit(1) @@ -370,11 +376,19 @@ dataRuntime.withFileMutationLock(filePath, async () => { try { await waitForChildMessage(child, 5_000) - const startedAt = Date.now() - await withFileMutationLock(targetFile, async () => undefined) - const waitedMs = Date.now() - startedAt + let parentAcquiredLock = false + const parentLock = withFileMutationLock(targetFile, async () => { + parentAcquiredLock = true + }) + + await vi.waitFor(() => { + expect(getPendingFileMutationLockCount()).toBeGreaterThan(0) + }) + expect(parentAcquiredLock).toBe(false) - expect(waitedMs).toBeGreaterThanOrEqual(150) + child.send('release') + await parentLock + expect(parentAcquiredLock).toBe(true) if (child.exitCode === null) { await waitForChildExit(child, 5_000) diff --git a/tests/unit/server-helpers-runner-core.test.ts b/tests/unit/server-helpers-runner-core.test.ts index 033cc61..78cab66 100644 --- a/tests/unit/server-helpers-runner-core.test.ts +++ b/tests/unit/server-helpers-runner-core.test.ts @@ -55,9 +55,15 @@ function createSpawnSequence(outcomes: FakeSpawnOutcome[]) { }) } -function createRuntimeWithSpawn(spawnImpl: ReturnType) { +function createRuntimeWithSpawn( + spawnImpl: ReturnType, + options: { localBinExists?: boolean } = {}, +) { return createAutoImportRuntime({ - fs: { existsSync: () => false }, + fs: { + existsSync: (filePath: string) => + Boolean(options.localBinExists && filePath === 
'/missing/toktrack'), + }, processObject: { env: {} }, spawnCrossPlatform: spawnImpl, normalizeIncomingData: (input: unknown) => input, @@ -134,6 +140,47 @@ describe('server helper utilities: toktrack runner core behavior', () => { expect(getToktrackLatestLookupTimeoutMs()).toBe(15000) }) + it('resolves a compatible local toktrack runner without probing package fallbacks', async () => { + const spawnImpl = createSpawnSequence([{ stdout: `toktrack ${TOKTRACK_VERSION}\n` }]) + const runtime = createRuntimeWithSpawn(spawnImpl, { localBinExists: true }) + + await expect(runtime.resolveToktrackRunner()).resolves.toMatchObject({ + command: '/missing/toktrack', + method: 'local', + label: 'local toktrack', + }) + expect(spawnImpl).toHaveBeenCalledTimes(1) + expect(spawnImpl).toHaveBeenCalledWith( + '/missing/toktrack', + ['--version'], + expect.objectContaining({ + stdio: ['ignore', 'pipe', 'pipe'], + }), + ) + }) + + it('falls back to npm when the local runner version mismatches and bunx probing fails', async () => { + const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => undefined) + const spawnImpl = createSpawnSequence([ + { stdout: 'toktrack 0.0.0\n' }, + { code: 1, stderr: 'bunx failed\n' }, + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl, { localBinExists: true }) + + try { + await expect(runtime.resolveToktrackRunner()).resolves.toMatchObject({ + command: 'npx', + method: 'npm', + label: 'npm exec', + }) + expect(spawnImpl).toHaveBeenCalledTimes(3) + expect(warnSpy).toHaveBeenCalledWith(expect.stringContaining('Failed to probe bunx')) + } finally { + warnSpy.mockRestore() + } + }) + it('returns a structured warning when the latest toktrack version lookup times out', async () => { vi.useFakeTimers() const runtime = createRuntimeWithSpawn(createSpawnSequence([{ hang: true }])) @@ -222,7 +269,83 @@ describe('server helper utilities: toktrack runner core behavior', () => { } }) + it('uses stdout as 
the toktrack command error message when stderr is empty', async () => { + const spawnImpl = createSpawnSequence([{ code: 1, stdout: 'stdout failure\n' }]) + const runtime = createRuntimeWithSpawn(spawnImpl) + + await expect( + runtime.runToktrack( + { + command: 'fake-runner', + prefixArgs: ['--prefix'], + env: { PATH: '/fake-bin' }, + }, + ['--version'], + ), + ).rejects.toMatchObject({ + message: 'stdout failure', + stdout: 'stdout failure\n', + stderr: '', + exitCode: 1, + }) + expect(spawnImpl).toHaveBeenCalledWith( + 'fake-runner', + ['--prefix', '--version'], + expect.objectContaining({ + stdio: ['ignore', 'pipe', 'pipe'], + env: { PATH: '/fake-bin' }, + }), + ) + }) + + it('streams stderr and lets callers terminate a running command on close', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => child) + const stderrLines: string[] = [] + let closeCommand: (() => void) | null = null + + const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { + streamStderr: true, + onStderr: (line) => stderrLines.push(line), + signalOnClose: (close) => { + closeCommand = close + }, + spawnImpl, + }) + + child.stderr.emit('data', Buffer.from('runner warning\n')) + expect(stderrLines).toEqual(['runner warning']) + + closeCommand?.() + + await expect(outcomePromise).rejects.toMatchObject({ + message: 'runner warning', + stderr: 'runner warning\n', + exitCode: 143, + }) + }) + + it('wraps spawn errors with command diagnostics', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => child) + const outcomePromise = runCommandWithSpawn('missing-runner', ['--version'], { + spawnImpl, + }) + + child.emit('error', new Error('spawn denied')) + + await expect(outcomePromise).rejects.toMatchObject({ + message: 'spawn denied', + command: 'missing-runner', + args: ['--version'], + stdout: '', + stderr: '', + }) + }) + it('waits for timed-out toktrack commands to exit before rejecting', async () => { + 
vi.useFakeTimers() + class FakeChild extends EventEmitter { stdout = new EventEmitter() stderr = new EventEmitter() @@ -243,27 +366,41 @@ describe('server helper utilities: toktrack runner core behavior', () => { } const child = new FakeChild() + const spawnImpl = vi.fn(() => child) as unknown as ReturnType + const runtime = createRuntimeWithSpawn(spawnImpl) let settled = false - const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { - timeoutMs: 50, - spawnImpl: () => child, - }) - .then((value) => ({ ok: true as const, value })) - .catch((error) => ({ ok: false as const, error })) - .finally(() => { - settled = true - }) - await new Promise((resolve) => setTimeout(resolve, 75)) - expect(settled).toBe(false) + try { + const outcomePromise = runtime + .runToktrack( + { + command: 'fake-runner', + prefixArgs: [], + env: {}, + }, + ['daily', '--json'], + { timeoutMs: 50 }, + ) + .then((value) => ({ ok: true as const, value })) + .catch((error) => ({ ok: false as const, error })) + .finally(() => { + settled = true + }) - await expect(outcomePromise).resolves.toMatchObject({ - ok: false, - error: { - message: expect.stringContaining('Command timed out'), - timedOut: true, - }, - }) + await vi.advanceTimersByTimeAsync(50) + expect(settled).toBe(false) + + await vi.advanceTimersByTimeAsync(150) + await expect(outcomePromise).resolves.toMatchObject({ + ok: false, + error: { + message: expect.stringContaining('Command timed out'), + timedOut: true, + }, + }) + } finally { + vi.useRealTimers() + } }) it('prioritizes local version mismatches over generic no-runner feedback', () => { From f2b34008b452af11ff7490239ac7f1db766d0c9f Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Thu, 30 Apr 2026 23:56:03 +0200 Subject: [PATCH 18/42] Cover router auto-import error paths --- tests/unit/http-router-mutations.test.ts | 115 +++++++++++++++++++++-- 1 file changed, 109 insertions(+), 6 deletions(-) diff --git a/tests/unit/http-router-mutations.test.ts 
b/tests/unit/http-router-mutations.test.ts index f773023..e078668 100644 --- a/tests/unit/http-router-mutations.test.ts +++ b/tests/unit/http-router-mutations.test.ts @@ -6,7 +6,12 @@ const require = createRequire(import.meta.url) const { createHttpRouter } = require('../../server/http-router.js') as { createHttpRouter: (options: Record) => { handleServerRequest: ( - req: { url: string; method: string; headers: Record }, + req: { + url: string + method: string + headers: Record + on?: (event: string, listener: () => void) => unknown + }, res: MockResponse, ) => Promise } @@ -22,8 +27,14 @@ class MockResponse { this.headers = headers } + write(body: string | Buffer) { + this.body += Buffer.isBuffer(body) ? body.toString('utf8') : body + } + end(body?: string | Buffer) { - this.body = Buffer.isBuffer(body) ? body.toString('utf8') : (body ?? '') + if (body !== undefined) { + this.body += Buffer.isBuffer(body) ? body.toString('utf8') : body + } } } @@ -157,10 +168,13 @@ async function requestRaw( method: string, ) { const res = new MockResponse() - await router.handleServerRequest( - { url, method, headers: { 'content-type': 'application/json' } }, - res, - ) + const req = { + url, + method, + headers: { 'content-type': 'application/json' }, + on: vi.fn(), + } + await router.handleServerRequest(req, res) return { res } } @@ -373,6 +387,84 @@ describe('HTTP router mutation errors', () => { }) }) + it('streams auto-import success events and releases the acquired lease', async () => { + const lease = { release: vi.fn() } + const closeImport = vi.fn() + const performAutoImport = vi.fn(async ({ onCheck, onOutput, onProgress, signalOnClose }) => { + signalOnClose(closeImport) + onCheck({ tool: 'toktrack', status: 'found' }) + onProgress({ key: 'startingLocalImport', vars: {} }) + onOutput('runner warning') + return { days: 2, totalCost: 3.5 } + }) + const { router } = createRouter({ + autoImportRuntimeOverrides: { + acquireAutoImportLease: vi.fn(() => lease), + 
performAutoImport, + }, + }) + + const { res } = await requestRaw(router, '/api/auto-import/stream', 'POST') + + expect(res.status).toBe(200) + expect(res.headers['Content-Type']).toBe('text/event-stream') + expect(res.body).toContain('event: check') + expect(res.body).toContain('"status":"found"') + expect(res.body).toContain('event: progress') + expect(res.body).toContain('event: stderr') + expect(res.body).toContain('runner warning') + expect(res.body).toContain('event: success') + expect(res.body).toContain('"totalCost":3.5') + expect(res.body).toContain('event: done') + expect(performAutoImport).toHaveBeenCalledWith( + expect.objectContaining({ + source: 'auto-import', + lease, + }), + ) + expect(lease.release).toHaveBeenCalledTimes(1) + }) + + it('maps concurrent auto-import starts to a localized conflict response', async () => { + const { router } = createRouter({ + autoImportRuntimeOverrides: { + acquireAutoImportLease: vi.fn(() => { + throw Object.assign(new Error('already running'), { messageKey: 'autoImportRunning' }) + }), + createAutoImportMessageEvent: vi.fn((key: string) => ({ key })), + formatAutoImportMessageEvent: vi.fn(() => 'An auto-import is already running.'), + }, + }) + + const { res, body } = await request(router, '/api/auto-import/stream', 'POST') + + expect(res.status).toBe(409) + expect(body).toEqual({ message: 'An auto-import is already running.' 
}) + }) + + it('streams structured auto-import errors and releases the lease after failures', async () => { + const lease = { release: vi.fn() } + const { router } = createRouter({ + autoImportRuntimeOverrides: { + acquireAutoImportLease: vi.fn(() => lease), + performAutoImport: vi.fn(async () => { + throw new Error('toktrack failed') + }), + toAutoImportErrorEvent: vi.fn((error: Error) => ({ + message: error.message, + })), + }, + }) + + const { res } = await requestRaw(router, '/api/auto-import/stream', 'POST') + + expect(res.status).toBe(200) + expect(res.body).toContain('event: error') + expect(res.body).toContain('"message":"toktrack failed"') + expect(res.body).toContain('event: done') + expect(lease.release).toHaveBeenCalledTimes(1) + }) + it('rejects untrusted hosts before API auth is evaluated', async () => { const validateApiRequest = vi.fn(() => ({ status: 401, @@ -471,6 +563,14 @@ describe('HTTP router mutation errors', () => { it('maps PDF body and generator failures to client or service errors', async () => { const usageData = { daily: [{ date: '2026-04-27' }], totals: { totalCost: 1 } } + const invalidBodyRouter = createRouter({ + dataRuntimeOverrides: { + readData: vi.fn(() => usageData), + }, + readBody: vi.fn(async () => { + throw new Error('broken report JSON') + }), + }).router const oversizedRouter = createRouter({ dataRuntimeOverrides: { readData: vi.fn(() => usageData), @@ -489,9 +589,12 @@ describe('HTTP router mutation errors', () => { readBody: vi.fn(async () => ({ locale: 'de-CH' })), }).router + const invalidBody = await request(invalidBodyRouter, '/api/report/pdf', 'POST') const oversized = await request(oversizedRouter, '/api/report/pdf', 'POST') const typstMissing = await request(typstMissingRouter, '/api/report/pdf', 'POST') + expect(invalidBody.res.status).toBe(400) + expect(invalidBody.body).toEqual({ message: 'Invalid report request' }) expect(oversized.res.status).toBe(413) expect(oversized.body).toEqual({ message: 'Report 
request too large' }) expect(typstMissing.res.status).toBe(503) From eab08c595aff8044cb352b3d55507cdb10c97826 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Fri, 1 May 2026 00:05:48 +0200 Subject: [PATCH 19/42] Harden parallel gate diagnostics --- docs/testing.md | 6 + scripts/run-parallel-gate.js | 118 +++++++++++++++++--- tests/frontend/metric-ratio-locale.test.tsx | 9 +- tests/unit/run-parallel-gate.test.ts | 83 +++++++++++++- 4 files changed, 200 insertions(+), 16 deletions(-) diff --git a/docs/testing.md b/docs/testing.md index d6d02fe..3bb4dd3 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -192,6 +192,12 @@ from the jsdom/build storm. `verify:package` runs after those waves succeed. unchanged; use the parallel gates for local feedback when the machine has enough CPU and the per-project JUnit report paths must stay isolated. +Use `node scripts/run-parallel-gate.js --dry-run --e2e` after changing gate scripts to inspect the +task waves and their declared report outputs before running the expensive full gate. The script +fails before spawning children if two tasks in the same wave declare the same output path, and a +successful run prints a per-task timing summary so local bottlenecks are visible without opening +JUnit reports. + ## CI Notes - Keep workflow test paths aligned with the split test structure. diff --git a/scripts/run-parallel-gate.js b/scripts/run-parallel-gate.js index 6de7589..270e699 100644 --- a/scripts/run-parallel-gate.js +++ b/scripts/run-parallel-gate.js @@ -4,30 +4,43 @@ const { spawn } = require('child_process'); const npmCommand = process.platform === 'win32' ? 
'npm.cmd' : 'npm'; -function createTask(name, script) { +function createTask(name, script, outputPaths = []) { return { name, command: npmCommand, args: ['run', script], + outputPaths, }; } function createParallelGatePlan({ e2e = false } = {}) { const buildAndApiGroup = [ createTask('static', 'test:static'), - createTask('integration', 'test:vitest:integration'), + createTask('integration', 'test:vitest:integration', [ + 'test-results/vitest-integration.junit.xml', + ]), createTask('build', 'build:app'), ]; - const unitGroup = [createTask('unit', 'test:vitest:unit')]; - const frontendGroup = [createTask('frontend', 'test:vitest:frontend')]; - const architectureGroup = [createTask('architecture', 'test:vitest:architecture')]; + const unitGroup = [ + createTask('unit', 'test:vitest:unit', ['test-results/vitest-unit.junit.xml']), + ]; + const frontendGroup = [ + createTask('frontend', 'test:vitest:frontend', ['test-results/vitest-frontend.junit.xml']), + ]; + const architectureGroup = [ + createTask('architecture', 'test:vitest:architecture', [ + 'test-results/vitest-architecture.junit.xml', + ]), + ]; const backgroundGroup = [ - createTask('integration-background', 'test:vitest:integration-background'), + createTask('integration-background', 'test:vitest:integration-background', [ + 'test-results/vitest-integration-background.junit.xml', + ]), ]; const finalGroup = [createTask('package-smoke', 'verify:package')]; if (e2e) { - finalGroup.push(createTask('e2e', 'test:e2e:ci')); + finalGroup.push(createTask('e2e', 'test:e2e:ci', ['playwright-report/', 'test-results/'])); } return [ @@ -62,6 +75,34 @@ function parseArgs(argv) { return options; } +function findParallelOutputCollisions(plan) { + const collisions = []; + + plan.forEach((group, groupIndex) => { + const outputOwners = new Map(); + + for (const task of group) { + for (const outputPath of task.outputPaths || []) { + const owners = outputOwners.get(outputPath) || []; + owners.push(task.name); + 
outputOwners.set(outputPath, owners); + } + } + + for (const [outputPath, tasks] of outputOwners.entries()) { + if (tasks.length > 1) { + collisions.push({ + group: groupIndex + 1, + outputPath, + tasks, + }); + } + } + }); + + return collisions; +} + function printUsage(stdout) { stdout.write(`Usage: node scripts/run-parallel-gate.js [options] @@ -79,9 +120,14 @@ function prefixChunk(stream, taskName, chunk) { } } +function formatDuration(durationMs) { + return `${(durationMs / 1000).toFixed(2)}s`; +} + function runTask(task, spawnImpl = spawn, streams = process) { return new Promise((resolve) => { streams.stdout.write(`[${task.name}] starting ${task.command} ${task.args.join(' ')}\n`); + const startedAt = Date.now(); const child = spawnImpl(task.command, task.args, { cwd: process.cwd(), @@ -92,19 +138,42 @@ function runTask(task, spawnImpl = spawn, streams = process) { child.stdout.on('data', (chunk) => prefixChunk(streams.stdout, task.name, chunk)); child.stderr.on('data', (chunk) => prefixChunk(streams.stderr, task.name, chunk)); child.on('error', (error) => { + const durationMs = Date.now() - startedAt; streams.stderr.write(`[${task.name}] failed to start: ${error.message}\n`); - resolve({ task, status: 1 }); + resolve({ durationMs, task, status: 1 }); }); child.on('close', (status) => { - streams.stdout.write(`[${task.name}] exited with ${status ?? 1}\n`); - resolve({ task, status: status ?? 1 }); + const durationMs = Date.now() - startedAt; + streams.stdout.write( + `[${task.name}] exited with ${status ?? 1} after ${formatDuration(durationMs)}\n`, + ); + resolve({ durationMs, task, status: status ?? 1 }); }); }); } async function runTaskGroup(tasks, spawnImpl, streams) { const results = await Promise.all(tasks.map((task) => runTask(task, spawnImpl, streams))); - return results.every((result) => result.status === 0) ? 0 : 1; + return { + results, + status: results.every((result) => result.status === 0) ? 
0 : 1, + }; +} + +function printTimingSummary(results, streams = process) { + if (results.length === 0) { + return; + } + + streams.stdout.write('\nparallel gate timing summary\n'); + + for (const result of results) { + streams.stdout.write( + ` ${result.task.name}: status=${result.status} duration=${formatDuration( + result.durationMs, + )}\n`, + ); + } } async function run(argv = process.argv.slice(2), streams = process, spawnImpl = spawn) { @@ -123,25 +192,45 @@ async function run(argv = process.argv.slice(2), streams = process, spawnImpl = } const plan = createParallelGatePlan({ e2e: options.e2e }); + const outputCollisions = findParallelOutputCollisions(plan); + + if (outputCollisions.length > 0) { + streams.stderr.write('Parallel gate output collision detected.\n'); + outputCollisions.forEach((collision) => { + streams.stderr.write( + ` group ${collision.group}: ${collision.outputPath} <- ${collision.tasks.join(', ')}\n`, + ); + }); + return 1; + } if (options.dryRun) { plan.forEach((group, index) => { streams.stdout.write(`group ${index + 1}\n`); group.forEach((task) => { streams.stdout.write(` ${task.name}: ${task.command} ${task.args.join(' ')}\n`); + if (task.outputPaths.length > 0) { + streams.stdout.write(` outputs: ${task.outputPaths.join(', ')}\n`); + } }); }); return 0; } + const allResults = []; + for (const [index, group] of plan.entries()) { streams.stdout.write(`\nparallel gate group ${index + 1}/${plan.length}\n`); - const status = await runTaskGroup(group, spawnImpl, streams); - if (status !== 0) { - return status; + const groupResult = await runTaskGroup(group, spawnImpl, streams); + allResults.push(...groupResult.results); + + if (groupResult.status !== 0) { + printTimingSummary(allResults, streams); + return groupResult.status; } } + printTimingSummary(allResults, streams); return 0; } @@ -157,6 +246,7 @@ if (require.main === module) { module.exports = { createParallelGatePlan, + findParallelOutputCollisions, parseArgs, run, }; diff --git 
a/tests/frontend/metric-ratio-locale.test.tsx b/tests/frontend/metric-ratio-locale.test.tsx index 7245e74..44d4f8a 100644 --- a/tests/frontend/metric-ratio-locale.test.tsx +++ b/tests/frontend/metric-ratio-locale.test.tsx @@ -1,7 +1,7 @@ // @vitest-environment jsdom import { render } from '@testing-library/react' -import { beforeEach, describe, expect, it, vi } from 'vitest' +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' import { MonthMetrics } from '@/components/cards/MonthMetrics' import { PrimaryMetrics } from '@/components/cards/PrimaryMetrics' import { TooltipProvider } from '@/components/ui/tooltip' @@ -74,6 +74,8 @@ describe('metric ratio localization', () => { ] beforeEach(async () => { + vi.useFakeTimers() + vi.setSystemTime(new Date('2026-04-30T12:00:00Z')) vi.stubGlobal( 'IntersectionObserver', class { @@ -85,6 +87,11 @@ describe('metric ratio localization', () => { await initI18n('de') }) + afterEach(() => { + vi.unstubAllGlobals() + vi.useRealTimers() + }) + it('formats the primary metrics I/O ratio with the active locale', () => { const ratio = new Intl.NumberFormat(getCurrentLocale(), { minimumFractionDigits: 1, diff --git a/tests/unit/run-parallel-gate.test.ts b/tests/unit/run-parallel-gate.test.ts index 2f5a03b..5ae65da 100644 --- a/tests/unit/run-parallel-gate.test.ts +++ b/tests/unit/run-parallel-gate.test.ts @@ -1,6 +1,6 @@ import { createRequire } from 'node:module' import { EventEmitter } from 'node:events' -import { describe, expect, it, vi } from 'vitest' +import { afterEach, describe, expect, it, vi } from 'vitest' const require = createRequire(import.meta.url) @@ -8,10 +8,14 @@ type GateTask = { args: string[] command: string name: string + outputPaths: string[] } type ParallelGateScript = { createParallelGatePlan: (options?: { e2e?: boolean }) => GateTask[][] + findParallelOutputCollisions: ( + plan: GateTask[][], + ) => { group: number; outputPath: string; tasks: string[] }[] parseArgs: (argv: string[]) => { 
dryRun: boolean; e2e: boolean; help: boolean } run: ( argv: string[], @@ -31,6 +35,10 @@ class FakeChild extends EventEmitter { const parallelGate = require('../../scripts/run-parallel-gate.js') as ParallelGateScript describe('parallel verification gate', () => { + afterEach(() => { + vi.restoreAllMocks() + }) + it('keeps independent work parallel and isolates high-contention test suites', () => { const plan = parallelGate.createParallelGatePlan({ e2e: true }) @@ -49,6 +57,52 @@ describe('parallel verification gate', () => { expect(plan[5]?.find((task) => task.name === 'e2e')?.args).toEqual(['run', 'test:e2e:ci']) }) + it('declares output paths without collisions inside parallel groups', () => { + const plan = parallelGate.createParallelGatePlan({ e2e: true }) + + expect(parallelGate.findParallelOutputCollisions(plan)).toEqual([]) + expect(plan[0]?.find((task) => task.name === 'integration')?.outputPaths).toEqual([ + 'test-results/vitest-integration.junit.xml', + ]) + expect(plan[1]?.find((task) => task.name === 'unit')?.outputPaths).toEqual([ + 'test-results/vitest-unit.junit.xml', + ]) + expect(plan[2]?.find((task) => task.name === 'frontend')?.outputPaths).toEqual([ + 'test-results/vitest-frontend.junit.xml', + ]) + expect(plan[5]?.find((task) => task.name === 'e2e')?.outputPaths).toEqual([ + 'playwright-report/', + 'test-results/', + ]) + }) + + it('detects duplicate report outputs within a parallel group', () => { + const duplicatePlan = [ + [ + { + args: ['run', 'left'], + command: 'npm', + name: 'left', + outputPaths: ['test-results/shared.junit.xml'], + }, + { + args: ['run', 'right'], + command: 'npm', + name: 'right', + outputPaths: ['test-results/shared.junit.xml'], + }, + ], + ] + + expect(parallelGate.findParallelOutputCollisions(duplicatePlan)).toEqual([ + { + group: 1, + outputPath: 'test-results/shared.junit.xml', + tasks: ['left', 'right'], + }, + ]) + }) + it('parses dry-run and e2e options', () => { expect(parallelGate.parseArgs(['--dry-run', 
'--e2e'])).toEqual({ dryRun: true, @@ -68,6 +122,9 @@ describe('parallel verification gate', () => { expect(spawnImpl).not.toHaveBeenCalled() expect(stdout.write.mock.calls.join('\n')).toContain('group 1') expect(stdout.write.mock.calls.join('\n')).toContain('frontend: npm run test:vitest:frontend') + expect(stdout.write.mock.calls.join('\n')).toContain( + 'outputs: test-results/vitest-frontend.junit.xml', + ) expect(stdout.write.mock.calls.join('\n')).toContain('e2e: npm run test:e2e:ci') }) @@ -85,4 +142,28 @@ describe('parallel verification gate', () => { expect(status).toBe(1) expect(spawnImpl).toHaveBeenCalledTimes(3) }) + + it('prints task durations and a timing summary', async () => { + let now = 1_000 + vi.spyOn(Date, 'now').mockImplementation(() => now) + const stdout = { write: vi.fn() } + const stderr = { write: vi.fn() } + const spawnImpl = vi.fn(() => { + const child = new FakeChild() + queueMicrotask(() => { + now += 1_250 + child.emit('close', 0) + }) + return child + }) + + const status = await parallelGate.run([], { stdout, stderr }, spawnImpl) + const output = stdout.write.mock.calls.join('\n') + + expect(status).toBe(0) + expect(output).toContain('[static] exited with 0 after 1.25s') + expect(output).toContain('parallel gate timing summary') + expect(output).toContain('static: status=0 duration=1.25s') + expect(output).toContain('package-smoke: status=0 duration=1.25s') + }) }) From efb9ceefcf3375ad0e04ed5d2892ce40704cf8f9 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Fri, 1 May 2026 00:11:25 +0200 Subject: [PATCH 20/42] Guard Playwright smoke scope --- docs/testing.md | 1 + tests/unit/playwright-config.test.ts | 68 ++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+) diff --git a/docs/testing.md b/docs/testing.md index 3bb4dd3..f4fc49b 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -64,6 +64,7 @@ Architecture constraints are documented separately in [`docs/architecture.md`](. 
- For large server helper or integration suites, group tests by subsystem so Vitest can schedule them more efficiently. - Keep background-process integration files focused by behavior; the background Vitest project intentionally uses a small worker cap instead of one serial catch-all file or unbounded process fan-out. - Keep Playwright files grouped by end-to-end journey, such as load/upload, forecast/filter interaction, settings/backups, reporting, and command palette behavior. Import `test` and `expect` from `tests/e2e/fixtures.ts` so each worker gets its own server, port, auth session, and runtime directory. Share authentication, server reset, seeding, and download helpers through `tests/e2e/helpers.ts` instead of creating new browser catch-all files. +- `tests/unit/playwright-config.test.ts` guards the small E2E journey list, the shared fixture import, CI worker cap, and Playwright reporter paths. Update that contract intentionally when adding a new browser journey. ## Choosing the Right Layer diff --git a/tests/unit/playwright-config.test.ts b/tests/unit/playwright-config.test.ts index 0cfc78c..ef046ef 100644 --- a/tests/unit/playwright-config.test.ts +++ b/tests/unit/playwright-config.test.ts @@ -1,8 +1,18 @@ +import { readdir, readFile } from 'node:fs/promises' +import path from 'node:path' import { afterEach, describe, expect, it, vi } from 'vitest' import packageJson from '../../package.json' type PlaywrightConfig = { fullyParallel?: boolean + reporter?: unknown + testDir?: string + timeout?: number + use?: { + screenshot?: string + trace?: string + video?: string + } webServer?: unknown workers?: number } @@ -21,6 +31,18 @@ async function importPlaywrightConfig(ci: string | undefined): Promise filename.endsWith('.spec.ts')) + + return Promise.all( + filenames.sort().map(async (filename) => ({ + filename, + source: await readFile(path.join(e2eDir, filename), 'utf8'), + })), + ) +} + describe('playwright config', () => { afterEach(() => { if (originalCi === 
undefined) { @@ -35,6 +57,13 @@ describe('playwright config', () => { expect(localConfig).toMatchObject({ fullyParallel: true, + testDir: './tests/e2e', + timeout: 30_000, + use: { + screenshot: 'only-on-failure', + trace: 'on-first-retry', + video: 'retain-on-failure', + }, workers: undefined, }) expect(localConfig.webServer).toBeUndefined() @@ -49,4 +78,43 @@ describe('playwright config', () => { expect(packageJson.scripts['test:e2e:ci']).toBe('playwright test --workers=2') expect(packageJson.scripts['test:e2e:ci']).not.toContain('CI=1') }, 10_000) + + it('keeps Playwright reports in stable job-scoped output paths', async () => { + const localConfig = await importPlaywrightConfig(undefined) + + expect(localConfig.reporter).toEqual([ + ['list'], + ['html', { outputFolder: 'playwright-report', open: 'never' }], + ['junit', { outputFile: 'test-results/playwright.junit.xml' }], + ]) + }) + + it('keeps browser specs on the shared worker-isolated fixture', async () => { + const specs = await readE2ESpecs() + + expect(specs.map((spec) => spec.filename)).toEqual([ + 'command-palette.spec.ts', + 'dashboard-forecast-filters.spec.ts', + 'dashboard-load-upload.spec.ts', + 'dashboard-reporting.spec.ts', + 'dashboard-settings-backups.spec.ts', + ]) + + for (const spec of specs) { + expect(spec.source).toContain("from './fixtures'") + expect(spec.source).not.toContain("from '@playwright/test'") + } + }) + + it('keeps Playwright as a representative smoke suite instead of a contract matrix', async () => { + const specs = await readE2ESpecs() + const totalTests = specs.reduce((count, spec) => { + return count + (spec.source.match(/\btest\(/g) ?? 
[]).length + }, 0) + + expect(totalTests).toBe(11) + expect(totalTests).toBeLessThanOrEqual(12) + expect(packageJson.scripts['test:e2e:parallel']).toBe('npm run build:app && playwright test') + expect(packageJson.scripts['test:e2e']).toBe('npm run test:e2e:parallel') + }) }) From 7e600088feaacf652630c8100a7b5f1116407f18 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Fri, 1 May 2026 00:51:02 +0200 Subject: [PATCH 21/42] Refresh test review baseline --- docs/review/test-review.md | 804 +++++++++++++++++++------------------ 1 file changed, 422 insertions(+), 382 deletions(-) diff --git a/docs/review/test-review.md b/docs/review/test-review.md index 1c57134..73ba54c 100644 --- a/docs/review/test-review.md +++ b/docs/review/test-review.md @@ -2,415 +2,455 @@ ## Kurzfazit -Die Testbasis ist breit, aber das Testsystem ist noch nicht auf maximale lokale und CI-Performance -ausgelegt. Der groesste Engpass ist nicht der Build, sondern die Kombination aus serieller Pipeline, -nicht isolierbarer Playwright-Ausfuehrung und hohem `frontend/jsdom`-Overhead. +Stand: 2026-05-01, lokale Arbeitskopie nach den bisherigen +Testsystem-Fixes. + +Die Testpipeline ist inzwischen solide strukturiert: Vitest ist in Projekte +getrennt, die CI laeuft als DAG, lokale Timing-Kommandos schreiben +projektbezogene Reports, Playwright nutzt worker-isolierte Server, und das +optionale lokale Parallel-Gate ist lastbewusst. Der naechste grosse +Performance- und Robustheitsschritt liegt deshalb nicht in pauschal mehr +Parallelisierung, sondern in gezielter Reduktion von jsdom-, Prozess-, +Registry- und Coverage-Kosten. + +Groesste verbleibende Hebel: + +- `tests/frontend` ist mit Abstand der groesste Vitest-Walltime-Block. +- `integration-background` ist der kritischste Robustheitsbereich, weil echte + Background-Prozesse parallel starten und Registry-/Port-/Readiness-Zustand + teilen. 
+- Prozess-, Port-, Lock-, PATH-, Runner- und CLI-Tests sind fachlich wertvoll, + duerfen aber nicht fuer reine Branch-Coverage missbraucht werden. +- Coverage ist global gut, aber die wichtigsten Branch-Luecken liegen weiter + in Server-Runtime, HTTP-Router, Report, Auto-Import und + Dashboard-Controller-Actions. +- Die CI ist schon sinnvoll parallelisiert. Weitere Beschleunigung muss anhand + echter Timing-Daten erfolgen, damit keine Report-Kollisionen oder neue + Flakes entstehen. + +Keine der folgenden Verbesserungen darf App-Aussehen, Texte, Layout, +Animationen, User-Flows oder produktives Verhalten aendern. Der Umbau betrifft +Tests, Testhelfer, Testkonfiguration, CI-Orchestrierung und Dokumentation. + +## Aktuelle Messbasis + +Frischer lokaler Lauf: +`node scripts/run-vitest-project-timings.js --projects=architecture,unit,frontend,integration,integration-background --repeat=1` + +| Projekt | Umfang | Walltime | Aufsummierte Kosten | Bewertung | +| --- | ---: | ---: | --- | --- | +| `architecture` | 10 Dateien, 22 Tests | `4.78s` | Import `1.40s`, Tests `1.04s` | Stabil, nicht Hauptproblem. | +| `unit` | 57 Dateien, 380 Tests | `4.40s` | Import `3.58s`, Tests `3.76s` | Schnell; Hotspots sind echte Prozess-/Lock-Smokes. | +| `frontend` | 76 Dateien, 204 Tests | `30.66s` | Setup `17.90s`, Import `31.28s`, Tests `34.66s`, Environment `71.10s` | Groesster Block und wichtigster Performance-Hebel. | +| `integration` | 13 Dateien, 44 Tests | `4.03s` | Tests `9.08s` | Meist gesund; teuer durch echte Server/HTTP/CLI. | +| `integration-background` | 4 Dateien, 5 Tests | `3.07s` | Tests `4.03s` | Klein, aber robustheitskritisch durch echte Background-Prozesse. | + +Aktuelle Coverage aus `coverage/lcov.info`: + +| Kennzahl | Wert | +| --- | ---: | +| Lines | `83.96%` | +| Branches | `70.02%` | +| Functions | `83.12%` | + +Die globale Coverage ist damit gruen. 
Der relevante naechste Schritt ist nicht, +beliebige einfache UI-Wrapper auf 100% zu bringen, sondern kritische +Fehler-, Timeout-, Permission-, Registry-, Import-, Export- und Report-Branches +zu schliessen. + +## Bottlenecks + +### 1. Frontend/jsdom dominiert die Walltime + +`frontend` braucht lokal rund `30s`; die aufsummierte Environment-Zeit von +`71.10s` und Import-Zeit von `31.28s` zeigt den Kern des Problems. Viele +Dateien bezahlen wiederholt jsdom, React Testing Library, i18n, +Browser-API-Shims, Radix, Recharts, cmdk und Framer Motion. Mehr Worker kann +diese Kosten nur teilweise verdecken; ab einem Punkt steigen CPU- und +Speicherdruck. + +Aktuelle Hotspots: + +| Suite/Testbereich | Signal | Ursache | +| --- | ---: | --- | +| `filter-bar-date-picker.test.tsx` | `1.353s` | Kalenderdialog, Focus, Fake-Timer, Keyboard. | +| `drill-down-modal-motion.test.tsx` | `1.322s` | Modal, Motion, Re-Render und Positioning. | +| `settings-modal-sections.test.tsx` | `1.279s` | Settings-Provider, Draft-State, Save-Flow. | +| `settings-modal-tabs.test.tsx` | `1.240s` | Modal-Tabs, i18n, Re-Render. | +| `settings-modal-provider-limits.test.tsx` | `1.215s` | Provider-Limits, Draft-Mapping, Controls. | +| `sortable-table-recent-days.test.tsx` | `1.064s` | Tabelleninteraktion und ARIA-Sort-State. | +| `drill-down-modal-regressions.test.tsx` | `1.040s` | Breite Modal-Regressionen. | +| `request-quality.test.tsx` | `0.930s` | IntersectionObserver, Reveal-Animation, Metric Cards. | + +Die teuersten Einzeltests bestaetigen das Muster: DOM-/ARIA-/Focus-Verhalten +ist wertvoll, aber pure Datenableitung, Sortierung, Labelbildung und +Konfigurationsmapping sollten nicht in jsdom liegen. + +Optimierung: + +- Jede `tests/frontend`-Datei klassifizieren: pure logic, hook state, + component interaction, chart rendering, accessibility/ARIA, motion/reveal, + regression smoke. 
+- Pure Logik nach `tests/unit` verschieben: Command-Building, + Tabellen-Sortierung, Datepicker-Daten, Settings-Transformationen, + Chart-Datenformung, Label-/Formatierungslogik. +- Komponententests fokussieren: ein User-observable Verhalten pro Test, + keine langen Multi-Regression-Renders. +- Zentrale Mocks/Helper ausbauen fuer Recharts, IntersectionObserver, + ResizeObserver, matchMedia, requestAnimationFrame und Motion. +- `initI18n(...)` nur in Dateien explizit aufrufen, die Sprache variieren oder + lokalisierte Ausgabe testen. Der globale Frontend-Setup initialisiert bereits + Deutsch. +- `waitFor(...)` auditieren: behalten fuer React Query, async effects, Timer, + Observer und echte eventual consistency; ersetzen durch direkte Assertions, + `findBy...`, Fake-Timer-Flushing oder explizite User-Event-Schritte, wenn + der Zustand synchron ist. + +Nicht empfohlen: + +- `frontend.maxWorkers` blind ueber `'80%'` erhoehen. +- `happy-dom` als schnellen Drop-in versuchen, bevor Radix/cmdk/Focus- und + Accessibility-Verhalten gezielt validiert ist. +- Breite Dashboard-Smokes in jsdom ergaenzen, wenn ein Node-Unit-Test denselben + Vertrag guenstiger pruefen kann. + +### 2. Background-Concurrency ist der groesste Robustheitsblocker + +`tests/integration/server-background-concurrency.test.ts` prueft einen +wichtigen Vertrag: zwei gleichzeitige `ttdash --background --no-open` Starts +sollen beide in der Registry landen und erreichbar sein. Dieser Bereich ist +klein, aber riskant, weil er echte CLI-Prozesse, Portwahl, Registry-Dateien, +Locks, Logfiles, Auth-Header und Readiness kombiniert. + +Aktuelle Risikoflaechen: + +- Zwei Prozesse konkurrieren um Registry-Read/Write, Portwahl und Pruning. +- Ein Prozess kann vor dem erwarteten Registry-Zustand aussteigen. +- Tests warten auf Registry und URL-Readiness, aber Diagnose muss im Fehlerfall + sofort Port-, Registry-, Startup- und Logursache unterscheidbar machen. 
+- Weitere Parallelisierung des Background-Projekts wuerde Robustheit eher + verschlechtern als verbessern. + +Optimierung: + +- `integration-background.maxWorkers` bei `2` lassen. +- Background-Fehlerdiagnostik weiter schaerfen: Exitcode, Signal, stdout, + stderr, Runtime-Root, Registry-Datei, Registry-Inhalt, Log-Snippets, + erwartete Ports und relevante Env-Werte. +- Registry-, Selection-, Prune-, EPERM-/ESRCH- und Startup-Branches mit + Fake-FS/Fake-Clock/Fake-Spawn als Unit-Tests abdecken. +- Echte Cross-Process-Smokes auf wenige Vertraege begrenzen: concurrent + startup, selected stop, custom prefix/permissions. +- Nach jedem Fix mindestens drei Wiederholungen von + `node scripts/run-vitest-project-timings.js --projects=integration-background --repeat=3` + verlangen. + +### 3. Prozessnahe Tests sind wertvoll, aber teilweise zu breit + +Unit-Hotspots: + +| Suite | Signal | Bewertung | +| --- | ---: | --- | +| `server-helpers-runner-process.test.ts` | `1.215s` | Echte `bunx`/`npx`/PATH-Pfade; als Smoke wertvoll, fuer Branch-Matrix teuer. | +| `server-helpers-file-locks.test.ts` | `0.479s` | Cross-Process-Locking; echte Prozesskoordination nur fuer wenige Faelle noetig. | +| `playwright-config.test.ts` | `0.428s` | Config-Load und worker-scoped Server-Vertrag. | +| `toktrack-version.test.ts` | `0.283s` | Package-/Version-Guardrails. | + +Integration-Hotspots: + +| Suite | Signal | Bewertung | +| --- | ---: | --- | +| `server-auto-import.test.ts` | `1.798s` | Auto-Import-Streaming, Fake Toktrack Runner, Singleton-Start. | +| `server-local-auth.test.ts` | `1.687s` | Echter Server, Bootstrap-Cookie/Bearer, Guards. | +| `server-api-reporting.test.ts` | `0.865s` | PDF/Typst-Pfad und Report-Router. | +| `server-startup-cli.test.ts` | `0.845s` | CLI-Startup, Host/Port/Permission-Vertraege. | +| `server-api-persistence.test.ts` | `0.760s` | Echte Persistenz- und Runtime-Root-Vertraege. 
| + +Optimierung: + +- Echte Prozesse nur fuer Shell, PATH, Portbindung, Signalhandling, + Cross-Process-Locking, Registry-Koordination und CLI-Startup verwenden. +- Statusmapping, Error-Mapping, Timeout, Retry, Cache, Permission-Branches, + Request-Shaping und malformed payloads mit Fake-Spawn/Fake-FS/Fake-Clock + testen. +- Fixed sleeps in Testhelfern weiter durch Readiness-Endpoints, + Sentinel-Dateien, Stream-Events oder explizite Child-Process-Signale + ersetzen. +- `server-api-reporting` in Router-Fehlerpfade mit Fake-Report-Generator und + einen echten PDF-Smoke trennen. +- Auth-Guard-Matrix so weit wie moeglich auf Router-/HTTP-Unit-Ebene testen; + echte Server-Integration nur fuer Cookie/Bearer/Origin-Zusammenspiel + behalten. + +### 4. CI ist gut parallelisiert, aber weitere Daten fehlen + +`.github/workflows/ci.yml` ist bereits als DAG aufgebaut: -Die wichtigsten Ziele fuer die naechste Umbauphase: - -- E2E-Tests muessen worker-isoliert laufen koennen, ohne gemeinsamen Server- oder Datenzustand. -- Die CI-Pipeline soll aus unabhaengigen Jobs bestehen statt aus einem langen seriellen Job. -- Command-Palette- und Dashboard-Composition-Tests sollen mehr Logik in Vitest beweisen und weniger - Browserzeit verbrauchen. -- Coverage soll risikogewichtet verbessert werden: zentrale Runtime- und Composition-Module zuerst, - nicht blind globale Thresholds erhoehen. - -## Baseline und Bottlenecks - -Gemessene lokale Baseline am 2026-04-28: - -| Gate | Zeit | Bewertung | -| --------------------------- | ----------: | ---------------------------------------------------- | -| `npm run check` | ca. `25s` | Statische Checks laufen komplett seriell. | -| `npm run test:architecture` | ca. `4.5s` | Stabil, aber aktuell explizit ohne File-Parallelism. | -| `npm run test:unit` | ca. `28s` | Wird vom `frontend`-Projekt dominiert. | -| `npm run test:timings` | ca. `30s` | Coverage aktiv, gute Timing-Signale. | -| `npm run build:app` | ca. `0.75s` | Kein relevanter Bottleneck. 
| -| `npm run verify:package` | ca. `7.4s` | Wertvoller Smoke, aber seriell im Full-Gate. | -| `npm run test:e2e:ci` | ca. `47s` | Groesster Einzelblock, absichtlich single-worker. | - -Vitest-Projekte isoliert: - -| Projekt | Zeit | Hauptkosten | -| -------------------------------------- | ----------: | ---------------------------------------------------------------- | -| `frontend` | ca. `18.4s` | `jsdom` environment, imports, React setup, viele kleine Dateien. | -| `unit` | ca. `4.4s` | Einzelne echte Prozess-/Timer-Tests dominieren. | -| `integration + integration-background` | ca. `4.6s` | Subprozesse und lokale Server, aber insgesamt gesund. | -| `architecture` | ca. `4.5s` | Source-Graph-/AST-Arbeit, aktuell seriell konfiguriert. | - -Playwright-Hotspots aus dem JUnit-Report: - -| Test | Zeit | -| ------------------------------------------------------------------ | ----------: | -| `command-palette.spec.ts` - dashboard section navigation | ca. `14.5s` | -| `command-palette.spec.ts` - analysis section navigation | ca. `5.5s` | -| `command-palette.spec.ts` - scroll and filter navigation | ca. `5.1s` | -| `dashboard-settings-backups.spec.ts` - settings and backup imports | ca. `4.4s` | - -Ein Testlauf mit `npx playwright test --workers=2` war schneller (`ca. 35s`), aber nicht robust: -ein Settings-/Backup-Test erwartete `6` Usage-Tage und bekam `8`. Das ist echte State-Leakage, kein -Timing-Artefakt. - -## Was bereits gut ist - -- Die Testpyramide ist grundsaetzlich vorhanden: Unit, Frontend/jsdom, Integration, Architektur und - E2E sind getrennt. -- Integration-Tests nutzen echte Server/Subprozesse nur dort, wo Prozess- oder CLI-Verhalten relevant - ist. -- `test:timings` liefert verwertbare Slow-Suite- und Slow-Test-Auswertungen. -- Playwright-E2E deckt wichtige echte Journeys ab: Load/Upload, Forecast/Filter, Settings/Backups, - Reporting und Command Palette. 
-- Coverage umfasst Produkt-Runtime wie `server/`, `shared/`, `src/` und `usage-normalizer.js`, nicht - nur Frontend-Ausschnitte. - -## Findings - -### H-01 - Playwright ist nicht worker-isoliert - -**Referenzen:** `playwright.config.ts`, `scripts/start-test-server.js`, `tests/e2e/helpers.ts` - -Der Playwright-Server nutzt eine gemeinsame Runtime unter `.tmp-playwright/app`. Beim Start wird dieser -Ordner geloescht und neu aufgebaut. Die Testhelfer lesen ausserdem eine globale Auth-Session und -mutieren globalen App-Zustand ueber `/api/usage` und `/api/settings`. - -Das blockiert sichere Parallelisierung: - -- Mehrere Worker koennen denselben Datenordner loeschen oder ueberschreiben. -- `resetAppState()` loescht globale Usage/Settings waehrend andere Tests dieselbe App verwenden. -- Relative `page.request`-Aufrufe haengen am gemeinsamen `baseURL`. -- E2E-Tests, die Daten importieren oder loeschen, beeinflussen andere E2E-Tests. - -**Impact:** `workers > 1` ist schneller, aber nicht verlaesslich. Der aktuelle CI-Befehl -`test:e2e:ci` erzwingt deshalb `--workers=1`, wodurch E2E zum groessten Full-Gate-Bottleneck wird. - -**Improvement:** E2E muss auf worker-scoped App-Instanzen umgestellt werden: - -- Jeder Playwright-Worker bekommt eigenen Port, Runtime-Root, Cache-, Config- und Data-Dir. -- Jeder Worker startet seinen eigenen Testserver und liest seine eigene Auth-Session. -- Die Playwright-Fixture ueberschreibt `baseURL` worker-spezifisch. -- `resetAppState()` darf nur noch den Server des aktuellen Workers betreffen. -- Der globale `webServer` in `playwright.config.ts` wird fuer die parallele Suite durch eine - worker-scoped Server-Fixture ersetzt oder er startet mehrere isolierte Server vorab. - -Akzeptanzkriterium fuer die spaetere Umsetzung: `npx playwright test --workers=2` und danach -`--workers=4` laufen mehrfach gruen, ohne Usage-Day-Mismatch, `socket hang up` oder Auth-Session-Race. 
- -### H-02 - CI ist ein monolithischer serieller Full-Gate - -**Referenzen:** `.github/workflows/ci.yml`, `package.json` - -Der Haupt-CI-Job fuehrt Format, Lint, Docstring-Lint, Dependency-Cruiser, TypeScript, Architecture, -Vitest-Coverage, Build, Package-Smoke, Playwright-Install und E2E nacheinander aus. - -Das ist robust, aber nicht maximal performant: - -- Unabhaengige Gates warten aufeinander. -- Ein langer Job nutzt GitHub-Runner-Parallelitaet nicht aus. -- Vitest-Projekte werden nicht als CI-Matrix genutzt. -- Package-Smoke und E2E laufen nach allen anderen Gates, obwohl sie nur den gebauten App-Artifact - benoetigen. - -**Improvement:** CI als DAG aufbauen: - -- `static`: Format, Lint, Dependency-Cruiser, TypeScript. -- `vitest-unit`, `vitest-frontend`, `vitest-integration`, `vitest-architecture`: getrennte Jobs. -- `build`: erzeugt `dist/` als Artifact. -- `package-smoke`: nutzt `dist/` Artifact und prueft Pack/Install/CLI/Server. -- `e2e`: nutzt `dist/` Artifact und worker-isolierte Playwright-Server. - -Akzeptanzkriterium: Full-CI-Walltime sinkt deutlich, ohne Testumfang zu reduzieren. Jeder Job laedt -seine eigenen Reports hoch und blockiert nicht durch gemeinsame Output-Pfade. - -**Umsetzungsstand Phase 5:** `ci.yml` ist jetzt als DAG geschnitten: `static`, Vitest-Projektmatrix, -dedizierter `coverage`-Job und `build` laufen unabhaengig; `package-smoke` und `e2e` verwenden das -einmal erzeugte `production-dist`-Artifact parallel. - -### H-03 - Command-Palette-E2E mischt zu viele Testarten - -**Referenzen:** `tests/e2e/command-palette.spec.ts`, -`src/components/features/command-palette/CommandPalette.tsx` - -Die Command-Palette-Suite testet gleichzeitig: - -- Vollstaendige Command-Liste. -- Search aliases und Scoring. -- Action command execution. -- Filter- und View-State. -- Dynamische Provider-/Model-Commands. -- Scroll-Ziele fuer fast alle Dashboard-Sections. -- Theme, Sprache, Help und Quick-Select. 
- -Das ist ein klassischer Browser-Monolith: sehr wertvoll als Smoke, aber zu teuer und zu breit als -primaere Contract-Abdeckung. Besonders Scroll-Navigation durch viele Sections verursacht hohe -Browserzeit. - -**Impact:** Ein einzelnes Feature dominiert den E2E-Gate, obwohl viele Assertions keine echte -Browser-Integration brauchen. - -**Improvement:** Command-Palette in testbare Logik und E2E-Smoke trennen: - -- Pure Command-Erzeugung, Search-Scoring, Quick-Select-Mapping und Section-Command-Filterung in eine - testbare Helper-/Builder-Schicht extrahieren. -- Vitest-Tests fuer alle Command-IDs, Gruppen, Aliases, Visibility-Regeln, Provider/Model-Dynamik und - Section-Order. -- E2E nur fuer wenige representative Flows behalten: - - Palette oeffnet per Keyboard. - - Ein Action-Command triggert Download oder Dialog. - - Ein Filter-Command veraendert sichtbaren UI-State. - - Ein Section-Command scrollt zu genau einer representative Section. - -Akzeptanzkriterium: Command-Palette-E2E-Zeit sinkt signifikant, waehrend die Command-Contract-Coverage -in Vitest steigt. - -### H-04 - Report- und Coverage-Ausgaben sind nicht matrix-sicher - -**Referenzen:** `vitest.config.ts`, `playwright.config.ts`, `package.json` - -Vitest schreibt standardmaessig nach `test-results/vitest.junit.xml`. Playwright schreibt nach -`test-results/playwright.junit.xml` und `playwright-report/`. Das ist fuer einen seriellen lokalen Lauf -okay, kollidiert aber bei parallelen CI-Jobs, Shards oder mehreren Vitest-Prozessen. - -**Impact:** Sobald Vitest-Projekte oder Playwright-Shards parallel laufen, koennen Reports -ueberschrieben oder unvollstaendig hochgeladen werden. 
- -**Improvement:** Output-Pfade pro Projekt/Shard/Job trennen: - -- `test-results/vitest-unit.junit.xml` -- `test-results/vitest-frontend-1.junit.xml` -- `test-results/vitest-integration.junit.xml` -- `test-results/playwright-e2e-1.junit.xml` -- Coverage je Shard in eigene Verzeichnisse schreiben und danach bewusst mergen oder dediziert in - einem Coverage-Job erzeugen. - -### M-01 - `frontend/jsdom` ist der Vitest-Hauptkostentreiber - -**Referenzen:** `vitest.config.ts`, `vitest.setup.frontend.ts`, `tests/frontend` - -Das `frontend`-Projekt hat `75` Testdateien und braucht isoliert ca. `18.4s`. Die aggregierten -Vitest-Metriken zeigen hohe Kosten fuer `environment`, `import` und `setup`. Ein Lauf mit mehr -Workern (`--maxWorkers=80%`) war sogar etwas langsamer als die aktuelle `50%`-Einstellung. - -**Impact:** Einfach mehr Worker zu geben ist aktuell keine Loesung. Die Kosten liegen stark in -Environment-Erzeugung und schweren Importbaeumen. - -**Improvement:** - -- Aktuelle `maxWorkers: '50%'` beibehalten, bis Setup-/Import-Kosten reduziert sind. -- Frontend-Tests nach schweren Importbaeumen pruefen und dort konsolidieren, wo viele kleine Dateien - dieselbe Dashboard-/Chart-Struktur laden. -- Gemeinsame Recharts-, IntersectionObserver-, ResizeObserver- und Motion-Mocks zentralisieren. -- Kleine pure Daten-/Formatierungsfaelle aus jsdom-Tests in Node-Unit-Tests verschieben. -- Optional nach dem Umbau `happy-dom` nur dann evaluieren, wenn React/Radix/cmdk-Tests kompatibel - bleiben. Nicht blind wechseln. - -### M-02 - Node-Projekte laden unnoetiges React-Test-Setup - -**Referenzen:** `vitest.setup.ts`, `vitest.setup.frontend.ts`, `vitest.config.ts` - -`vitest.setup.ts` importiert `@testing-library/jest-dom/vitest` und `@testing-library/react` -`cleanup()`. Diese Datei wird auch fuer Node-only Projekte wie `unit`, `integration`, -`integration-background` und `architecture` geladen. 
- -**Impact:** Node-Tests zahlen React-/Testing-Library-Importkosten, obwohl sie kein DOM brauchen. - -**Improvement:** Setup splitten: - -- `vitest.setup.node.ts`: nur Timer/MOCK cleanup, keine Testing Library. -- `vitest.setup.frontend.ts`: `jest-dom`, `cleanup`, i18n, browser API shims. -- Architecture entweder ohne Setup oder mit minimalem Node-Setup. - -Akzeptanzkriterium: Node-Projekt-Setupzeit sinkt, ohne Cleanup-Sicherheit in Frontend-Tests zu -verlieren. - -### M-03 - Coverage ist global gruen, aber risikogewichtet schwach - -Die globale Coverage lag zuletzt bei ca. `75%` Statements, `64%` Branches, `76%` Functions und -`76%` Lines. Das reicht fuer den Gate, verdeckt aber zentrale Luecken. - -Wichtige Low-Coverage-Module: - -| Modul | Lines | Branches | Risiko | -| --------------------------------------------------------------------------------- | --------: | --------: | --------------------------------------------------- | -| `server/app-runtime.js` | `0%` | `0%` | Runtime-Wiring, CLI/Server-Komposition. | -| `DashboardSections.tsx` | ca. `22%` | `0%` | Haupt-Komposition der Dashboard-Sections. | -| `server/background-runtime.js` | ca. `40%` | ca. `41%` | Hintergrundinstanzen, Registry, Prozess-Handling. | -| `server/data-runtime.js` | ca. `50%` | ca. `29%` | Daten-/Settings-Persistenz, Locking, Import/Export. | -| `server/http-router.js` | ca. `54%` | ca. `39%` | API-Routing, Mutation Guards, Fehlerfaelle. | -| `CommandPalette.tsx` | ca. `47%` | ca. `35%` | Keyboard/Search/Command-Vertraege. | -| `CostByWeekday.tsx`, `ModelMix.tsx`, `TokensOverTime.tsx`, `PeriodComparison.tsx` | `0%` | `0%` | Lazy dashboard sections ohne Vitest-Signal. | - -**Improvement:** Coverage risikogewichtet erhoehen: - -- `server/app-runtime.js`: Wiring-Test mit gemockten Factory-Abhaengigkeiten, der sicherstellt, dass - Runtimes korrekt komponiert werden. 
-- `server/data-runtime.js`: Pfadprioritaeten, Settings-/Usage-Import, Lock-Timeouts, stale locks, - corrupted state und Recovery. -- `server/http-router.js`: Route-Matrix fuer Methoden, Auth, Mutation Guards, Payload-Fehler, - statische Dateien, SPA-Fallback und API-Prefix. -- `DashboardSections.tsx`: Section visibility/order, request section availability, lazy fallback, - forecast dialog, preload scheduling und ErrorBoundary. -- `CommandPalette.tsx`: Command Builder/Scoring/Visibility als pure Unit/Frontend-Tests. -- Lazy charts: je ein kleiner render-/empty-state-/branch Test pro `0%` Modul. - -Nicht empfohlen: globale Thresholds sofort stark erhoehen. Zuerst gezielte Coverage fuer die oben -genannten Module schaffen, dann file-/directory-spezifische Mindestwerte einfuehren. - -### M-04 - Einige Unit-Tests nutzen echte Zeit oder echte Prozesse - -**Referenzen:** `tests/unit/server-helpers-runner-process.test.ts`, -`tests/unit/server-helpers-file-locks.test.ts`, `tests/integration/server-auto-import.test.ts` - -Einzelne Tests sind bewusst prozessnah. Das ist richtig, weil genau Prozess-/CLI-Verhalten getestet -wird. Einige Faelle nutzen aber echte Sleeps, echte Child-Prozesse oder Timeout-Fenster. - -**Impact:** Diese Tests sind wertvoll, aber koennen lokal variieren und setzen eine Untergrenze fuer -Suite-Zeit. - -**Improvement:** - -- Prozessnahe Tests behalten, aber streng auf wenige Smoke-Faelle begrenzen. -- Timeout-/Fallback-Logik dort, wo moeglich, ueber injizierte Clock/Sleep/Spawn-Fakes testen. -- Echte Subprozess-Tests als Integration einstufen, wenn sie Shell/PATH/Cross-Process-Verhalten - pruefen. -- Slow-Test-Budget dokumentieren: kein neuer Unit-Test sollte ohne Begruendung > `300ms` brauchen. - -### M-05 - Statische Checks haben Doppelarbeit - -**Referenzen:** `package.json`, `eslint.config.mjs` - -`npm run check` fuehrt `eslint .` und danach `lint:docstrings` als zweiten ESLint-Lauf aus. 
Der zweite -Lauf ist enger, aber Regeln und Dateien ueberschneiden sich deutlich. - -**Impact:** Static-Gate-Zeit steigt, obwohl die meisten Inputs bereits im ersten ESLint-Lauf gesehen -werden. - -**Improvement:** +- `static` +- Vitest-Matrix fuer `architecture`, `unit`, `frontend`, `integration`, + `integration-background` +- dedizierter `coverage`-Job +- `build` +- `package-smoke` nach `build` +- `e2e` nach `build` +- `windows-smoke` + +Verbleibende CI-Kosten sind wahrscheinlich: + +- `npm ci --ignore-scripts` pro Job +- Playwright-Browser-Install im E2E-Job +- Coverage-Instrumentierung und HTML/LCOV-Erzeugung +- Artifact upload/download fuer `dist`, Coverage und Reports + +Optimierung: + +- Vor CI-Umbauten echte Job-Timings aus GitHub Actions auswerten. +- Coverage nicht in jede Matrix duplizieren; dedizierten Coverage-Job + beibehalten. +- E2E-Sharding erst einfuehren, wenn HTML-/JUnit-/Artifact-Pfade shard-sicher + sind. +- `verify:full` als serielle Referenz behalten und `verify:full:parallel` als + lokalen Fast Path dokumentieren. +- Jede Worker- oder Shard-Aenderung mit mindestens drei Wiederholungen messen: + Median und Worst Run dokumentieren. + +## Coverage-Luecken + +Aktuelle Low-Coverage-Signale aus `coverage/lcov.info`: + +| Bereich | Lines | Branches | Risiko | +| --- | ---: | ---: | --- | +| `src/App.tsx` | `0.0%` | `0.0%` | Entry-Wiring; nur testen, wenn App-shell-Vertrag nicht anders abgedeckt ist. | +| `src/components/features/risk/ConcentrationRisk.tsx` | `0.0%` | `0.0%` | Feature ist ohne Testsignal; zuerst entscheiden, ob Smoke oder Exclude sinnvoll ist. | +| `server/report/index.js` | `40.9%` | `41.7%` | Typst-Erkennung, Compile-Fehler, Cleanup, PDF-Ausgabe. | +| `src/lib/app-settings.ts` | `40.0%` | `50.0%` | Defaults, Settings-Mapping, Recovery-Faelle. | +| `src/components/layout/Header.tsx` | `45.0%` | `35.7%` | Header-Actions, Links, Statusanzeigen. | +| `src/lib/csv-export.ts` | `46.7%` | n/a | Export-Korrektheit, Escaping, leere Daten. 
| +| `server/background-runtime.js` | `59.5%` | `53.9%` | Background-Start/Stop, Registry, EPERM/ESRCH, Pruning. | +| `server/http-router.js` | `68.0%` | `54.6%` | API-Fehlerpfade, Mutationsschutz, malformed JSON. | +| `server/auto-import-runtime.js` | `69.4%` | `54.7%` | Stream, Kill, Timeout, malformed output. | +| `src/hooks/use-dashboard-controller-actions.ts` | `69.0%` | `42.0%` | Upload/Delete/Export/Backup/PDF/toast side effects. | +| `src/components/features/auto-import/AutoImportModal.tsx` | `66.7%` | `51.4%` | Stream-Status, Cancel, Error, Success. | +| `src/components/features/command-palette/CommandPalette.tsx` | `67.4%` | `58.1%` | Visibility, action dispatch, keyboard/empty states. | +| `server/report/charts.js` | `75.7%` | `37.5%` | Chart asset edge cases and formatting. | + +Prioritaet fuer Coverage-Erhoehung: + +1. `server/background-runtime.js` + - EPERM/ESRCH, stale registry, invalid registry, selected stop, + startup failure, readiness timeout, no-open path, custom prefix. +2. `server/http-router.js` + - malformed JSON, unsupported methods, missing/invalid auth, origin/host + guards, permission denied, corrupt data/settings recovery, report + success/failure. +3. `server/auto-import-runtime.js` + - malformed toktrack output, child error, timeout, kill escalation, stream + tail flush, singleton guard. +4. `server/report/index.js` und `server/report/charts.js` + - missing Typst, compile stderr, cleanup failure tolerance, invalid payload, + chart edge values. +5. `src/hooks/use-dashboard-controller-actions.ts` + - upload success/error, delete/reset, backup import/export, PDF request, + blob download, toast side effects, mutation invalidation. +6. `src/components/features/auto-import/AutoImportModal.tsx` + - idle/running/success/error/cancel states, stream event rendering, + disabled controls. +7. Branch-heavy UI-Workflows + - Header, Command Palette, Settings sections, sortable tables, date picker. +8. 
Kleine Lib-Module + - `app-settings.ts`, `csv-export.ts`, `csv.ts`, `model-utils.ts` mit + parametrisierten Node-Unit-Tests. + +Nicht jede 0%-Datei ist automatisch ein Problem. Primitive Wrapper, +Entry-Points oder rein deklarative UI sollten entweder mit einem guenstigen +Smoke abgedeckt oder bewusst vom Coverage-Ratchet ausgenommen werden. Der +entscheidende Massstab ist Produktionsrisiko, nicht kosmetische Prozentzahl. + +## Nicht optimale Einstellungen + +| Bereich | Aktuell | Bewertung | Empfehlung | +| --- | --- | --- | --- | +| `architecture.fileParallelism` | `false` | Stabil und klein; Parallelisierung bringt wenig und kann Source-Graph-Arbeit duplizieren. | Beibehalten, nur nach 3-run Benchmark aendern. | +| `unit.maxWorkers` | `'80%'` | Schnell; einzelne Prozess-Smokes dominieren. | Beibehalten, Branch-Matrix aus echten Prozess-Smokes entfernen. | +| `frontend.maxWorkers` | `'80%'` | Aktuell sinnvoll, aber varianzreich. | Nicht erhoehen; zuerst jsdom-Dateien und Importkosten reduzieren. | +| `integration.maxWorkers` | `'50%'` | Gute Balance fuer HTTP/I/O. | Erst nach besserer Tempdir-/Port-Isolation neu messen. | +| `integration-background.maxWorkers` | `2` | Richtig wegen echter Background-Prozesse. | Nicht erhoehen, bis Concurrency mehrfach stabil ist. | +| `frontend.testTimeout` | `30_000` | Robust unter Coverage, kann langsame Tests verdecken. | Timing-Budgets schaerfen statt Timeout zuerst senken. | +| Playwright CI workers | `2` | Runnerfreundlich und stabil. | `4` oder Sharding nur nach report-sicherem Mehrfachbenchmark. | +| Coverage | dedizierter Lauf | Richtig, aber teuer. | Beibehalten; spaeter gezielte Threshold-Ratchets. | +| Direkte Parallelaufrufe | feste Reportpfade moeglich | Manuelle Parallelitaet kann JUnit/HTML/Coverage ueberschreiben. | Nur scripts mit eindeutigen Output-Pfaden parallel nutzen. 
| + +## Verbesserungsplan + +### Phase 1 - Background-Concurrency stabilisieren + +Ziel: Den kritischsten Flake-Bereich stabilisieren, bevor mehr +Parallelisierung oder scharfere Budgets eingefuehrt werden. + +Umsetzung: + +- Background-Testhelfer so erweitern, dass jeder CLI-Fehler Registry, + Logfiles, Ports, Runtime-Root und Env-Kontext ausgibt. +- Concurrent-Startup-Test auf konkrete Zwischenzustaende pruefen: + Prozess beendet erfolgreich, Registry enthaelt zwei Eintraege, beide Ports + sind erreichbar, Readiness-Endpoint antwortet mit Auth. +- Registry-/Startup-/Prune-Branches in Unit-Tests mit Fakes abdecken. +- Echte Background-Smokes auf wenige Prozessvertraege begrenzen. + +Akzeptanz: + +- `integration-background` laeuft drei Wiederholungen ohne Flake. +- Fehlerdiagnose reicht aus, um Port-, Registry-, Auth- und Startup-Ursache zu + unterscheiden. +- `maxWorkers` bleibt `2`. + +### Phase 2 - Frontend/jsdom-Kosten senken + +Ziel: Weniger jsdom-Fixkosten pro Assertion, ohne UI-Verhalten zu verlieren. + +Umsetzung: + +- `tests/frontend` klassifizieren und pure Logik in Node-Unit-Tests + verschieben. +- Schwere Settings-, Drilldown-, FilterBar- und Table-Tests in kleinere, + vertragsscharfe Tests schneiden. +- Shared Helper fuer Recharts, Observer, Motion, rAF und App-Provider + konsolidieren. +- Unnoetige i18n-Initialisierung und unnoetige `waitFor(...)`-Nutzung + entfernen. +- Danach `frontend` dreimal benchmarken und Median/Worst Run dokumentieren. + +Akzeptanz: + +- Frontend-Median oder Frontend-Worst-Run sinkt messbar. +- ARIA-, Keyboard-, Focus-, i18n-, Motion- und Chart-Verhalten bleibt + abgedeckt. +- Keine Snapshots oder Layout-Goldens einfuehren. + +### Phase 3 - Prozessnahe Tests trennen + +Ziel: Echte Prozesse nur fuer echte Prozessvertraege verwenden; Branch-Logik +billiger und deterministischer testen. 
+ +Umsetzung: + +- Runner-/PATH-Smokes in `server-helpers-runner-process.test.ts` behalten, + Cache-/Timeout-/stderr-/stdout-Branches in Fake-Spawn-Tests verschieben. +- File-Lock-Smokes behalten, stale-lock-, permission- und cleanup-Branches mit + Fake-FS/Fake-Clock testen. +- Auto-Import-Integration um fixed sleeps bereinigen und Branch-Matrix auf + Runtime-/Router-Unit-Tests verlagern. +- Reporting in Fake-Generator-Routertests und einen echten PDF-Smoke trennen. + +Akzeptanz: + +- Integration-Worst-Run steigt nicht durch neue Coverage. +- Jeder echte Prozess-Smoke prueft einen Prozessvertrag, nicht nur + Branch-Logik. +- Keine neuen fixed sleeps ohne konkrete Begruendung. -- Pruefen, ob `lint:docstrings` vollstaendig in `eslint .` enthalten ist. Falls ja, `lint:docstrings` - als separaten Gate entfernen. -- Falls getrennt noetig, ESLint Cache aktivieren und unterschiedliche Cache-Keys fuer beide Laeufe - verwenden. -- Prettier Cache und TypeScript incremental fuer lokale Gates nutzen. +### Phase 4 - Coverage risikogewichtet erhoehen -**Umsetzungsstand Phase 5:** `test:static` nutzt einen einzigen `lint`-Lauf fuer den regulaeren Gate, -waehrend `lint:docstrings` als gezielter Diagnosebefehl erhalten bleibt. Prettier, ESLint und -TypeScript schreiben lokale Cache-Dateien unter `.cache/`. +Ziel: Branch-Coverage dort erhoehen, wo Fehler teuer waeren. -### M-06 - Package-Smoke ist wichtig, aber gehoert in einen eigenen CI-Job +Umsetzung: -**Referenzen:** `scripts/verify-package.js`, `package.json` +- Server-Runtime zuerst: Background, HTTP-Router, Auto-Import, Report. +- Danach Controller-Actions und browsernahe Side Effects. +- Danach UI-Workflow-Branches mit echten DOM-Vertraegen. +- Kleine Lib-Module per parametrisierten Node-Unit-Tests abdecken. +- Nach jeder Coverage-Phase `npm run test:vitest:coverage` auswerten und + globale Werte plus betroffene Dateien dokumentieren. 
-`verify:package` packt das Projekt, installiert den Tarball in ein Temp-Projekt, prueft CLI-Help, -`npm exec` und Server-Startup. Das ist ein guter Release-Smoke, aber er muss nicht denselben seriellen -Job wie Unit, Integration und E2E blockieren. +Akzeptanz: -**Impact:** Im lokalen Full-Gate addiert er ca. `7s`. In CI kann er parallel zu E2E laufen, sobald -`dist/` als Artifact gebaut wurde. +- Globale Coverage faellt nicht. +- Branch-Coverage steigt zuerst in `server/` und branch-heavy Hooks. +- Neue Tests liegen auf dem schmalsten sinnvollen Layer. -**Improvement:** +### Phase 5 - Gates datenbasiert schaerfen -- `build` erzeugt `dist/` einmal. -- `package-smoke` und `e2e` nutzen dieses Artifact parallel. -- Package-Smoke bleibt im Full-Gate, aber nicht als serieller Nachklapp nach allen Tests. +Ziel: Lokale und CI-Gates schneller machen, ohne Strenge zu verlieren. -**Umsetzungsstand Phase 5:** Der CI-`build`-Job laedt `production-dist` hoch; `package-smoke` und -`e2e` laden dieses Artifact herunter und laufen nur noch mit `needs: build`. +Umsetzung: -### M-07 - Timing-Regressions sind sichtbar, aber nicht budgetiert +- `verify:full` als serielle Referenz behalten. +- `verify:full:parallel` als lokalen Fast Path dokumentieren und regelmaessig + gegen CI-DAG spiegeln. +- Timing-Budgets projektweise schaerfen, aber erst nach Background-Stabilitaet. +- CI-Timing-Artefakte auswerten, bevor Dependency-Install, Browser-Install, + Coverage oder Artifacts umgebaut werden. +- Sharding nur einfuehren, wenn alle Reportpfade shard-sicher sind. -`test:timings` listet Slow-Suites und Slow-Tests, erzwingt aber kein Budget und vergleicht nicht gegen -eine Baseline. +Akzeptanz: -**Improvement:** +- Keine parallelen Jobs ueberschreiben JUnit, Coverage oder Playwright-HTML. +- CI-DAG bleibt lesbar und liefert aussagekraeftige Failures. +- Lokaler Fast Path ist schneller, aber nicht weniger streng. -- Im Review/CI einen Timing-Budget-Abschnitt pflegen. 
-- Optional spaeter Script erweitern: - - Warnung fuer neue Tests > `500ms`. - - Warnung fuer Suites > `2s`. - - Separates Ranking nach Projekt. - - Trendvergleich gegen eine gespeicherte Baseline in CI-Artefakten. +### Phase 6 - Playwright klein und wertvoll halten -**Umsetzungsstand Phase 6:** `scripts/report-test-timings.js` wertet JUnit-Reports jetzt mit -Warn- und Hard-Budgets aus. Lokal bleibt `test:timings` diagnostisch; `test:timings:budget` nutzt -einen separaten JUnit-Output und scheitert bei Suiten > `20s` oder Tests > `12s`. Die CI-Matrix -wendet denselben Hard-Budget-Check auf die bereits erzeugten Vitest-Projektreports an, ohne eine -zusaetzliche Vitest-Runde zu starten. +Ziel: Browserzeit nur fuer echte User Journeys verwenden. -### N-01 - macOS-Sandbox kann Playwright-False-Negatives erzeugen +Behaltene E2E-Vertraege: -Ein sandboxed Playwright-Lauf scheiterte mit Chromium -`MachPortRendezvousServer ... Permission denied`. Derselbe Lauf ausserhalb der Sandbox bestand. +- Dashboard load/upload. +- Settings/backups. +- Forecast/filter flow. +- Reporting smoke. +- Representative Command Palette smoke. -**Impact:** Agent-/Sandbox-Umgebungen koennen Browser-Failures erzeugen, die nicht repo-bezogen sind. +Nicht in E2E ausweiten: -**Improvement:** In Review- und Agent-Doku festhalten: Playwright auf macOS ggf. ausserhalb strenger -Sandbox ausfuehren. Das ist kein Produktfix. +- Vollstaendige Command-Liste, IDs, Aliases und Visibility. +- Exhaustive Sortier-, Label- und Filter-Matrizen. +- Reine Datenformung oder Settings-Mapping. -## Zielarchitektur fuer das Testsystem +Akzeptanz: -### Lokale Befehle +- Playwright bleibt kurz und prueft echte Browserintegration. +- Worker-scoped Server, Port, Runtime-Root und Auth-Session bleiben erhalten. +- Kein User-Flow wird ausschliesslich durch pure Unit-Tests ersetzt, wenn der + Browser selbst Teil des Risikos ist. 
-Empfohlene Zielstruktur: +## Zielarchitektur -- `npm run test:static`: Format, Lint, Dependency-Cruiser, TypeScript. -- `npm run test:vitest`: alle Vitest-Projekte ohne Coverage. -- `npm run test:vitest:coverage`: Coverage-Gate mit getrennten Outputs. -- `npm run test:e2e:parallel`: Playwright mit worker-isolierter App. -- `npm run verify:ci`: CI-kompatibler Full-Gate ohne doppelte Builds. -- `npm run verify:full`: lokaler kompletter Gate, der dieselbe Logik wie CI nutzt. +| Testlevel | Zweck | Performance-Regel | +| --- | --- | --- | +| Node Unit | Pure Logik, Branches, Formatierung, Runtime-Factories | Schnell, fake by default. | +| Frontend jsdom | DOM, ARIA, Keyboard, Focus, Motion, Provider-Zusammenspiel | Zielgenau, keine breiten Dashboard-Smokes. | +| Integration Node | Echter Server, HTTP, CLI, FS, Ports, Cross-Process | Wenige echte Vertraege, gute Diagnose. | +| Architecture | Import-, Layer- und Strukturregeln | Klein halten, shared source graph nutzen. | +| E2E | Echte Browser-Journeys | Kurz, worker-isoliert, keine Contract-Matrix. | +| Coverage | Breiter Denominator und Risk Ratchet | Separater Lauf, nicht in jede Matrix duplizieren. | -### E2E-Isolation +## Empfohlene Reihenfolge -Die spaetere Playwright-Fixture soll worker-scoped sein: +1. Background-Concurrency diagnostizieren und stabilisieren. +2. Frontend/jsdom-Testklassifizierung durchfuehren. +3. Pure Logik aus Frontendtests in Node-Unit-Tests verschieben. +4. Prozessnahe Tests in Fake-Branch-Tests und echte Smokes trennen. +5. Server-Runtime-Coverage fuer Background, Router, Auto-Import und Report + erhoehen. +6. Controller-/Hook-Coverage fuer Dashboard Actions, Settings, CSV und Model + Utils erhoehen. +7. Timing-Budgets projektweise schaerfen. +8. CI- und Playwright-Sharding nur nach report-sicherem Mehrfachbenchmark + erweitern. -- Worker startet eigenen Serverprozess. -- Worker bekommt eigenen Runtime-Root: `.tmp-playwright/workers//app`. -- Worker bekommt eigenen Port, z. B. 
ueber freien Port oder deterministisch aus Basisport und Worker. -- Worker wartet auf eigene `session-auth.json`. -- `baseURL`, `bootstrapUrl` und Auth-Header kommen aus der Worker-Fixture. -- Tests importieren `test` und `expect` aus einem lokalen E2E-Fixture-Modul statt direkt aus - `@playwright/test`. +## Validierung -Damit koennen Tests parallel laufen, ohne Daten zu teilen. +Da diese Datei nur Review und Plan enthaelt: -### CI-DAG +- `npx prettier --check docs/review/test-review.md` +- `git diff --check -- docs/review/test-review.md` -Ziel-CI: +Fuer spaetere Implementierungsphasen: -- `static` -- `vitest-unit` -- `vitest-frontend` optional geshardet -- `vitest-integration` -- `vitest-architecture` -- `build` -- `package-smoke` mit Build-Artifact -- `e2e` mit Build-Artifact und worker-isolierten Servern -- `reports` oder Artifact Upload pro Job - -Coverage-Strategie: - -- Einfachste robuste Variante: ein dedizierter `coverage`-Job, der alle Vitest-Projekte mit Coverage - seriell oder kontrolliert parallel ausfuehrt. -- Schnellere Variante: Coverage je Vitest-Shard erzeugen und danach mergen. Diese Variante erst - waehlen, wenn Report-Merging sauber automatisiert ist. - -## Priorisierte Roadmap - -1. **Dokumentation aktualisieren.** Diese Review-Datei als aktuelle Master-Analyse nutzen. -2. **E2E worker-isolieren.** Ohne das bleibt jede Playwright-Parallelisierung instabil. -3. **Command-Palette-Teststrategie umbauen.** Contract-Logik in Vitest, wenige Browser-Smokes. -4. **Vitest-Setup splitten.** Node-Projekte von React Testing Library und `jest-dom` entkoppeln. -5. **Coverage-Luecken gezielt schliessen.** Runtime-Komposition und Dashboard-Composition zuerst. -6. **CI in Jobs/Matrix aufteilen.** Erst nachdem Output-Pfade und E2E-Isolation parallel-sicher sind. -7. **Budgets einfuehren.** Slow-Test- und Coverage-Budgets als Regressionsschutz. 
- -## Akzeptanzkriterien fuer die spaetere Umsetzung - -- `npx playwright test --workers=2` und `--workers=4` laufen mehrfach gruen. -- `npm run verify:full` laeuft lokal vollstaendig und robust unter der neuen Struktur. -- CI-Full-Gate nutzt mehrere Jobs und ist deutlich schneller als der aktuelle serielle Job. -- Keine JUnit-, Coverage- oder Playwright-Reports werden durch parallele Jobs ueberschrieben. -- `frontend/jsdom`-Zeit sinkt oder bleibt trotz hoeherer Coverage stabil. -- Coverage steigt gezielt bei `server/app-runtime.js`, `server/data-runtime.js`, - `server/http-router.js`, `DashboardSections.tsx` und `CommandPalette.tsx`. -- E2E-Suite bleibt fokussiert auf echte Browserintegration statt auf exhaustive Contract-Tests. +- Nach Performance-Aenderungen: + `node scripts/run-vitest-project-timings.js --projects= --repeat=3` +- Nach Coverage-Aenderungen: `npm run test:vitest:coverage` +- Nach Pipeline-Aenderungen: + `PLAYWRIGHT_TEST_PORT=3016 npm run verify:full:parallel` +- Nach E2E-Aenderungen: `npm run test:e2e:ci` und bei Sharding/Worker-Aenderung + mehrere Wiederholungen mit eindeutigen Reportpfaden. From 708c3c7db9a2191835365904dcb01ea5f0b1d2cb Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Fri, 1 May 2026 00:58:54 +0200 Subject: [PATCH 22/42] Expand background runtime branch coverage --- docs/review/fixed-findings.md | 1113 +++++++++++++++++++++++++ tests/unit/background-runtime.test.ts | 206 +++++ 2 files changed, 1319 insertions(+) create mode 100644 docs/review/fixed-findings.md diff --git a/docs/review/fixed-findings.md b/docs/review/fixed-findings.md new file mode 100644 index 0000000..70a5eec --- /dev/null +++ b/docs/review/fixed-findings.md @@ -0,0 +1,1113 @@ +# Fixed Findings + +## 2026-05-01 + +### test-review.md / Phase 1 - Background-Concurrency stabilisieren + +- Status: fixed +- Scope: Die Background-Runtime-Branch-Coverage wurde ohne weitere echte Prozesse erweitert. 
+ Stop-Noop, Multi-Instance-Cancel, Stop-Timeout mit Logausgabe und erfolgreicher Background-Start + laufen jetzt im schnellen Fake-basierten Unit-Layer. Die vorhandenen echten + `integration-background`-Smokes bleiben unveraendert und pruefen weiter Concurrent Startup, + selected stop, Registry pruning und custom prefix/log permissions. +- Fix reference: phase commit `Expand background runtime branch coverage` +- Closed findings: + - `test-review.md / Phase 1` - Background-Concurrency stabilisieren, Restabdeckung fuer + Stop-/Start-Branches. + - `test-review.md / Coverage-Luecken` - `server/background-runtime.js`, Teil Stop-Cancel, + Stop-Timeout und erfolgreicher Start. +- Guardrails: + - Keine Produktionslogik wurde geaendert. + - Neue Tests verwenden Fake-FS, Fake-Spawn, Fake-Readline, Fake-Clock und Spy-Ausgaben statt + zusaetzlicher Subprozesse. + - `integration-background.maxWorkers` bleibt unveraendert bei `2`. + - Der dreifache Background-Benchmark lief ohne Concurrency-Flake. +- Validation: + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/background-runtime.test.ts --reporter=verbose` -> passed with `14` tests. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:integration-background` -> + passed with `4` files and `5` tests. + - `node scripts/run-vitest-project-timings.js --projects=integration-background --repeat=3` -> + passed with median `2.74s`, worst `3.12s`. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:unit` -> passed with `383` + passed and `1` skipped. + - `coderabbit review --agent -t uncommitted -c AGENTS.md -c .coderabbit.yaml -f docs/review/fixed-findings.md -f tests/unit/background-runtime.test.ts` -> CodeRabbit raised 0 issues. 
+
+## 2026-04-30
+
+### test-review.md / Phase 1 - Background-Concurrency stabilisieren
+
+- Status: fixed
+- Scope: Background-Concurrency-Smoke behaelt den echten Cross-Process-Vertrag, liefert aber im
+  Fehlerfall jetzt verwertbare Diagnose zu CLI-Ausgaben, Registry, Runtime-Root, Ports und
+  Background-Logs. Background-Registry-/Lock-/Runtime-Identity-Branches sind zusaetzlich auf dem
+  schnellen Unit-Layer abgedeckt.
+- Fix reference: `b7fbe80` `Stabilize background concurrency tests`
+- Closed findings:
+  - `test-review.md / H-01` - Background-Concurrency flakt unter wiederholter Last, Teil
+    Diagnostik und Lock-/Registry-Branch-Abdeckung.
+  - `test-review.md / Phase 1` - Background-Concurrency stabilisieren.
+- Guardrails:
+  - `runCli` trennt stdout/stderr und liefert Signal, Args und kombinierten Output weiterhin
+    kompatibel fuer bestehende Tests.
+  - `waitForBackgroundRegistry` meldet bei Timeout Registry-Inhalt und Background-Log-Snippets.
+  - Der Concurrent-Startup-Test prueft nach Readiness erneut, dass beide Registry-Eintraege noch
+    vorhanden sind und eindeutige URLs besitzen.
+  - Background-Logdateien enthalten jetzt den Parent-PID-Suffix, damit parallele Starts keine
+    Diagnose-Logs teilen.
+  - Unit-Tests decken Runtime-Identity Fetch, nicht verfuegbare Runtime-Responses, stale Locks,
+    Lock-Timeout, invalid Registry Payloads, ESRCH beim Stop und Browser-Open-Env fuer
+    Background-Kinder ab.
+- Validation:
+  - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/background-runtime.test.ts --reporter=verbose` -> passed.
+  - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project integration-background tests/integration/server-background-concurrency.test.ts --reporter=verbose` -> passed.
+  - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:integration-background` -> passed.
+ - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:benchmark -- --projects=integration-background` -> passed with median `4.95s`, worst `5.51s` in the final isolated run; an earlier same-session isolated control before extra load measured median `2.52s`, worst `2.66s`. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:unit` -> passed with `361 passed | 1 skipped`. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `151` + files, `616 passed | 1 skipped`. + - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel` -> passed with Playwright `11 passed`. +- Measurement notes: + - Coverage summary after this phase: Statements `81.98%`, Branches `69.51%`, Functions + `83.06%`, Lines `83.43%`. + - `server/background-runtime.js` improved to Statements `58.60%`, Branches `53.89%`, Functions + `73.52%`, Lines `59.52%`. + - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> 0 + issues. + - Commit is SSH-signed. + +### test-review.md / Phase 2 - Frontend/jsdom-Kosten senken + +- Status: fixed +- Scope: Command-Palette-Gruppen- und ID-Vertraege laufen jetzt im schnellen + Node-Unit-Layer; ein schmaler Command-Palette-Render-Smoke bleibt in einer bestehenden + i18n-jsdom-Datei. Der redundante FilterBar-Preset-Order-DOM-Test wurde entfernt, weil die + gemeinsame Preset-Reihenfolge bereits im Unit-Layer abgesichert ist. +- Fix reference: `1657822` `Move command palette contracts to unit tests` +- Closed findings: + - `test-review.md / H-02` - Frontend/jsdom dominiert die Vitest-Walltime, Teil Command-Palette + Contract-Tests und redundante Preset-Assertion. + - `test-review.md / Phase 2` - Frontend/jsdom-Kosten senken. 
+- Guardrails: + - Command-IDs fuer Load-Data-, Export- und Maintenance-Aktionen bleiben im Builder-Vertrag + explizit stabil. + - Lokalisierte Command-Gruppen werden im Unit-Layer ueber einen kontrollierten Translator + geprueft. + - Der verbleibende jsdom-Smoke prueft weiterhin, dass die Command Palette gerendert werden kann + und representative Commands in deutscher UI sichtbar sind. + - FilterBar-Preset-Reihenfolge bleibt ueber `DASHBOARD_QUICK_DATE_PRESETS` im Unit-Test + abgesichert. + - Keine Produktionsdateien, kein Layout und kein App-Verhalten wurden geaendert. +- Validation: + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/command-palette-commands.test.ts tests/unit/dashboard-preferences.test.ts --reporter=verbose` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project frontend tests/frontend/filter-bar-presets.test.tsx --reporter=verbose` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project frontend tests/frontend/dashboard-language-regressions.test.tsx --reporter=verbose` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:unit` -> passed with + `363 passed | 1 skipped`. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:frontend` -> passed with `76` + files and `204 passed`. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:projects -- --projects=frontend` -> passed with `76` files, `204 passed`, and `55.86s` measured under high local system load. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with global + coverage unchanged from Phase 1. 
+ - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel` -> passed with Playwright `11 passed`. + - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> + CodeRabbit raised 0 issues. +- Measurement notes: + - Frontend/jsdom files dropped from `77` to `76`. + - Frontend project now runs `204` jsdom tests; removed broad Command-Palette group coverage was + replaced by cheaper Unit-Contract coverage plus one existing-file render smoke. + - Coverage summary after this phase: Statements `81.98%`, Branches `69.51%`, Functions + `83.06%`, Lines `83.43%`. + - Current absolute frontend timings are load-distorted; the structural improvement is lower + jsdom file count with no coverage regression. + - Commit is SSH-signed. + +### test-review.md / Phase 3 - Prozessnahe Tests weiter in Vertraege und Branches trennen + +- Status: fixed +- Scope: Runner-Resolution-, Command-Error-, stderr-streaming-, external-close- und Timeout- + Branches laufen jetzt im Fake-Spawn-Unit-Layer; der echte Cross-Process-File-Lock-Smoke nutzt + ein explizites IPC-Release statt eines fixed `250ms` Sleeps. Shared Dashboard-Preference- + Normalisierungs-Branches sind als billige Node-Unit-Tests stabilisiert, damit Coverage nicht von + teuren jsdom- oder Prozess-Smokes abhaengt. +- Fix reference: `7555c0e` `Harden process-adjacent test coverage` +- Closed findings: + - `test-review.md / H-03` - Prozessnahe Tests sind teilweise zu breit, Teil Runner-Branch- + Coverage und File-Lock-Smoke-Wartezeit. + - `test-review.md / H-04` - Kritische Runtime-/Config-Branches sind unterdurchschnittlich + abgedeckt, Teil Auto-Import-Runner und Dashboard-Preference-Normalisierung. + - `test-review.md / Phase 3` - Prozessnahe Tests weiter in Vertraege und Branches trennen. 
+- Guardrails: + - Echte Runner-Smokes fuer lokale Installation, PATH-Fallback, Timeout und stdout-Fehler bleiben + erhalten; neue Fake-Spawn-Tests decken die branchigen Fehler- und Fallback-Pfade zusaetzlich + deterministisch ab. + - Der Fake-Timer-Timeout-Test beweist, dass `runToktrack` erst nach Child-Exit rejected und nicht + direkt beim Timeout-Alarm settled. + - `signalOnClose`, stderr streaming und Spawn-Error-Wrapping werden ohne echte Subprozesse + getestet. + - Der Cross-Process-File-Lock-Test wartet nicht mehr ueber elapsed time, sondern haelt den + Child-Lock bis zum expliziten Parent-Release. + - Dashboard-Preference-Tests sichern malformed config, defensive default filters, visibility, + ordering und custom active range ohne jsdom ab. +- Validation: + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/dashboard-preferences.test.ts tests/unit/server-helpers-runner-core.test.ts tests/unit/server-helpers-runner-process.test.ts tests/unit/server-helpers-file-locks.test.ts --reporter=verbose` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:projects -- --projects=unit` -> passed, but absolute timing was distorted by local system CPU load. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `150` + files, `623 passed | 1 skipped`. + - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel` -> passed with Playwright `11 passed`. + - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> + CodeRabbit raised 0 issues. +- Measurement notes: + - Coverage summary after this phase: Statements `82.07%`, Branches `69.58%`, Functions + `83.11%`, Lines `83.52%`. 
+ - `server/auto-import-runtime.js` improved to Statements `69.95%`, Branches `54.72%`, + Functions `79.03%`, Lines `69.43%`. + - `shared/dashboard-preferences.js` improved to Statements `99.13%`, Branches `94.11%`, + Functions `100%`, Lines `99.09%`. + - Full gate timing under high local load: Unit `9.54s`, Frontend `73.51s`, Background + Integration `7.97s`, Playwright `11 passed` in `42.3s`; `ps aux` showed heavy non-test load + from WindowServer, ControlCenter, IDEA, Bitdefender and WebKit, so these absolute timings are + not a stable performance baseline. + - Commit is SSH-signed. + +### test-review.md / Phase 4 - Coverage risikogewichtet erhoehen + +- Status: fixed +- Scope: HTTP-Router-Branch-Coverage wurde auf die produktionskritischen Auto-Import-Stream- und + Report-Request-Fehlerpfade erweitert, ohne echte Server, Browser oder Typst-Prozesse zu starten. +- Fix reference: `f2b3400` `Cover router auto-import error paths` +- Closed findings: + - `test-review.md / H-04` - Kritische Runtime-Branches sind unterdurchschnittlich abgedeckt, + Teil HTTP-Router Auto-Import-Stream, Lease-Handling und Report-Body-Fehler. + - `test-review.md / Phase 4` - Coverage risikogewichtet erhoehen. +- Guardrails: + - Der Router-Mock unterstuetzt jetzt `res.write(...)`, damit Server-Sent-Event-Streams im + Unit-Layer realistisch akkumuliert werden. + - Auto-Import-Erfolg prueft `check`, `progress`, `stderr`, `success` und `done` Events sowie die + Weitergabe der erworbenen Lease an `performAutoImport`. + - Concurrent Auto-Import Starts werden als lokalisierte `409`-Antwort abgebildet. + - Strukturierte Auto-Import-Fehler pruefen `error` und `done` Stream-Events und stellen sicher, + dass die Lease auch im Fehlerpfad genau einmal freigegeben wird. + - Malformed Report-Requests liefern `400` mit `Invalid report request`, getrennt von oversized + Request- und fehlendem Typst-Fehlerpfad. 
+- Validation: + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/http-router-mutations.test.ts --reporter=verbose` -> passed with `21` tests. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:projects -- --projects=unit` + -> passed with unit median `3.69s` in the local timing summary. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `150` + files, `626 passed | 1 skipped`. + - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel` -> passed with Playwright `11 passed`. + - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f tests/unit/http-router-mutations.test.ts` -> CodeRabbit raised 0 issues. +- Measurement notes: + - Coverage summary after this phase: Statements `82.46%`, Branches `70.01%`, Functions + `83.11%`, Lines `83.95%`. + - `server/http-router.js` improved to Statements `68%`, Branches `54.63%`, Functions `68%`, + Lines `68%`. + - Full gate timing in the normalized local run: Integration `2.69s`, Unit `2.64s`, Frontend + `22.34s`, Architecture `3.49s`, Integration-Background `1.95s`, Playwright `11 passed` in + `14.7s`. + - Commit is SSH-signed. + +### test-review.md / Phase 5 - Lokale und CI-Gates datenbasiert nachschaerfen + +- Status: fixed +- Scope: Das lokale Parallel-Gate deklariert jetzt die Report-/Artefakt-Outputs der parallel + laufenden Tasks, prueft vor dem Spawn auf Output-Kollisionen und gibt nach jedem Run eine + per-Task-Timing-Zusammenfassung aus. Die bestehende serielle Referenz bleibt unveraendert. 
+- Fix reference: `eab08c5` `Harden parallel gate diagnostics`
+- Closed findings:
+  - `test-review.md / M-02` - Timing-Budgets erkennen Ausreisser, Teil lokale
+    Task-Walltime-Sichtbarkeit im Full-Gate.
+  - `test-review.md / Phase 5` - Lokale und CI-Gates datenbasiert nachschaerfen.
+- Guardrails:
+  - `node scripts/run-parallel-gate.js --dry-run --e2e` zeigt jetzt Task-Wellen und deklarierte
+    Outputs wie `test-results/vitest-frontend.junit.xml`, `playwright-report/` und
+    `test-results/`.
+  - `findParallelOutputCollisions(...)` verhindert, dass zwei Tasks in derselben Welle denselben
+    Output-Pfad deklarieren.
+  - Erfolgreiche und fehlschlagende Gate-Runs drucken eine Timing-Summary der bereits beendeten
+    Tasks, damit lokale Engpaesse ohne JUnit-Inspektion sichtbar sind.
+  - `tests/frontend/metric-ratio-locale.test.tsx` fixiert den aktuellen Monat selbst und stellt
+    Timer/Globals wieder zurueck; der MonthMetrics-Test ist damit nicht mehr vom echten Datum oder
+    von fremden Fake-Timer-Zustaenden abhaengig.
+  - `docs/testing.md` dokumentiert Dry-Run, Output-Pruefung und Timing-Summary als Gate-Runbook.
+- Validation:
+  - `node scripts/run-parallel-gate.js --dry-run --e2e` -> passed.
+  - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/run-parallel-gate.test.ts --reporter=verbose` -> passed with `7` tests.
+  - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project frontend tests/frontend/metric-ratio-locale.test.tsx --reporter=verbose` -> passed with `3` tests.
+  - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed.
+  - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed.
+  - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed.
+  - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:projects -- --projects=unit`
+    -> passed with unit median `3.29s`, worst `3.29s`.
+  - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `150`
+    files, `629 passed | 1 skipped`.
+  - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel` -> passed with Playwright `11 passed`.
+  - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` ->
+    CodeRabbit raised 0 issues.
+- Measurement notes:
+  - Coverage summary after this phase: Statements `82.30%`, Branches `69.78%`, Functions
+    `83.06%`, Lines `83.80%`.
+  - Full gate timing summary: Static `6.73s`, Integration `4.01s`, Build `1.07s`, Unit `5.65s`,
+    Frontend `52.49s`, Architecture `6.24s`, Integration-Background `3.39s`,
+    Package-Smoke `21.34s`, E2E `25.67s`.
+  - The Full-Gate bottleneck in this run is still frontend/jsdom, followed by Playwright and the
+    package smoke.
+  - Commit is SSH-signed.
+
+### test-review.md / Phase 6 - Playwright klein und wertvoll halten
+
+- Status: fixed
+- Scope: Playwright-Scope, worker-isolierte Fixtures, CI-Worker-Cap und Reportpfade sind jetzt im
+  schnellen Unit-Layer abgesichert. Die Browser-Suite bleibt bei den bestehenden `11`
+  repraesentativen Smoke-Flows.
+- Fix reference: `efb9cee` `Guard Playwright smoke scope`
+- Closed findings:
+  - `test-review.md / M-03` - E2E-Sharding ist erst nach Report-Isolation sinnvoll, Teil
+    Reporterpfad-Guardrail.
+  - `test-review.md / Phase 6` - Playwright klein und wertvoll halten.
+- Guardrails:
+  - `tests/unit/playwright-config.test.ts` prueft `testDir`, `fullyParallel`, CI-Worker-Cap,
+    fehlenden globalen `webServer`, Trace/Screenshot/Video-Policy und stabile
+    HTML-/JUnit-Reporterpfade.
+  - E2E-Specs muessen `test`/`expect` aus `tests/e2e/fixtures.ts` importieren und duerfen nicht
+    direkt `@playwright/test` verwenden; damit bleiben Worker-Port, Runtime-Root und Auth-Session
+    isoliert.
+  - Die erlaubte Journey-Liste ist explizit: Command Palette, Forecast/Filters, Load/Upload,
+    Reporting und Settings/Backups.
+ - Die Browser-Suite bleibt auf `11` Tests und hat eine obere Guardrail von `12`, damit neue + Contract-Matrizen nicht unbemerkt in Playwright wachsen. + - `docs/testing.md` dokumentiert, dass dieser Vertrag bewusst angepasst werden muss, wenn eine + neue echte Browser-Journey dazukommt. +- Validation: + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/playwright-config.test.ts --reporter=verbose` -> passed with `4` tests. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `150` + files, `632 passed | 1 skipped`. + - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel` -> passed with Playwright `11 passed`. + - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> + CodeRabbit raised 0 issues. +- Measurement notes: + - Coverage summary after this phase: Statements `82.30%`, Branches `69.78%`, Functions + `83.06%`, Lines `83.80%`. + - Full gate timing summary: Static `6.12s`, Integration `3.94s`, Build `1.30s`, Unit `3.62s`, + Frontend `23.68s`, Architecture `5.07s`, Integration-Background `2.96s`, + Package-Smoke `13.16s`, E2E `17.02s`. + - Playwright stayed at `11` browser tests and `workers=2` for CI-style runs. + - Commit is SSH-signed. + +### test-review.md / Phase 5 - Lokales Full-Gate parallelisieren + +- Status: fixed +- Scope: neues lokales, lastbewusstes Parallel-Gate fuer statische Checks, Vitest-Projekte, Build, + Package-Smoke und optionalen E2E-Full-Gate. +- Fix reference: `67db759` `Add parallel verification gate` +- Closed findings: + - `test-review.md / H-04` - Lokales Full-Gate laeuft unnoetig seriell, obwohl grosse Teile + unabhaengig sind. 
+ - `test-review.md / Phase 5` - Lokale und CI-nahe Verifikation parallel und robuster + orchestrieren. +- Guardrails: + - `verify:parallel` ueberlappt nur die gut kombinierbaren Gates `test:static`, + `test:integration` und `build:app`. + - Die intern bereits stark parallelen oder prozessnahen Suites laufen in eigenen Wellen: + `test:unit`, `test:frontend`, `test:architecture` und `test:integration:background`. + - `verify:package` laeuft erst nach erfolgreichem Build und erfolgreichen Testwellen, damit der + Paket-Smoke weiterhin gegen das frische Produktionsbundle prueft. + - `verify:full:parallel` haengt Playwright an die finale Welle. + - Der E2E-Datepicker-Test fokussiert einen stabilen mittleren Monatstag, damit der Test am + Monatsletzten nicht faelschlich erwartet, dass `ArrowRight` ueber das Monatsende hinaus + navigiert. + - Unit-Tests decken Dry-Run, E2E-Plan, Spawn-Argumente, Fehlerweitergabe und Paket-Smoke- + Abhaengigkeit ab. +- Validation: + - `node scripts/run-parallel-gate.js --dry-run --e2e` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/run-parallel-gate.test.ts tests/unit/test-pipeline-scripts.test.ts --reporter=verbose` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npx playwright test tests/e2e/dashboard-forecast-filters.spec.ts -g 'exposes pressed filter state' --workers=1 --reporter=line` -> passed. + - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel` -> passed with Playwright `11 passed`. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `151` + files, `609 passed | 1 skipped`. 
+ - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> 0 + issues. +- Measurement notes: + - Naive "alles parallel" war nicht robust: Architektur konnte unter CPU-Saettigung in das 5s- + Testtimeout laufen; `integration-background` konnte unter voller Last beim parallelen + Background-Start nur eine Registry-Instanz sehen. + - Balancierte finale Messung: Integration `3.06s`, Unit `3.25s`, Frontend `25.89s`, + Architektur `4.44s`, Integration-Background `3.08s`, Playwright `11 passed` in `51.8s`. + - Coverage summary nach dieser Phase: Statements `81.54%`, Branches `69.33%`, Functions + `82.59%`, Lines `82.97%`. + - Commit is SSH-signed. + +### test-review.md / Phase 4 - Runtime-Branch-Coverage + +- Status: fixed +- Scope: gezielte Branch-Tests fuer Prozess-Utilities und Background-Stop-Fehlerpfad. +- Fix reference: `44c073d` `Cover process runtime edge cases` +- Closed findings: + - `test-review.md / H-03` - Coverage-Luecken liegen in kritischen Runtime-Branches, Teil + `server/process-utils.js` und `server/background-runtime.js`. + - `test-review.md / Phase 4` - Coverage risikogewichtet erhoehen. +- Guardrails: + - `process-utils.test.ts` prueft `isProcessRunning` fuer invalid PID, laufenden Prozess, ESRCH + und EPERM sowie `sleep` mit Fake Timers. + - `background-runtime.test.ts` prueft, dass `ttdash stop` bei EPERM den Fehlerpfad und Exitcode + korrekt setzt, ohne echte Prozesse zu signalisieren. +- Validation: + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/process-utils.test.ts tests/unit/background-runtime.test.ts --reporter=verbose` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `150` + files, `605 passed | 1 skipped`. 
+ - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> 0 + issues. +- Coverage notes: + - Global coverage after this phase: Statements `81.46%`, Branches `69.09%`, Functions `82.54%`, + Lines `82.86%`. + - `server/process-utils.js` now reports Statements `100%`, Functions `100%`, Lines `100%`, + Branches `80%`. + - `server/background-runtime.js` improved to Statements `46.51%`, Branches `47.90%`, + Functions `47.05%`, Lines `47.14%`. + - Commit is SSH-signed. + +### test-review.md / Phase 3 - Prozessnahe Runner-Tests straffen + +- Status: fixed +- Scope: reine Latest-Version Timeout-/Cache-Branches laufen jetzt mit Fake Spawn im Node-Unit- + Test statt mit echten PATH-Skripten und Subprozessen. +- Fix reference: `c4882ae` `Move toktrack cache tests to fake spawn` +- Closed findings: + - `test-review.md / H-02` - Prozessnahe Tests mischen Vertragspruefung und Branch-Coverage, + Teil Toktrack-Latest-Version Cache/Timeout. + - `test-review.md / Phase 3` - Prozessnahe Tests straffen. +- Guardrails: + - Echte Prozess-Smokes bleiben fuer lokale Binary, PATH-Fallback, Runner-Timeout und stdout- + Fehler bestehen. + - Cache-Erfolg, Cache-Fehler und Lookup-Timeout werden ueber Fake Spawn, Fake Timers und + kontrolliertes `Date.now()` abgedeckt. +- Validation: + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/server-helpers-runner-core.test.ts tests/unit/server-helpers-runner-process.test.ts --reporter=verbose` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:projects -- --projects=unit` -> + passed. + - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> 0 + issues. 
+- Measurement notes: + - `tests/unit/server-helpers-runner-process.test.ts` dropped from earlier `3.105s` suite time to + `0.878s` in the unit project timing run. + - Unit project timing summary for this phase: median `6.41s`. + - Commit is SSH-signed. + +### test-review.md / Phase 2 - Frontend/jsdom Worker-Parallelitaet + +- Status: fixed +- Scope: datenbasierte Anpassung der Frontend/jsdom Worker-Obergrenze von `50%` auf `80%`. +- Fix reference: `2932697` `Tune frontend test parallelism` +- Closed findings: + - `test-review.md / H-01` - Frontend/jsdom ist der aktuelle Hauptkostentreiber. + - `test-review.md / Phase 2` - Frontend/jsdom-Kosten senken, Teil Worker-Kalibrierung. +- Guardrails: + - `vitest-coverage-config.test.ts` prueft, dass das Frontend-Projekt `maxWorkers: '80%'` + nutzt und nicht auf volle CPU-Saettigung gestellt wird. + - `docs/testing.md` dokumentiert die gemessene Begruendung und das Re-Benchmark-Kommando. +- Validation: + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/vitest-coverage-config.test.ts --reporter=verbose` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:projects -- --projects=frontend` -> passed with 77 files / 206 tests. + - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> 0 issues. +- Measurement notes: + - Old configured `50%` project run in the same window: `55.97s` after CPU saturation. + - `80%` probe before the 100% stress run: `27.46s`. + - `100%` probe: `84.84s`, confirming full saturation is harmful. + - Configured `80%` validation after the stress run: `48.13s`, still faster than the paired + post-saturation `50%` run. + - Commit is SSH-signed. 
+ +### test-review.md / Phase 1 - Messbarkeit und Benchmarking + +- Status: fixed +- Scope: projektweise Vitest-Timing-Messung mit isolierten JUnit-Reports und wiederholbaren + Benchmark-Laeufen fuer Median/Worst-Run-Auswertung. +- Fix reference: `944a049` `Add project test timing benchmark` +- Closed findings: + - `test-review.md / Phase 1` - Messbarkeit und Benchmarks stabilisieren. + - `test-review.md / M-02` - Timing-Budgets sind vorhanden, aber noch grob. +- Guardrails: + - `test:timings:projects` schreibt projektbezogene Reports wie + `test-results/vitest-frontend.timing.junit.xml`. + - `test:timings:benchmark` wiederholt die Projektmessung dreimal und schreibt `timing-run-N` + Reports, damit keine normalen JUnit-Ausgaben ueberschrieben werden. + - Unit-Tests decken CLI-Parsing, eindeutige Reportpfade, Budget-Kommandos, Dry-Run und Median ab. +- Validation: + - `node scripts/run-vitest-project-timings.js --projects=unit --repeat=1 --dry-run` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/run-vitest-project-timings.test.ts tests/unit/test-pipeline-scripts.test.ts --reporter=verbose` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:projects -- --projects=unit` -> + passed. + - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> 0 + issues. +- Notes: + - Commit is SSH-signed. Local `git log --show-signature` verification still needs + `gpg.ssh.allowedSignersFile`, but `git cat-file -p HEAD` contains the `gpgsig` block. + - Earlier CodeRabbit uncommitted review surfaced the pre-existing `.gitignore`/`docs/review` + policy issue. That was outside this phase and intentionally not mixed into the phase commit. 
+ +## 2026-04-29 + +### test-review.md implementation index + +- Status: fixed +- Scope: phasenweise Umsetzung der Testsystem-Review vom 2026-04-28. Diese Liste referenziert die + konkret geschlossenen Findings aus `docs/review/test-review.md` und die Commits, in denen die + jeweilige Verbesserung gelandet ist. + +| Finding | Status | Fix reference | Validation | +| --- | --- | --- | --- | +| `test-review.md / H-01` - Playwright ist nicht worker-isoliert | fixed | `ab49513` `Isolate Playwright workers` | `npm run test:e2e:ci` mit 2 Workern gruen; final `verify:full` gruen | +| `test-review.md / H-02` - CI ist ein monolithischer serieller Full-Gate | fixed | `b5cac6a` `Split CI test pipeline` | CI-DAG in `.github/workflows/ci.yml`; final `verify:full` gruen | +| `test-review.md / H-03` - Command-Palette-E2E mischt zu viele Testarten | fixed | `cb31d35` `Move command palette contracts to Vitest` | Command-Contract in Vitest, E2E auf Smokes reduziert; final Playwright `11 passed` | +| `test-review.md / H-04` - Report- und Coverage-Ausgaben sind nicht matrix-sicher | fixed | `c4abb2b` `Make Vitest setup and reports parallel-safe`, `b5cac6a` `Split CI test pipeline` | Projekt-/Job-spezifische JUnit/Artifact-Pfade; CI-Matrix nutzt getrennte Reports | +| `test-review.md / M-02` - Node-Projekte laden unnoetiges React-Test-Setup | fixed | `c4abb2b` `Make Vitest setup and reports parallel-safe` | `vitest.setup.node.ts` / `vitest.setup.frontend.ts` getrennt; Guardrail in `vitest-coverage-config.test.ts` | +| `test-review.md / M-03` - Coverage ist global gruen, aber risikogewichtet schwach | fixed | `3a4285d` `Improve runtime and dashboard coverage`, `117cfeb` `Fix CodeRabbit test robustness issues` | Runtime-/Dashboard-Coverage gezielt erhoeht; final Coverage-Gate gruen | +| `test-review.md / M-05` - Statische Checks haben Doppelarbeit | fixed | `b5cac6a` `Split CI test pipeline` | `test:static` als ein regulaerer Static-Gate, Cache fuer Prettier/ESLint/TypeScript | +| 
`test-review.md / M-06` - Package-Smoke gehoert in eigenen CI-Job | fixed | `b5cac6a` `Split CI test pipeline` | `build` erzeugt `production-dist`; `package-smoke` und `e2e` nutzen Artifact parallel | +| `test-review.md / M-07` - Timing-Regressions sind sichtbar, aber nicht budgetiert | fixed | `e9d54fa` `Add test timing budgets` | `test:timings:budget` gruen; CI-Matrix prueft JUnit-Reports gegen `20s` Suite / `12s` Test Budget | + +Nicht als vollstaendig geschlossen markiert: + +- `test-review.md / M-01` - `frontend/jsdom` bleibt der groesste Vitest-Kostentreiber. Die CI-DAG, + Setup-Trennung und der zentrale Frontend-Timeout reduzieren Flakiness und Blockierzeit, aber die + Import-/Environment-Kosten wurden nicht als eigener Optimierungsabschluss entfernt. +- `test-review.md / M-04` - Prozessnahe Tests wurden begrenzt und budgetiert, aber reale Subprozesse + bleiben absichtlich fuer Shell-, PATH-, CLI- und Cross-Process-Verhalten bestehen. +- `test-review.md / N-01` - macOS-Sandbox/Playwright wurde als Agent-Memory dokumentiert; das ist + kein Repository-Fix und bleibt eine Laufzeitumgebungsregel. + +- Validation: + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:budget` -> passed with `149` + files and `596 passed | 1 skipped`. + - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full` -> + passed, including coverage, build, package verification, and Playwright `11 passed`. + - `coderabbit review --agent -t committed -c AGENTS.md` after Phase 6 -> `0 issues`. + +## 2026-04-28 + +### documentation-review.md / N-01 + +- Status: fixed +- Scope: the stale `security-review.md / H-01` fixed-finding note about remote `?ttdash_token=...` browser bootstrap was corrected. The entry now matches the current runtime and integration test contract: remote API auth accepts Bearer, `X-TTDash-Remote-Token`, or an explicit auth cookie, while automatic query-token-to-cookie bootstrap is a local-session flow. 
+- Guardrails: `tests/integration/server-remote-auth.test.ts` asserts that remote query bootstrap does not return the local `303` cookie redirect, and `server/remote-auth.js` only resolves query-token bootstrap when local session auth is active. +- Validation: + - `npm run format:check` + - `git diff --check` + +## 2026-04-27 + +### test-review.md / H-01 + +- Status: fixed +- Scope: simple architecture rules no longer depend on repeated ArchUnit project scans. `tests/architecture/source-graph.ts` now owns one cached `src/**` file scan, TypeScript-based import/export parsing, alias resolution for `@/...`, relative import resolution, extension resolution, and `index` module resolution. The frontend layer, unused-hook, hook-naming, and shared-UI-placement tests use this shared graph while the feature-slice diagram remains on ArchUnit where its diagram model adds value. +- Guardrails: `tests/architecture/frontend-layers.test.ts` still blocks hooks from importing components, lib core from importing hooks/components, lib React modules from reaching back into hooks/components, and type modules from depending on components/hooks/lib. `tests/architecture/unused-hooks.test.ts`, `hook-naming.test.ts`, and `shared-ui-placement.test.ts` now reuse the same source graph so future test-layer changes do not reintroduce separate slow scans. `tests/architecture/source-graph.test.ts` covers static imports, re-exports, side-effect imports, and dynamic imports with options. +- Follow-up quality fixes during implementation: + - Dynamic `import(...)` calls are parsed alongside static imports and re-exports, so lazy edges stay visible to the architecture guardrails instead of only top-level declarations being checked. + - The formerly near-timeout `hooks must not depend on components` rule dropped from the historical `~4950ms` flake boundary to well below `100ms` in the targeted architecture run, without increasing global or local timeouts. 
+ - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. +- Validation: + - `npm run test:architecture -- --reporter=verbose` + - `npm run format:check` + - `npm run lint` + - `tsc --noEmit` + - `npm run check:deps` + - `git diff --check` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 1 minor status-line suggestion already satisfied by the current `- Status: fixed` text, round 3: 1 dynamic-import options issue fixed, final round: 0 issues + +### test-review.md / H-02 + +- Status: fixed +- Scope: Vitest coverage now reports against the product runtime instead of the former narrow frontend slice. `vitest.config.ts` includes `src/**/*.{ts,tsx}`, `server.js`, `server/**/*.js`, `shared/**/*.js`, and `usage-normalizer.js`, while excluding only TypeScript declarations, test files, and locale JSON assets from the configured runtime denominator. +- Guardrails: `tests/unit/vitest-coverage-config.test.ts` locks the coverage includes, excludes, and broad-denominator global thresholds. The thresholds ratchet the new signal at Statements `70`, Branches `60`, Functions `70`, and Lines `70`, below the measured broad baseline so CI fails on real regressions without making the first honest denominator brittle. +- Follow-up quality fixes during implementation: + - The current `npm run test:unit:coverage` baseline is Statements `72.85%`, Branches `63.01%`, Functions `74.97%`, Lines `73.88%` across the broader runtime scope. + - Server entrypoints, local server modules, shared runtime contracts, App/main entry files, and lazy dashboard sections are now visible in the coverage report instead of being hidden outside the denominator. 
+ - `docs/testing.md` documents the broader denominator and clarifies that subprocess-spawned CLI/server paths remain behaviorally covered by integration, background, and Playwright tests even when V8 main-process line attribution stays lower. + - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. +- Validation: + - `npx vitest run --project unit tests/unit/vitest-coverage-config.test.ts --reporter=verbose` + - `npm run test:unit:coverage` + - `npm run format:check` + - `npm run lint` + - `tsc --noEmit` + - `npm run check:deps` + - `npx vitest run --project architecture --reporter=verbose` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues + +### test-review.md / M-01 + +- Status: fixed +- Scope: dead hook visibility now has an explicit architecture guardrail. The two originally cited unused hook files, `src/hooks/use-theme.ts` and `src/hooks/use-provider-limits.ts`, had already been removed during the earlier `code-review.md / N-01` cleanup; this phase strengthens the test-review guardrail so the same class of dead runtime hook cannot silently return. +- Guardrails: `tests/architecture/unused-hooks.test.ts` now treats a hook as live only when it is reachable from `src/main.tsx`, rather than merely imported by any production file. A focused synthetic regression covers a dead hook cluster where one unused hook imports another, so future dead-code islands cannot satisfy the guardrail by importing each other. +- Follow-up quality fixes during implementation: + - `docs/architecture.md` and `docs/testing.md` now describe the hook rule as app-entrypoint reachability, matching the enforced behavior. + - `dependency-cruiser` remains responsible for dependency boundaries and cycle/orphan visibility, while `npm run test:architecture` owns the precise unused-hook signal that the original finding needed. 
+ - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. +- Validation: + - `npm run test:architecture` + - `npm run check:deps` + - `npm run verify:full` + - `npm run test:timings` + - `git diff --check` + - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues + +### test-review.md / M-02 + +- Status: fixed +- Scope: the highest-cost and hang-prone test paths were split, made deterministic, and given bounded cleanup without changing production behavior. The auto-import singleton integration test now coordinates the fake Toktrack runner through start/release sentinel files instead of a fixed 2-second delay, and the previous background catch-all suite was split into focused background prefix, selection, concurrency, registry, and startup CLI files. +- Guardrails: the `integration-background` Vitest project now allows file-level scheduling with a capped `maxWorkers: 2`, so independent background process tests can overlap without turning the test run into an unbounded process fan-out. `docs/testing.md` now documents deterministic subprocess coordination and small focused background files as the expected pattern. +- Follow-up quality fixes during implementation: + - The final `npm run test:timings` run now reports the auto-import singleton test at `1.371s` instead of the historical `3.326s`, and `tests/integration/server-auto-import.test.ts` at `2.669s` instead of the historical `4.521s`. + - The old `tests/integration/server-background.test.ts` monolith no longer dominates as one `5.785s` suite; its formerly mixed cases now appear as focused files, with the slowest background slice at `3.208s` in the final coverage/timing run. 
+ - Test hangs were addressed at the harness level: server readiness/shutdown probes now have abort timeouts, CLI subprocess helpers have bounded timeouts and SIGKILL fallback, failed standalone server startup cleans up the spawned process, background cleanup force-stops leftover registry PIDs owned by the test root, and shared integration servers now wait for process shutdown in `afterAll`. + - `tests/unit/server-helpers-runner-process.test.ts` remains intentionally subprocess-backed where shell, `PATH`, timeout, and runner fallback behavior are the thing under test. + - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. +- Validation: + - `npx vitest run --project integration tests/integration/server-auto-import.test.ts tests/integration/server-startup-cli.test.ts --project integration-background tests/integration/server-background.test.ts tests/integration/server-background-selection.test.ts tests/integration/server-background-concurrency.test.ts tests/integration/server-background-registry.test.ts --reporter=verbose` + - `npx vitest run --project integration tests/integration/server-auto-import.test.ts tests/integration/server-startup-cli.test.ts tests/integration/server-local-auth.test.ts --project integration-background tests/integration/server-background.test.ts tests/integration/server-background-selection.test.ts tests/integration/server-background-concurrency.test.ts tests/integration/server-background-registry.test.ts --reporter=verbose` + - `npx vitest run --project integration-background tests/integration/server-background-registry.test.ts --reporter=verbose` + - `npx vitest run --project integration-background tests/integration/server-background-selection.test.ts --reporter=verbose` + - `tsc --noEmit` + - `npx vitest run --project integration --project integration-background --reporter=verbose` + - `npm run verify:full` -> final run passed, including Playwright `15 passed` + - `npm run test:timings` -> 
completed without hanging after the cleanup hardening + - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> multiple rounds: 0 issues initially, 4 minor test-harness/test-clarity issues fixed, latest round 0 issues + +### test-review.md / M-03 + +- Status: fixed +- Scope: the Playwright dashboard monolith was split by user journey without changing dashboard runtime behavior. The former `tests/e2e/dashboard.spec.ts` coverage now lives in focused load/upload, forecast/filter, settings/backups, and reporting specs, while command-palette coverage remains separate. +- Guardrails: shared E2E auth, state reset, usage seeding, file upload, mocked auto-import/report, download-recording, and dashboard test-hook access now live in `tests/e2e/helpers.ts`. `docs/testing.md` documents journey-based Playwright files so future browser coverage does not grow back into a catch-all suite. +- Follow-up quality fixes during implementation: + - The CSP/browser-error smoke coverage moved to `tests/e2e/dashboard-load-upload.spec.ts`, replacing stale references to the removed dashboard monolith. + - The report-language test now resets both usage and settings before switching locale, making it independent from earlier Playwright file order. + - The largest dashboard-specific E2E files are now focused around settings/backups and command-palette behavior instead of one mixed dashboard file. + - The recurring silent coverage/timing hang was traced to Vitest runs that emitted only the JUnit reporter in this non-interactive gate. `test:unit:coverage` and `test:timings` now keep the JUnit artifact and also emit the `dot` reporter, so long coverage runs show progress and finish cleanly instead of depending on a silent reporter path. + - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. 
+- Validation: + - `npx vitest run --project unit tests/unit/vitest-coverage-config.test.ts --reporter=verbose` -> passed, including the guardrail for explicit `dot` plus `junit` reporters on coverage-heavy scripts. + - `npm run test:unit:coverage` -> passed with `135` files, `496` tests passed, `1` skipped, and no silent hang after the reporter fix. + - `npm run test:timings` -> passed and printed timing diagnostics; the slowest suites remained existing server integration subprocess paths, while the split E2E work is outside this Vitest-only timing gate. + - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` -> passed with `15` Playwright tests after the journey split. + - `npm run verify:full` -> final run passed, including format, lint, docstring lint, dependency-cruiser, `tsc --noEmit`, architecture tests, coverage, package verification, production build, and Playwright `15 passed`. + - Targeted Playwright follow-ups passed for `tests/e2e/dashboard-load-upload.spec.ts` and `tests/e2e/dashboard-settings-backups.spec.ts` after CodeRabbit requested locale-resilient label assertions. + - `git diff --check` -> passed after this validation update. + - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> rounds 1 and 3 reported minor test/doc clarity issues that were fixed; rounds 2, 4, and 5 reported 0 issues; round 6 reported this missing validation detail and was fixed by this entry; the follow-up review after the validation update reported 0 issues. + +## 2026-04-26 + +### server-review.md / H-01 + +- Status: fixed +- Scope: `server.js` was reduced to dependency/runtime wiring during this phase, then further reduced by `server-review.md / M-01` to an executable CLI/Bin shim. 
CLI parsing/help moved to `server/cli.js`, startup summaries/browser opening/local auth-session metadata moved to `server/startup-runtime.js`, shared process helpers moved to `server/process-utils.js`, and HTTP server lifecycle, CLI routing, startup sequencing, client errors, and shutdown cleanup moved to `server/server-lifecycle.js`.
+- Guardrails: `tests/architecture/server-entrypoint-contract.test.ts` blocks local helper function definitions, `__test__` exports, and direct `http.createServer(...)` calls from returning to `server.js`. `tests/unit/server-cli.test.ts`, `tests/unit/startup-runtime.test.ts`, and `tests/unit/server-lifecycle.test.ts` cover the extracted behavior directly. Existing server helper tests now instantiate `server/data-runtime.js` and `server/auto-import-runtime.js` directly instead of importing `server.js`.
+- Follow-up quality fixes during implementation:
+ - The production `server.js.__test__` helper surface was removed as part of the entrypoint split; tests now target the owning runtime modules.
+ - The cross-process file-lock test now loads `server/data-runtime.js` directly in its child process, so it still validates real lock behavior without loading the CLI entrypoint.
+ - The startup data summary now pluralizes `1 day` versus `N days` correctly without changing the existing cost or token formatting.
+ - Background-child shutdown now logs unregister failures, suppresses unhandled promise rejections, exits through a finally-style path, and prevents duplicate shutdown completion when graceful close and forced timeout race.
+ - CLI help now documents the supported `-bg` legacy background alias alongside `-b` and `--background`, so displayed usage matches parser behavior.
+ - Startup behavior remains intentionally unchanged: auth bootstrap URL output, remote warnings, browser opening, background registration, auto-load logging, package startup, and API routing keep the same runtime contracts. 
+- Validation: + - `npx vitest run --project unit tests/unit/server-cli.test.ts tests/unit/startup-runtime.test.ts tests/unit/server-lifecycle.test.ts tests/unit/server-helpers-network.test.ts tests/unit/server-helpers-runner-core.test.ts tests/unit/server-helpers-runner-process.test.ts tests/unit/server-helpers-file-locks.test.ts --reporter=verbose` + - `npx vitest run --project architecture tests/architecture/server-entrypoint-contract.test.ts --reporter=verbose` + - `npm run format:check` + - `npm run lint` + - `tsc --noEmit` + - `npm run test:architecture` + - `npm run check:deps` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> rounds 1-2: 0 issues; round 3: 2 minor issues fixed; round 4: 1 minor issue fixed + +### server-review.md / M-01 + +- Status: fixed +- Scope: `server.js` is now only the executable CLI/Bin shim. Runtime composition moved to `server/app-runtime.js`, which builds the injected server lifecycle and keeps the background process entrypoint pointed at package-root `server.js`. The undocumented `require('./server.js').bootstrapCli/runCli` import surface was removed; in-process test/server starts now use `createAppRuntime(...)` explicitly. +- Guardrails: `tests/architecture/server-entrypoint-contract.test.ts` now blocks `module.exports`, `exports.*`, local helper functions, `__test__`, direct `http.createServer(...)`, and any `server.js` require target other than `./server/app-runtime`. Playwright's `scripts/start-test-server.js` uses the explicit app-runtime composer so E2E startup still exercises the same server lifecycle without importing the executable shim as a helper module. +- Follow-up quality fixes during implementation: + - Environment-derived CLI/server configuration now lives inside `createAppRuntime(...)`, so test harnesses can set isolated storage, host, and port environment variables before composing the runtime. 
+ - `server/app-runtime.js` passes an explicit root `server.js` path to the background runtime, preserving foreground/background CLI behavior after the composition move. + - `docs/architecture.md` documents the new split between the executable shim and the app-runtime composition root. + - Dashboard UI, content, animation, API route behavior, local auth, remote auth, background CLI, packaging, and E2E startup behavior remain unchanged. +- Validation: + - `node -c server.js` + - `node -c server/app-runtime.js` + - `npx vitest run --project architecture tests/architecture/server-entrypoint-contract.test.ts --reporter=verbose` + - `npx vitest run --project unit tests/unit/server-lifecycle.test.ts tests/unit/server-cli.test.ts tests/unit/startup-runtime.test.ts --project integration tests/integration/server-local-auth.test.ts tests/integration/server-remote-auth.test.ts --reporter=verbose` + - `npx vitest run --project integration-background tests/integration/server-background.test.ts --reporter=verbose` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> multiple rounds: 0 issues + +### server-review.md / M-02 + +- Status: fixed +- Scope: mutable server runtime state is now encapsulated in per-runtime services. `server/runtime-state.js` owns the runtime snapshot, startup auto-load flag, singleton runtime lease, and expiring async cache primitives; `server/app-runtime.js`, `server/server-lifecycle.js`, and `server/startup-runtime.js` use the runtime-state service instead of ad hoc mutable flags; `server/auto-import-runtime.js` owns the Auto-Import lease and Toktrack latest-version cache through those services. +- Guardrails: `tests/unit/runtime-state.test.ts` covers runtime snapshots, startup flag isolation, singleton lease behavior, in-flight lookup deduplication, and TTL/reset behavior. 
`tests/architecture/server-runtime-state-contract.test.ts` blocks route-local `autoImportStreamRunning` state and the old free-floating Auto-Import/Toktrack cache variables from returning.
+- Follow-up quality fixes during implementation:
+ - `server/http-router.js` no longer owns an Auto-Import stream flag. It acquires a lease before sending SSE headers, so concurrent starts still return the existing HTTP `409` response without turning into streamed errors.
+ - `server/auto-import-runtime.js` remains the owner of Auto-Import execution, but the singleton state is now an explicit lease with idempotent release for normal completion, failures, and aborted streams.
+ - Toktrack latest-version lookups still share one in-flight request and keep separate success/failure TTL behavior, but the cache/promise state is hidden behind `createExpiringAsyncCache(...)`.
+ - Dashboard UI, content, animation, API paths, response shapes, local auth, remote auth, background startup, and E2E startup behavior remain unchanged. 
+- Validation: + - `node -c server/runtime-state.js` + - `node -c server/app-runtime.js` + - `node -c server/auto-import-runtime.js` + - `node -c server/http-router.js` + - `npx vitest run --project unit tests/unit/runtime-state.test.ts tests/unit/server-lifecycle.test.ts tests/unit/startup-runtime.test.ts tests/unit/server-helpers-runner-process.test.ts --project architecture tests/architecture/server-entrypoint-contract.test.ts tests/architecture/server-runtime-state-contract.test.ts --reporter=verbose` + - `npx vitest run --project integration tests/integration/server-auto-import.test.ts tests/integration/server-api-routing-runtime.test.ts tests/integration/server-local-auth.test.ts tests/integration/server-remote-auth.test.ts --project integration-background tests/integration/server-background.test.ts --reporter=verbose` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> multiple rounds: 0 issues + +### server-review.md / N-01 + +- Status: fixed +- Scope: Host, Origin, Sec-Fetch-Site, and JSON content-type request policy moved from the broader HTTP utility module into `server/http-request-guards.js`. `server/http-utils.js` remains the compatible facade for router consumers and still owns request body parsing, JSON responses, buffer responses, and API-prefix resolution. +- Guardrails: `tests/unit/http-request-guards.test.ts` covers loopback, wildcard, non-loopback, IPv6, origin, cross-site, and content-type behavior directly. `tests/unit/http-utils.test.ts` now focuses on the HTTP facade and body/response behavior. `tests/architecture/server-http-boundaries.test.ts` keeps request guard policy out of the router and out of generic HTTP utility internals. +- Follow-up quality fixes during implementation: + - The request guard code now tolerates missing `req.headers` defensively while preserving the same rejection path for malformed or missing Host/Origin input. 
+ - Wildcard binds now apply the intended socket-local host check before exact bind-host matching, so `Host: 0.0.0.0` is not treated as a trusted client-facing host. + - Body-size and response-header behavior gained focused unit coverage, so the split did not trade broad utility tests for narrower policy-only tests. + - Dashboard UI, content, animation, API paths, response shapes, local auth, remote auth, background startup, and E2E startup behavior remain unchanged. +- Validation: + - `node -c server/http-request-guards.js` + - `node -c server/http-utils.js` + - `npx vitest run --project unit tests/unit/http-request-guards.test.ts tests/unit/http-utils.test.ts --project architecture tests/architecture/server-http-boundaries.test.ts tests/architecture/server-entrypoint-contract.test.ts tests/architecture/server-runtime-state-contract.test.ts --reporter=verbose` + - `npx vitest run --project integration tests/integration/server-api-guards.test.ts tests/integration/server-remote-auth.test.ts --reporter=verbose` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> multiple rounds: 0 issues + +## 2026-04-25 + +### security-review.md / H-01 + +- Status: fixed +- Scope: explicit non-loopback binding now requires `TTDASH_REMOTE_TOKEN` in addition to `TTDASH_ALLOW_REMOTE=1`. Remote API requests are authenticated centrally before route handling through Bearer auth, `X-TTDash-Remote-Token`, or an HttpOnly same-site cookie; the later `security-review.md / M-01` fix extends the same auth boundary to default loopback sessions. +- Guardrails: `tests/unit/remote-auth.test.ts` covers remote-mode configuration, accepted credential forms, generic rejection paths, timing-safe length-independent comparison behavior, and the browser bootstrap redirect/cookie contract. 
`tests/integration/server-remote-auth.test.ts` covers failed startup without a token, protected remote API reads, cookie bootstrap, and preserved Origin mutation guards. Existing server guard tests continue to cover host validation, malformed paths, oversized payloads, and cross-site rejection. +- Follow-up quality fixes during implementation: + - Remote authentication lives in `server/remote-auth.js` as a focused server boundary, while `server/http-router.js` only applies the injected gate before API routing and static bootstrap handling. + - Remote API access is supported through Bearer auth, `X-TTDash-Remote-Token`, or an explicit `ttdash_auth` cookie. The automatic `?ttdash_token=...` bootstrap-to-cookie redirect is a local-session flow, not the current remote-mode browser flow; `tests/integration/server-remote-auth.test.ts` locks that remote query bootstrap does not set a cookie. + - Background runtime identity checks now send the remote Bearer header when the parent process is running in authenticated remote mode, so remote background management keeps working. + - Startup help and remote warnings now mention the additional token requirement without logging the token value. 
+- Validation: + - `npx vitest run --project unit tests/unit/remote-auth.test.ts tests/unit/http-utils.test.ts tests/unit/background-runtime.test.ts --reporter=verbose` + - `npx vitest run --project integration tests/integration/server-remote-auth.test.ts tests/integration/server-api-guards.test.ts --reporter=verbose` + - `npx vitest run --project integration-background tests/integration/server-background.test.ts --reporter=verbose` + - `npm run format:check` + - `npm run lint` + - `tsc --noEmit` + - `npm run test:architecture` + - `npm run check:deps` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues, round 4: 0 issues + +### security-review.md / M-01 + +- Status: fixed +- Scope: default loopback servers now generate a per-start local session token and require authentication for every API endpoint, including `/api/usage`, `/api/settings`, `/api/runtime`, and `/api/toktrack/version-status`. Normal dashboard startup stays frictionless because the auto-open URL carries the one-time bootstrap token, which is exchanged for an HttpOnly/SameSite cookie and stripped from the redirected URL. +- Guardrails: `tests/unit/remote-auth.test.ts` covers default local session auth, generated local tokens, explicit test opt-out, shared credential parsing, bootstrap redirects, and generic rejection paths. `tests/integration/server-local-auth.test.ts` covers protected loopback reads, Bearer and cookie access, preserved mutation Origin guards, and restrictive auth-session file permissions. Existing remote, background, persistence, recovery, routing, and E2E tests were updated to authenticate through the same boundary. +- Follow-up quality fixes during implementation: + - The existing `server/remote-auth.js` boundary now owns both local session auth and remote auth so API route handlers stay unaware of the source of the credential. 
+ - `server.js` writes the local session metadata to restrictive user config state and prints a `Local Auth URL` only when browser auto-open is disabled. + - Background instance registry entries now carry per-instance auth headers and bootstrap URLs so `ttdash stop`, registry pruning, and no-open background starts remain usable. + - Playwright and integration test helpers bootstrap through the same local session file instead of bypassing the production auth path. +- Residual risk: + - This protects the local HTTP API from unauthenticated loopback access, browser/DNS-rebinding-style access, and other OS users. It does not fully isolate against malware already running as the same OS user, which can target user config files, terminal output, or browser state. +- Validation: + - `npx vitest run --project unit tests/unit/remote-auth.test.ts tests/unit/background-runtime.test.ts --reporter=verbose` + - `npx vitest run --project integration tests/integration/server-local-auth.test.ts tests/integration/server-api-guards.test.ts tests/integration/server-api-persistence.test.ts tests/integration/server-api-routing-runtime.test.ts tests/integration/server-api-recovery.test.ts tests/integration/server-remote-auth.test.ts --reporter=verbose` + - `npx vitest run --project integration-background tests/integration/server-background.test.ts --reporter=verbose` + - `PLAYWRIGHT_TEST_PORT=3020 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` + - `npm run format:check` + - `npm run lint` + - `tsc --noEmit` + - `npm run test:architecture` + - `npm run check:deps` + - `npm run verify:package` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 2 minor documentation issues fixed, round 4: 0 issues + +### security-review.md / N-01 + +- Status: fixed +- Scope: the server CSP no longer allows `unsafe-inline` styles. 
Shared security headers now live in `server/security-headers.js`, HTML responses get a per-response CSP nonce plus a matching `ttdash-csp-nonce` meta tag, `style-src-elem` is limited to `self` and the nonce, and `style-src-attr 'none'` blocks literal inline style attributes. +- Guardrails: `tests/unit/security-headers.test.ts` covers CSP construction, nonce shape, nonce meta injection, HTML response preparation, and non-HTML header behavior. `tests/integration/server-api-guards.test.ts` checks the strict CSP on authenticated API responses. `tests/e2e/dashboard-load-upload.spec.ts` verifies that the loaded dashboard HTML carries the nonce-backed CSP and that the browser reports no CSP errors while the main dashboard journey runs. +- Follow-up quality fixes during implementation: + - CSP generation moved out of `server.js`, so future header changes have a focused unit-testable boundary. + - `server/http-router.js` now treats HTML static responses separately from other assets, allowing nonce-specific headers without weakening API, JSON, CSS, or JS asset responses. + - React/Recharts/Motion JS-driven style property updates remain allowed because the browser-enforced risk path for this finding is literal inline style attributes or inline style elements; preserving those runtime style properties avoids UI and animation regressions without reintroducing `unsafe-inline`. 
+- Validation: + - `npx vitest run --project unit tests/unit/security-headers.test.ts --reporter=verbose` + - `npx vitest run --project integration tests/integration/server-api-guards.test.ts --reporter=verbose` + - `npm run format:check` + - `npm run lint` + - `tsc --noEmit` + - `npm run test:architecture` + - `npm run check:deps` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 1 minor grammar issue in `docs/application-stack-reference.md` fixed, round 3: 0 issues + +### performance-review.md / H-01 + +- Status: fixed +- Scope: secondary cost-analysis charts now leave the initial dashboard bundle and load through the section warmup path. Dashboard sections also get an adaptive, deduplicated preload scheduler that starts visible lazy section chunks shortly after the first render and keeps IntersectionObserver preloading far enough ahead of the viewport to avoid visible lazy-loading gaps while scrolling. +- Guardrails: `tests/frontend/dashboard-motion.test.tsx` covers idle/fallback scheduling, deduplication, cancellation, early preloading, and inert hidden content. `tests/unit/dashboard-section-preloading.test.ts` covers visible-section queue ordering, hidden/request-data gating, and duplicate task removal. +- Follow-up quality fixes during implementation: + - Cost-analysis entry charts (`CostOverTime`, `CostByModel`) were moved from static dashboard imports to lazy chunks, reducing the built main `index` chunk from roughly `212 kB` raw / `57 kB` gzip to roughly `198 kB` raw / `53 kB` gzip. + - The warmup scheduler now uses a short idle timeout with bounded parallelism so deeper sections are warmed without waiting for late viewport proximity. + - Section placeholders for deeper analysis/table areas now better match final section heights, preventing scroll-command layout shift while lazy chunks complete above the target section. 
+- Validation: + - `npm run test:unit -- tests/frontend/dashboard-motion.test.tsx tests/unit/dashboard-section-preloading.test.ts` + - `npx playwright test tests/e2e/command-palette.spec.ts -g "executes analysis section navigation commands"` + - `npm run lint` + - `npm run test:architecture` + - `npm run check:deps` + - `tsc --noEmit` + - `npm run build:app` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues + +### performance-review.md / M-01 + +- Status: fixed +- Scope: dashboard filter changes now flow through a centralized `deriveDashboardFilterData(...)` pass that derives date/month filtering, provider/model filtering, available filter options, date range, and view-mode aggregation behind the stable `useDashboardFilters` contract. Computed dashboard summaries now share one normalized breakdown aggregation for model costs, provider metrics, and model options behind the stable `useComputedMetrics` contract. +- Guardrails: `tests/unit/dashboard-filter-data.test.ts` compares the new filter derivation with the previous staged semantics across representative filter combinations and empty states. `tests/unit/dashboard-aggregation.test.ts` locks normalized model/provider aggregation and day counting. `tests/frontend/use-computed-metrics.test.tsx` covers the public computed-metrics hook boundary. +- Follow-up quality fixes during implementation: + - `src/lib/dashboard-filter-data.ts` imports directly from the shared dashboard-domain contract instead of routing through broader frontend transform helpers, keeping the hook dependency graph smaller and the architecture layer test below its timeout budget. + - `src/lib/data-transforms.ts` now fills scalar chart arrays and model-name discovery in one sorted-data pass instead of separate `map(...)` and `flatMap(...)` passes. 
+ - `computeModelCosts(...)` and `computeProviderMetrics(...)` now delegate to the shared breakdown summary, so standalone calculation callers and dashboard hook callers use the same aggregation path. + - After CodeRabbit review, computed `allModels` now unions `modelsUsed` and `modelBreakdowns`, so inconsistent imported data can no longer hide a breakdown-backed model from the model-over-time chart while existing modelsUsed-only behavior remains preserved. +- Validation: + - `npm run test:unit -- tests/unit/dashboard-filter-data.test.ts tests/unit/dashboard-aggregation.test.ts tests/frontend/use-computed-metrics.test.tsx tests/frontend/use-dashboard-filters.test.tsx tests/unit/analytics.test.ts tests/unit/data-transforms.test.ts tests/unit/code-rabbit-phase4.test.ts` + - `npm run test:unit -- tests/unit/dashboard-aggregation.test.ts tests/frontend/use-computed-metrics.test.tsx tests/unit/analytics.test.ts` + - `npm run format:check` + - `npm run lint` + - `tsc --noEmit` + - `npm run test:architecture` + - `npm run check:deps` + - `npm run verify:full` -> completed after the CodeRabbit follow-up fix + - `npm run test:timings` -> completed after the CodeRabbit follow-up fix + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 1 minor issue fixed, round 3: 0 issues, round 4: 0 issues + +### performance-review.md / M-02 + +- Status: fixed +- Scope: the Settings dialog no longer starts `/api/toktrack/version-status` when it opens. The dashboard shell now schedules one cancellable, idle-friendly toktrack latest-version warmup per browser session through `src/lib/toktrack-version-status.ts`, and the settings version hook only formats the shared session snapshot for the Maintenance tab. +- Guardrails: `tests/unit/toktrack-version-status.test.ts` covers the pinned initial snapshot, concurrent warmup deduplication, cached failure behavior, scheduled fallback warmup, and cancellation. 
`tests/frontend/settings-modal-version-status.test.tsx` covers warmed status rendering without a dialog-open fetch, the no-fetch dialog-open path, and cached failure reuse across reopen. `tests/frontend/dashboard-filter-visibility.test.tsx` covers that the dashboard shell wires the session warmup scheduler. +- Follow-up quality fixes during implementation: + - `docs/architecture.md` now documents the session-wide toktrack version status cache as the owner of lookup state, while the settings modal hook is only the presentation adapter. + - Dashboard tests mock the warmup scheduler explicitly so future dashboard renders cannot accidentally perform real toktrack version network work during component tests. + - The server `/api/toktrack/version-status` contract and existing server-side success/failure TTL cache remain unchanged, so the fix changes when the UI asks for the status, not how the status is resolved. +- Validation: + - `npm run test:unit -- tests/unit/toktrack-version-status.test.ts tests/frontend/settings-modal-version-status.test.tsx tests/frontend/settings-modal-tabs.test.tsx tests/frontend/dashboard-filter-visibility.test.tsx tests/frontend/dashboard-error-state.test.tsx tests/unit/api.test.ts` + - `npm run format:check` + - `npm run lint` + - `tsc --noEmit` + - `npm run test:architecture` + - `npm run check:deps` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues + +### performance-review.md / N-01 + +- Status: fixed +- Scope: complex dashboard UI islands now separate calculation-heavy view data from rendering, accessibility, and motion. Drilldown details, heatmap grids, request-quality ratios, sortable table rows, recent-day benchmarks, and date-picker calendar navigation are derived through focused `src/lib/*-data.ts` helpers while the existing frontend functionality, visual structure, and animations stay unchanged. 
+- Guardrails: new unit suites cover drilldown aggregation/rankings/token segments, heatmap grid and keyboard target derivation, request-quality ratios/progress, table sort/row derivation, and date-picker calendar actions. Existing frontend suites still cover the DOM, ARIA, keyboard, and motion contracts for the touched components. +- Follow-up quality fixes during implementation: + - React component tests for heatmap and drilldown were narrowed to visible UI/A11y contracts after the derived branches moved to fast unit coverage, and over-broad timeout overrides were removed from sortable table tests. + - `docs/architecture.md` now documents the non-presentational data-derivation helpers as the boundary for complex dashboard UI islands. + - `npm run test:timings` shows the new helper suites stay out of the slowest-suite list; the remaining slow entries are existing integration, motion, settings, and broad table/UI interaction paths. +- Validation: + - `npx vitest run --project unit tests/unit/drill-down-data.test.ts tests/unit/heatmap-calendar-data.test.ts tests/unit/request-quality-data.test.ts tests/unit/sortable-table-data.test.ts tests/unit/filter-date-picker-data.test.ts tests/unit/recent-days-reveal.test.ts --reporter=verbose` + - `npx vitest run --project frontend tests/frontend/drill-down-modal-content.test.tsx tests/frontend/drill-down-modal-motion.test.tsx tests/frontend/heatmap-calendar-accessibility.test.tsx tests/frontend/request-quality.test.tsx tests/frontend/sortable-table-provider-model.test.tsx tests/frontend/sortable-table-recent-days.test.tsx tests/frontend/filter-bar-date-picker.test.tsx --reporter=verbose` + - `npm run format:check` + - `npm run lint` + - `tsc --noEmit` + - `npm run test:architecture` + - `npm run check:deps` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues + +### dashboard-review.md / N-02 + +- Status: fixed +- 
Scope: `src/components/dashboard/DashboardSections.tsx` already consumes a single structured `DashboardSectionsViewModel`, and `src/types/dashboard-view-model.d.ts` keeps the section data split into named section bundles instead of flat dashboard props. This closes the original broad `DashboardSectionsProps` concern without changing visible dashboard functionality, content, UI, or animations. +- Guardrails: `tests/architecture/dashboard-sections-contract.test.ts` now locks the public `DashboardSections` prop contract to one `viewModel` prop and keeps `DashboardSectionsViewModel` split into the intended layout, analysis, table, comparison, and interaction bundles. +- Follow-up quality fixes during implementation: + - No production refactor was needed because the broader view-model boundary had already been introduced by `architecture-review.md / M-01`; this change adds a targeted regression guardrail so future section work does not reintroduce wide flat props. + - `docs/architecture.md` already documents the intended DashboardSections boundary, so no duplicate architecture text was added. 
+- Validation: + - `npm run test:architecture` + - `npm run test:unit -- tests/frontend/dashboard-filter-visibility.test.tsx` + - `tsc --noEmit` + - `npm run lint` + - `npm run check:deps` + - `npm run verify:full` + - `npm run test:timings` -> attempted twice; both runs failed under external CPU pressure with migrating 5s timeouts in existing unrelated frontend suites, while `npm run verify:full` had just completed the same coverage test set successfully; the new architecture guardrail itself stayed below 100ms in `npm run test:architecture` + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files docs/review/fixed-findings.md tests/architecture/dashboard-sections-contract.test.ts` -> 0 issues + +## 2026-04-24 + +### dashboard-review.md / N-01 + +- Status: fixed +- Scope: the dashboard action landscape now separates everyday data loading, export/use actions, and maintenance actions in the header, while the Command Palette mirrors the same intent split for load data, exports, and maintenance. Existing actions and command IDs remain available. +- Guardrails: `tests/frontend/header-links.test.tsx` covers localized header action groups and preserved button access, while `tests/frontend/command-palette-action-groups.test.tsx` covers localized Command Palette groups and stable command IDs. +- Follow-up quality fixes during implementation: + - Header action rendering is now owned by a private `HeaderActions` composition inside `Header.tsx`, keeping the global header shell separate from action information architecture. + - Command Palette action commands are grouped by intent instead of sharing one broad `Actions` bucket, without changing command handlers or `data-testid` contracts. 
+- Validation: + - `npm run test:unit -- tests/frontend/header-links.test.tsx tests/frontend/command-palette-action-groups.test.tsx` + - `npx playwright test tests/e2e/command-palette.spec.ts` + - `tsc --noEmit` + - `npm run lint` + - `npm run test:architecture` + - `npm run check:deps` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md -f ...` -> only reported unrelated untracked `docs/application-stack-reference.md`; the file was not changed + - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir src/components` -> 0 issues + - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir tests/frontend` -> 0 issues + - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir shared/locales` -> 0 issues + - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir docs/review` -> 0 issues + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files docs/architecture.md` -> 0 issues + +### dashboard-review.md / M-02 + +- Status: fixed +- Scope: `src/components/layout/FilterBar.tsx` was reduced to a composition shell over private filter groups for status, time presets, date range, and provider/model chips. The visible dashboard filtering capabilities remain intact while the bar now shows lightly grouped Time, Date range, Providers, and Models areas. +- Guardrails: `tests/frontend/filter-bar-accessibility.test.tsx` covers localized filter groups, while the existing date-picker and preset/chip suites continue to lock keyboard focus, date clearing, preset highlighting, and chip `aria-pressed`/visual states. `.dependency-cruiser.cjs` now keeps the new FilterBar internals private to the FilterBar shell. +- Follow-up quality fixes during implementation: + - The custom date picker logic now lives in `FilterBarDateRange.tsx`, isolating its portal, overlay positioning, keyboard navigation, and focus restoration from the main filter shell. 
+ - Provider and model chip rendering now lives in `FilterBarChipFilters.tsx`, keeping provider badge styling and model color state local to chip filters. + - Provider badge alpha variants now flow through `getProviderBadgeStyle(...)`, so included provider chips no longer parse or mutate CSS strings in the UI. + - Date-picker calendar labels now recompute from the active locale while the picker is open, and weekday cells use stable keys. +- Validation: + - `npm run test:unit -- tests/frontend/filter-bar-accessibility.test.tsx tests/frontend/filter-bar-date-picker.test.tsx tests/frontend/filter-bar-presets.test.tsx tests/unit/model-colors.test.ts` + - `tsc --noEmit` + - `npm run lint` + - `npm run test:architecture` + - `npm run check:deps` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md -f ...` -> fixed relevant minor findings for weekday keys, provider badge alpha handling, locale-reactive calendar labels, and included-chip style collisions; remaining global uncommitted finding is isolated to unrelated untracked `docs/application-stack-reference.md`. + - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir src/components/layout` -> 0 issues + - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir src/lib` -> 0 issues + - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir tests` -> 0 issues + - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir shared` -> 0 issues + - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir docs/review` -> 0 issues + - `coderabbit review --agent -t uncommitted -c AGENTS.md --files .dependency-cruiser.cjs` -> 0 issues + +### dashboard-review.md / M-01 + +- Status: fixed +- Scope: `src/components/features/settings/SettingsModal.tsx` was changed from one long settings surface into a tabbed workspace with Basics, Layout, Limits, and Maintenance areas. 
Existing settings capabilities remain available, but day-to-day preferences are separated from section layout, provider limits, and backup/version-maintenance actions. +- Guardrails: `tests/frontend/settings-modal-tabs.test.tsx` covers default tab state, section grouping, and keyboard navigation, while the existing focused settings suites now exercise their sections through the relevant tab. `docs/architecture.md` documents the tabbed settings shell and split motion/toktrack version ownership. +- Follow-up quality fixes during implementation: + - `SettingsModalSections.tsx` now separates the reduced-motion card from the toktrack version-status card so Maintenance can own diagnostic/version information without mixing it into daily dashboard behavior settings. + - `SettingsModal.tsx` now resets the active settings tab while the dialog is closed, so reopening the dialog always starts from Basics without a transient stale tab render. + - `tests/e2e/command-palette.spec.ts` now splits the formerly near-timeout section-navigation coverage into smaller scroll, dashboard-section, and analysis-section tests after the full gate exposed the old monolithic test as a reliability/performance risk. 
+- Validation: + - `npm run test:unit -- tests/frontend/settings-modal-tabs.test.tsx tests/frontend/settings-modal-language.test.tsx tests/frontend/settings-modal-version-status.test.tsx tests/frontend/settings-modal-defaults.test.tsx tests/frontend/settings-modal-sections.test.tsx tests/frontend/settings-modal-backups.test.tsx tests/frontend/settings-modal-provider-limits.test.tsx tests/frontend/settings-modal-draft-state.test.tsx` + - `npx playwright test tests/e2e/command-palette.spec.ts` + - `npm run test:architecture` + - `npm run check:deps` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues + +### code-review.md / N-01 + +- Status: fixed +- Scope: removed the unused `src/hooks/use-theme.ts` and `src/hooks/use-provider-limits.ts` files. Their active responsibilities already live in the persisted settings flow: theme application goes through `applyTheme` and dashboard controller effects, while provider-limit synchronization goes through `useAppSettings` and `syncProviderLimits`. +- Guardrails: `tests/architecture/unused-hooks.test.ts` now fails when a production hook file under `src/hooks/` has no production import, and `docs/architecture.md` plus `docs/testing.md` document that unused hook helpers should be removed instead of retained as speculative code. +- Follow-up quality fixes during implementation: + - The guardrail covers the same dead-hook visibility gap that made the original `0%` coverage files easy to miss, so future unused hook files are caught by `npm run test:architecture`. 
+- Validation: + - `npm run test:architecture` + - `npm run check:deps` + - `npm run test:unit -- tests/frontend/react-query-hooks.test.tsx tests/frontend/dashboard-controller-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the CLI: `47 minutes and 10 seconds`) + +### code-review.md / M-01 + +- Status: fixed +- Scope: dashboard date preset behavior is centralized in the shared dashboard preferences contract. `src/hooks/use-dashboard-filters.ts` applies presets through `resolveDashboardPresetRange`, and `src/components/layout/FilterBar.tsx` derives the active UI state through `resolveDashboardActivePreset`; both flow through `src/lib/dashboard-preferences.ts` to `shared/dashboard-preferences.js`, so applying and displaying presets no longer duplicate the `7d`, `30d`, `month`, `year`, and `all` rules. +- Guardrails: `tests/unit/dashboard-preferences.test.ts` locks the shared/frontend preset contract, while `tests/frontend/use-dashboard-filters.test.tsx` and `tests/frontend/filter-bar-presets.test.tsx` cover applying presets in the hook and highlighting them in the filter bar. +- Follow-up quality fixes during implementation: + - No production or test changes were needed for this finding because the shared preset contract and focused regression coverage already existed; the fix was to document the concrete `code-review.md / M-01` reference as closed. 
+- Validation: + - `npm run test:unit -- tests/unit/dashboard-preferences.test.ts tests/frontend/use-dashboard-filters.test.tsx tests/frontend/filter-bar-presets.test.tsx` + - `npm run test:architecture` + - `npm run check:deps` + - `npm run verify:full` + - `npm run test:timings` + - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 2 minor documentation issues, fixed + - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 2: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the CLI: `57 minutes and 23 seconds`) + +### code-review.md / M-02 + +- Status: fixed +- Scope: `src/components/features/settings/SettingsModal.tsx` was reduced from the former 1000+ line all-in-one dialog into a shell/composition root over extracted settings sections, a draft-state hook, a version-status hook, and modal-local helper logic. The visible settings areas now live behind `src/components/features/settings/SettingsModalSections.tsx`, while the draft/save/reset behavior moved into `use-settings-modal-draft.ts` and the toktrack lookup behavior moved into `use-settings-modal-version-status.ts`. +- Guardrails: `docs/architecture.md` now documents the settings modal shell, internal section bundle, and private helper hooks as a feature-internal composition boundary, and `.dependency-cruiser.cjs` now blocks unrelated frontend modules from importing `SettingsModalSections.tsx`, `use-settings-modal-draft.ts`, `use-settings-modal-version-status.ts`, or `settings-modal-helpers.ts` directly. +- Follow-up quality fixes during implementation: + - `src/components/features/settings/settings-modal-helpers.ts` now owns the extracted number parsing, selection normalization, provider-limit draft building/patching, and section reorder helpers so tests no longer import utility behavior through the UI shell. 
+ - `use-settings-modal-draft.ts` now clones incoming section drafts and patches empty provider configs from `DEFAULT_PROVIDER_LIMIT_CONFIG`, preserving the previous provider-limit safety behavior after the refactor. + - `use-settings-modal-draft.ts` now initializes modal drafts once per open session and resets that guard on close, so external prop churn can no longer overwrite in-progress edits while the dialog remains open. + - `tests/unit/settings-modal-helpers.test.ts` now covers the extracted settings helpers directly, and `tests/unit/code-rabbit-phase1.test.ts` was narrowed back to the unrelated chart/auto-import helpers it actually owns. + - `tests/frontend/settings-modal-defaults.test.tsx`, `settings-modal-sections.test.tsx`, `settings-modal-backups.test.tsx`, `settings-modal-provider-limits.test.tsx`, and `settings-modal-draft-state.test.tsx` now split the modal coverage by responsibility instead of adding another broad catch-all dialog suite. +- Validation: + - `npm run test:unit -- tests/unit/settings-modal-helpers.test.ts tests/unit/code-rabbit-phase1.test.ts tests/frontend/settings-modal-language.test.tsx tests/frontend/settings-modal-version-status.test.tsx tests/frontend/settings-modal-defaults.test.tsx tests/frontend/settings-modal-sections.test.tsx tests/frontend/settings-modal-backups.test.tsx tests/frontend/settings-modal-provider-limits.test.tsx` + - `npm run check` + - `npm run test:architecture` + - `npm run test:timings` + - `npm_config_cache=/tmp/ttdash-npm-cache npm run verify:release` + - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` + - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 1 minor issue, fixed (draft state no longer reinitializes while the modal stays open; `tests/frontend/settings-modal-draft-state.test.tsx` added) + - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 2: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the 
CLI: `54 minutes and 32 seconds`) + +### code-review.md / H-01 + +- Status: fixed +- Scope: `src/hooks/use-dashboard-controller.ts` was reduced from the remaining god-hook into a composition root over focused internal controller slices. The heavy derived state, browser IO, dialog ownership, drill-down navigation, shell/load state, and imperative dashboard actions now live in `src/hooks/use-dashboard-controller-actions.ts`, `use-dashboard-controller-browser.ts`, `use-dashboard-controller-derived-state.ts`, `use-dashboard-controller-dialogs.ts`, `use-dashboard-controller-drill-down.ts`, `use-dashboard-controller-effects.ts`, and `use-dashboard-controller-shell-state.ts`, while the public controller contract stayed stable through `src/hooks/use-dashboard-controller.ts`. +- Guardrails: `docs/architecture.md` now documents the internal dashboard controller slices and the browser-IO helper as private implementation details behind the public controller hook, and `.dependency-cruiser.cjs` now blocks component-level fanout to the internal `use-dashboard-controller-*.ts` slices while keeping the intentional type-only controller contract out of the orphan warning path. +- Follow-up quality fixes during implementation: + - `tests/frontend/dashboard-controller-browser.test.tsx` now locks the extracted browser helper responsibilities for JSON downloads, section scrolling, and the test-only `openSettings` bridge. + - `tests/frontend/dashboard-controller-drill-down.test.tsx` now covers the extracted drill-down slice directly, including the edge case where the selected day disappears after filtering changes. + - `src/hooks/use-dashboard-controller-actions.ts` now uses the upload-specific fallback toast (`api.uploadFailed`) after a successful JSON parse when the backend rejects a usage upload without an `Error` instance, and `tests/frontend/dashboard-error-state.test.tsx` covers that regression explicitly. 
+ - `npm run test:timings` showed no new performance hotspot in the touched dashboard/controller suites; the new slice tests stay sub-100ms and the existing dashboard controller/public-shell tests remained fast. +- Validation: + - `npm run check` + - `npm run test:architecture` + - `npm run test:timings` + - `npm run test:unit -- tests/frontend/dashboard-controller-browser.test.tsx tests/frontend/dashboard-controller-drill-down.test.tsx tests/frontend/dashboard-controller-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx tests/frontend/dashboard-filter-visibility.test.tsx tests/frontend/dashboard-error-state.test.tsx` + - `npm run test:unit -- tests/frontend/dashboard-error-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx tests/frontend/dashboard-controller-browser.test.tsx` + - `npm_config_cache=/tmp/ttdash-npm-cache npm run verify:release` + - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` + - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 1 minor issue, fixed (`api.uploadFailed` fallback for backend upload rejection after successful JSON parse) + - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 2: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the CLI: `52 minutes and 50 seconds`) + +## 2026-04-23 + +### architecture-review.md / H-01 + +- Status: fixed +- Scope: `server.js` was reduced to the CLI/bootstrap and runtime composition root; subsystem logic moved into `server/data-runtime.js`, `server/background-runtime.js`, `server/auto-import-runtime.js`, and `server/http-router.js`. +- Guardrails: `docs/architecture.md` now documents the server split, `.dependency-cruiser.cjs` prevents runtime modules from coupling back to `server.js`, the router, or each other, and `tests/unit/background-runtime.test.ts` locks the background registry snapshot behavior that was tightened during the review cycle. 
+- Follow-up quality fixes during implementation: + - `server/background-runtime.js`: removed a snapshot TOCTOU re-read so background pruning now decides cleanup from one captured registry read. + - `src/components/layout/FilterBar.tsx`: added cleanup for queued focus-restoration callbacks; `tests/frontend/filter-bar-date-picker.test.tsx` now covers the unmount path that was causing the flaky date-picker teardown in Playwright. +- Validation: + - `npm run test:architecture` + - `npm run check:deps` + - `npm run test:unit -- tests/unit/server-helpers-network.test.ts tests/unit/server-helpers-runner-core.test.ts tests/unit/server-helpers-runner-process.test.ts tests/unit/server-helpers-file-locks.test.ts tests/integration/server-auto-import.test.ts tests/integration/server-background.test.ts tests/integration/server-api-guards.test.ts` + - `npm run verify:full` + - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 1 minor issue, round 2: 0 issues + +### architecture-review.md / H-02 + +- Status: fixed +- Scope: the duplicated client/server settings contract was consolidated into `shared/app-settings.js` with typed declarations in `shared/app-settings.d.ts`; frontend adapters in `src/lib/app-settings.ts`, `src/lib/dashboard-preferences.ts`, and `src/lib/provider-limits.ts` now consume that shared contract, and `server/data-runtime.js` now normalizes persisted settings through the same source. +- Guardrails: `docs/architecture.md` now documents `shared/app-settings.js` as the single production source for persisted settings defaults and normalization, `.dependency-cruiser.cjs` blocks bypassing that contract from `server.js`, `server/data-runtime.js`, and `src/lib/app-settings.ts`, and `vitest.config.ts` now includes `shared/app-settings.js` in coverage reporting. 
+- Follow-up quality fixes during implementation: + - `tests/unit/app-settings-contract.test.ts`: locks the shared/frontend contract alignment for defaults, fragment normalization, persisted settings normalization, and runtime-only flags. + - `tests/integration/server-api-imports.test.ts`: now asserts the normalized settings-import response instead of only the status code. + - `tests/integration/server-api-persistence.test.ts` and `tests/frontend/settings-modal-test-helpers.tsx`: now derive defaults from the shared settings contract instead of re-hardcoding them in tests. + - `tests/unit/background-runtime.test.ts`: cleaned up type-only imports to keep the repo lint/type gates green after the shared typings were added. +- Validation: + - `npm run check` + - `npm run test:architecture` + - `npm run test:unit -- tests/unit/app-settings-contract.test.ts tests/unit/api.test.ts tests/unit/dashboard-preferences.test.ts tests/integration/server-api-persistence.test.ts tests/integration/server-api-imports.test.ts` + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:unit:coverage` + - `npm run build:app` + - `npm run verify:package` + - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` + - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues + +### architecture-review.md / M-01 + +- Status: fixed +- Scope: the dashboard orchestration was cut from a broad flat controller surface into focused view-model bundles in `src/hooks/use-dashboard-controller.ts`; `src/components/Dashboard.tsx` now consumes `header`, `filterBar`, `sections`, `settingsModal`, `dialogs`, `commandPalette`, `report`, and shell bundles instead of forwarding dozens of individual fields, and `src/components/dashboard/DashboardSections.tsx` now consumes one structured `DashboardSectionsViewModel`. 
+- Guardrails: `src/types/dashboard-view-model.d.ts` now owns the shared frontend-only dashboard view-model contracts, `docs/architecture.md` documents `Dashboard.tsx` as the controller composition root, and `.dependency-cruiser.cjs` now blocks component-subtree fanout to `src/hooks/use-dashboard-controller.ts`. +- Follow-up quality fixes during implementation: + - `src/components/layout/Header.tsx`, `src/components/layout/FilterBar.tsx`, `src/components/features/command-palette/CommandPalette.tsx`, and `src/components/features/settings/SettingsModal.tsx` now type their props from the shared dashboard view-model contracts instead of re-declaring local prop shapes. + - `tests/frontend/dashboard-controller-test-helpers.ts` now provides bundle-based controller and section factories, so dashboard composition tests no longer rebuild a flat mega-mock. + - `tests/frontend/dashboard-controller-actions.test.tsx` now covers drill-down navigation from the controller-owned dialog bundle, locking the logic that moved out of `Dashboard.tsx`. + - `tests/frontend/dashboard-filter-visibility.test.tsx` now also asserts that `DashboardSections` receives a structured `viewModel` bundle instead of flat section props. 
+- Validation: + - `npm run check` + - `npm run test:architecture` + - `npm run test:unit -- tests/frontend/dashboard-controller-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx tests/frontend/dashboard-filter-visibility.test.tsx` + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:unit:coverage` + - `npm run build:app` + - `npm run verify:package` + - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` + - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues + +### architecture-review.md / M-02 + +- Status: fixed +- Scope: dashboard preset, section, and settings-adjacent UI rules now flow through `shared/dashboard-preferences.js` with declarations in `shared/dashboard-preferences.d.ts`; `shared/app-settings.js` consumes that shared contract, `src/lib/dashboard-preferences.ts` was reduced to a thin adapter, and the duplicated preset semantics were removed from `src/hooks/use-dashboard-filters.ts` and `src/components/layout/FilterBar.tsx`. +- Guardrails: `docs/architecture.md` now documents the shared dashboard contract separately from the app settings contract, `.dependency-cruiser.cjs` blocks production imports of `shared/dashboard-preferences.json` outside `shared/dashboard-preferences.js`, and `vitest.config.ts` now includes `shared/dashboard-preferences.js` in coverage. +- Follow-up quality fixes during implementation: + - `tests/unit/dashboard-preferences.test.ts` now locks the shared/frontend adapter alignment for config parsing, section metadata, preset-range resolution, and active-preset detection. + - `tests/frontend/use-dashboard-filters.test.tsx` now asserts preset application and reset behavior against the shared preset resolver instead of re-hardcoding date ranges. + - `tests/frontend/filter-bar-presets.test.tsx` now seeds active-preset UI states from the shared resolver while preserving the existing visible quick-select order. 
+ - `src/types/dashboard-view-model.d.ts` and `src/hooks/use-dashboard-controller.ts` now type preset actions with `DashboardDatePreset` instead of broad strings. +- Validation: + - `npm run check` + - `npm run test:architecture` + - `npm run check:deps` + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:unit:coverage` + - `npm run build:app` + - `npm run verify:package` + - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` + - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues diff --git a/tests/unit/background-runtime.test.ts b/tests/unit/background-runtime.test.ts index 4b99d29..90ae188 100644 --- a/tests/unit/background-runtime.test.ts +++ b/tests/unit/background-runtime.test.ts @@ -455,6 +455,139 @@ describe('background runtime', () => { } }) + it('prints a no-op message when no background instances are running', async () => { + const logSpy = vi.spyOn(console, 'log').mockImplementation(() => undefined) + const runtime = createTestBackgroundRuntime() + + try { + await runtime.runStopCommand() + + expect(logSpy).toHaveBeenCalledWith('No running TTDash background servers found.') + } finally { + logSpy.mockRestore() + } + }) + + it('cancels the stop command when the multi-instance prompt is left empty', async () => { + const registryEntries = [ + { + id: 'first-instance', + pid: 101, + port: 3101, + url: 'http://127.0.0.1:3101', + startedAt: '2026-04-01T08:00:00.000Z', + }, + { + id: 'second-instance', + pid: 102, + port: 3102, + url: 'http://127.0.0.1:3102', + startedAt: '2026-04-01T09:00:00.000Z', + }, + ] + const question = vi.fn(async () => '') + const close = vi.fn() + const processObject = { + ...process, + env: {}, + execPath: process.execPath, + kill: vi.fn(), + stdin: {} as NodeJS.ReadStream, + stdout: {} as NodeJS.WriteStream, + } as unknown as NodeJS.Process + const logSpy = vi.spyOn(console, 'log').mockImplementation(() => undefined) + const runtime = 
createTestBackgroundRuntime({ + fs: { + readFileSync: vi.fn(() => JSON.stringify(registryEntries)), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + }, + processObject, + fetchImpl: vi.fn(async (input: URL) => { + const port = Number(input.port) + return { + ok: true, + json: async () => ({ + id: port === 3101 ? 'first-instance' : 'second-instance', + port, + }), + } + }), + readlinePromises: { + createInterface: vi.fn(() => ({ question, close })), + } as unknown as typeof readlinePromisesModule, + }) + + try { + await runtime.runStopCommand() + + expect(question).toHaveBeenCalledWith( + 'Which instance should be stopped? [1-2, Enter=cancel] ', + ) + expect(close).toHaveBeenCalled() + expect(logSpy).toHaveBeenCalledWith('Canceled.') + expect(processObject.kill).not.toHaveBeenCalled() + } finally { + logSpy.mockRestore() + } + }) + + it('reports a timeout and log file when a stopped instance keeps reporting the same runtime', async () => { + let currentTime = 0 + const dateNowSpy = vi.spyOn(Date, 'now').mockImplementation(() => currentTime) + const registryEntries = [ + { + id: 'owned-instance', + pid: 101, + port: 3101, + url: 'http://127.0.0.1:3101', + startedAt: '2026-04-01T08:00:00.000Z', + logFile: '/tmp/ttdash-cache/background/server-timeout.log', + }, + ] + const processObject = { + ...process, + env: {}, + execPath: process.execPath, + exitCode: 0, + kill: vi.fn(), + } as unknown as NodeJS.Process + const errorSpy = vi.spyOn(console, 'error').mockImplementation(() => undefined) + const runtime = createTestBackgroundRuntime({ + fs: { + readFileSync: vi.fn(() => JSON.stringify(registryEntries)), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + }, + processObject, + fetchImpl: vi.fn(async () => ({ + ok: true, + json: async () => ({ id: 'owned-instance', port: 3101 }), + })), + sleep: vi.fn(async (durationMs: number) => { + currentTime += durationMs + }), + }) + + try { + await runtime.runStopCommand() + + 
expect(processObject.kill).toHaveBeenCalledWith(101, 'SIGTERM') + expect(errorSpy).toHaveBeenCalledWith( + 'TTDash background server did not respond to SIGTERM: http://127.0.0.1:3101 (PID 101)', + ) + expect(errorSpy).toHaveBeenCalledWith( + 'Log file: /tmp/ttdash-cache/background/server-timeout.log', + ) + expect(processObject.exitCode).toBe(1) + } finally { + dateNowSpy.mockRestore() + errorSpy.mockRestore() + } + }) + it('removes a registry entry when stop finds the owned process already gone', async () => { const registryEntries = [ { @@ -627,4 +760,77 @@ describe('background runtime', () => { ) expect(fsMock.closeSync).toHaveBeenCalledWith(42) }) + + it('reports the registered child details after a successful background start', async () => { + const registryEntries = [ + { + id: 'child-runtime', + pid: 123, + port: 3101, + url: 'http://127.0.0.1:3101', + bootstrapUrl: 'http://127.0.0.1:3101/?ttdash_token=abc', + host: '127.0.0.1', + apiPrefix: '/api', + authHeader: 'Bearer child-token', + startedAt: '2026-04-01T08:00:00.000Z', + }, + ] + const spawnImpl = vi.fn(() => ({ + pid: 123, + unref: vi.fn(), + })) + const logSpy = vi.spyOn(console, 'log').mockImplementation(() => undefined) + const runtime = createTestBackgroundRuntime({ + fs: { + readFileSync: vi.fn(() => JSON.stringify(registryEntries)), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + openSync: vi.fn(() => 42), + fchmodSync: vi.fn(), + closeSync: vi.fn(), + }, + processObject: { + ...process, + env: {}, + execPath: '/usr/bin/node', + } as NodeJS.Process, + fetchImpl: vi.fn(async () => ({ + ok: true, + json: async () => ({ id: 'child-runtime', port: 3101 }), + })), + spawnImpl, + normalizedCliArgs: ['--background', '--no-open', '--port', '3101'], + cliOptions: { + noOpen: true, + }, + }) + + try { + await runtime.startInBackground() + + expect(spawnImpl).toHaveBeenCalledWith( + '/usr/bin/node', + ['/tmp/server.js', '--no-open', '--port', '3101'], + expect.objectContaining({ + 
detached: true, + env: expect.objectContaining({ + TTDASH_BACKGROUND_CHILD: '1', + TTDASH_FORCE_OPEN_BROWSER: '0', + }), + }), + ) + expect(logSpy).toHaveBeenCalledWith('TTDash is running in the background.') + expect(logSpy).toHaveBeenCalledWith(' URL: http://127.0.0.1:3101') + expect(logSpy).toHaveBeenCalledWith( + ' Local Auth URL: http://127.0.0.1:3101/?ttdash_token=abc', + ) + expect(logSpy).toHaveBeenCalledWith(' PID: 123') + expect(logSpy).toHaveBeenCalledWith( + expect.stringMatching(/^ Log: \/tmp\/ttdash-cache\/background\/server-/), + ) + } finally { + logSpy.mockRestore() + } + }) }) From be994fd7e253a0ac90459372ed9c96d9bbaeb51a Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Fri, 1 May 2026 01:10:26 +0200 Subject: [PATCH 23/42] Lighten chart legend frontend test --- .gitignore | 3 +- docs/review/fixed-findings.md | 55 ++++++-- .../chart-legend-integration.test.tsx | 128 +++++------------- 3 files changed, 79 insertions(+), 107 deletions(-) diff --git a/.gitignore b/.gitignore index d6f51bc..848e658 100644 --- a/.gitignore +++ b/.gitignore @@ -22,7 +22,8 @@ coverage/ requirements/ docs/security/ docs/review/* -!docs/review/*.md +!docs/review/test-review.md +!docs/review/fixed-findings.md docs/application-stack-reference.md /activity-*.png /cache-hit-rate-*.png diff --git a/docs/review/fixed-findings.md b/docs/review/fixed-findings.md index 70a5eec..7e78d50 100644 --- a/docs/review/fixed-findings.md +++ b/docs/review/fixed-findings.md @@ -35,6 +35,39 @@ passed and `1` skipped. - `coderabbit review --agent -t uncommitted -c AGENTS.md -c .coderabbit.yaml -f docs/review/fixed-findings.md -f tests/unit/background-runtime.test.ts` -> CodeRabbit raised 0 issues. +### test-review.md / Phase 2 - Frontend/jsdom-Kosten senken + +- Status: fixed +- Scope: Der Chart-Legend-Wrapping-Vertrag wird nicht mehr ueber drei schwere Chart-Integrationen + mit Recharts-Mocks und i18n geprueft. 
`tests/frontend/chart-legend-integration.test.tsx` rendert + jetzt direkt `ChartLegend` und deckt Default-Labels, custom `renderLabel`, `filterEntry`, + `flex-wrap` und den Verzicht auf horizontales Overflow ab. +- Fix reference: phase commit `Lighten chart legend frontend test` +- Closed findings: + - `test-review.md / Phase 2` - Frontend/jsdom-Kosten senken, Teil Reduktion schwerer + Chart-/Recharts-Importe. + - `test-review.md / Bottlenecks / Frontend/jsdom` - pure gemeinsame Legend-Layout-Logik bleibt im + jsdom-Layer, aber ohne die nicht noetigen Chart-Integrationen. +- Guardrails: + - Keine Produktionslogik wurde geaendert. + - Der User-visible Legend-Vertrag bleibt erhalten: Labels rendern, Wrapping bleibt aktiv, + horizontales Scroll-Layout kehrt nicht zurueck. + - Die bestehenden chart-spezifischen Tests fuer `CostByModel`, `RequestsOverTime`, + `TokenTypes`-nahe Datenpfade und `CostOverTime` bleiben separat erhalten. + - `.gitignore` erlaubt nur die getrackten Review-Markdown-Dateien `test-review.md` und + `fixed-findings.md`, damit lokale Review-Notizen nicht in scoped CodeRabbit-Laeufe geraten. +- Validation: + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project frontend tests/frontend/chart-legend-integration.test.tsx --reporter=verbose` -> passed with `3` tests; targeted run reported import `25ms`, tests `86ms`. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:frontend` -> passed with `76` + files and `204` tests. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - `coderabbit review --agent -t uncommitted -c AGENTS.md -c .coderabbit.yaml -f .gitignore -f docs/review/fixed-findings.md -f tests/frontend/chart-legend-integration.test.tsx` -> CodeRabbit raised 0 issues. 
+- Measurement notes: + - The frontend project run was load-distorted at `80.80s`; the refactored Legend test itself + completed as a small direct component test at `269ms` inside that run. + ## 2026-04-30 ### test-review.md / Phase 1 - Background-Concurrency stabilisieren @@ -487,17 +520,17 @@ konkret geschlossenen Findings aus `docs/review/test-review.md` und die Commits, in denen die jeweilige Verbesserung gelandet ist. -| Finding | Status | Fix reference | Validation | -| --- | --- | --- | --- | -| `test-review.md / H-01` - Playwright ist nicht worker-isoliert | fixed | `ab49513` `Isolate Playwright workers` | `npm run test:e2e:ci` mit 2 Workern gruen; final `verify:full` gruen | -| `test-review.md / H-02` - CI ist ein monolithischer serieller Full-Gate | fixed | `b5cac6a` `Split CI test pipeline` | CI-DAG in `.github/workflows/ci.yml`; final `verify:full` gruen | -| `test-review.md / H-03` - Command-Palette-E2E mischt zu viele Testarten | fixed | `cb31d35` `Move command palette contracts to Vitest` | Command-Contract in Vitest, E2E auf Smokes reduziert; final Playwright `11 passed` | -| `test-review.md / H-04` - Report- und Coverage-Ausgaben sind nicht matrix-sicher | fixed | `c4abb2b` `Make Vitest setup and reports parallel-safe`, `b5cac6a` `Split CI test pipeline` | Projekt-/Job-spezifische JUnit/Artifact-Pfade; CI-Matrix nutzt getrennte Reports | -| `test-review.md / M-02` - Node-Projekte laden unnoetiges React-Test-Setup | fixed | `c4abb2b` `Make Vitest setup and reports parallel-safe` | `vitest.setup.node.ts` / `vitest.setup.frontend.ts` getrennt; Guardrail in `vitest-coverage-config.test.ts` | -| `test-review.md / M-03` - Coverage ist global gruen, aber risikogewichtet schwach | fixed | `3a4285d` `Improve runtime and dashboard coverage`, `117cfeb` `Fix CodeRabbit test robustness issues` | Runtime-/Dashboard-Coverage gezielt erhoeht; final Coverage-Gate gruen | -| `test-review.md / M-05` - Statische Checks haben Doppelarbeit | fixed | `b5cac6a` `Split CI 
test pipeline` | `test:static` als ein regulaerer Static-Gate, Cache fuer Prettier/ESLint/TypeScript | -| `test-review.md / M-06` - Package-Smoke gehoert in eigenen CI-Job | fixed | `b5cac6a` `Split CI test pipeline` | `build` erzeugt `production-dist`; `package-smoke` und `e2e` nutzen Artifact parallel | -| `test-review.md / M-07` - Timing-Regressions sind sichtbar, aber nicht budgetiert | fixed | `e9d54fa` `Add test timing budgets` | `test:timings:budget` gruen; CI-Matrix prueft JUnit-Reports gegen `20s` Suite / `12s` Test Budget | +| Finding | Status | Fix reference | Validation | +| --------------------------------------------------------------------------------- | ------ | ----------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------- | +| `test-review.md / H-01` - Playwright ist nicht worker-isoliert | fixed | `ab49513` `Isolate Playwright workers` | `npm run test:e2e:ci` mit 2 Workern gruen; final `verify:full` gruen | +| `test-review.md / H-02` - CI ist ein monolithischer serieller Full-Gate | fixed | `b5cac6a` `Split CI test pipeline` | CI-DAG in `.github/workflows/ci.yml`; final `verify:full` gruen | +| `test-review.md / H-03` - Command-Palette-E2E mischt zu viele Testarten | fixed | `cb31d35` `Move command palette contracts to Vitest` | Command-Contract in Vitest, E2E auf Smokes reduziert; final Playwright `11 passed` | +| `test-review.md / H-04` - Report- und Coverage-Ausgaben sind nicht matrix-sicher | fixed | `c4abb2b` `Make Vitest setup and reports parallel-safe`, `b5cac6a` `Split CI test pipeline` | Projekt-/Job-spezifische JUnit/Artifact-Pfade; CI-Matrix nutzt getrennte Reports | +| `test-review.md / M-02` - Node-Projekte laden unnoetiges React-Test-Setup | fixed | `c4abb2b` `Make Vitest setup and reports parallel-safe` | `vitest.setup.node.ts` / `vitest.setup.frontend.ts` getrennt; Guardrail in 
`vitest-coverage-config.test.ts` | +| `test-review.md / M-03` - Coverage ist global gruen, aber risikogewichtet schwach | fixed | `3a4285d` `Improve runtime and dashboard coverage`, `117cfeb` `Fix CodeRabbit test robustness issues` | Runtime-/Dashboard-Coverage gezielt erhoeht; final Coverage-Gate gruen | +| `test-review.md / M-05` - Statische Checks haben Doppelarbeit | fixed | `b5cac6a` `Split CI test pipeline` | `test:static` als ein regulaerer Static-Gate, Cache fuer Prettier/ESLint/TypeScript | +| `test-review.md / M-06` - Package-Smoke gehoert in eigenen CI-Job | fixed | `b5cac6a` `Split CI test pipeline` | `build` erzeugt `production-dist`; `package-smoke` und `e2e` nutzen Artifact parallel | +| `test-review.md / M-07` - Timing-Regressions sind sichtbar, aber nicht budgetiert | fixed | `e9d54fa` `Add test timing budgets` | `test:timings:budget` gruen; CI-Matrix prueft JUnit-Reports gegen `20s` Suite / `12s` Test Budget | Nicht als vollstaendig geschlossen markiert: diff --git a/tests/frontend/chart-legend-integration.test.tsx b/tests/frontend/chart-legend-integration.test.tsx index ac338f2..1b25c44 100644 --- a/tests/frontend/chart-legend-integration.test.tsx +++ b/tests/frontend/chart-legend-integration.test.tsx @@ -1,117 +1,55 @@ // @vitest-environment jsdom -import { cloneElement, type ReactElement, type ReactNode } from 'react' import { render, screen } from '@testing-library/react' -import { beforeEach, describe, expect, it, vi } from 'vitest' -import { CostByModel } from '@/components/charts/CostByModel' -import { RequestsOverTime } from '@/components/charts/RequestsOverTime' -import { TokenTypes } from '@/components/charts/TokenTypes' -import { TooltipProvider } from '@/components/ui/tooltip' -import { initI18n } from '@/lib/i18n' -import { MockSvgContainer, MockSvgGroup } from '../recharts-test-utils' +import { describe, expect, it } from 'vitest' +import { ChartLegend } from '@/components/charts/ChartLegend' -let lastLegendPayload: Array<{ value: 
string; color: string }> = [] - -vi.mock('recharts', () => ({ - ResponsiveContainer: ({ children }: { children: ReactNode }) => ( - {children} - ), - PieChart: ({ children }: { children: ReactNode }) => ( - {children} - ), - ComposedChart: ({ children }: { children: ReactNode }) => ( - {children} - ), - Pie: ({ children, data = [] }: { children: ReactNode; data?: Array<{ name: string }> }) => { - lastLegendPayload = data.map((entry, index) => ({ - value: entry.name, - color: `hsl(${(index + 1) * 40} 70% 50%)`, - })) - return {children} - }, - Legend: ({ content }: { content?: ReactElement }) => - content ? ( - {cloneElement(content, { payload: lastLegendPayload })} - ) : null, - Tooltip: () => null, - Cell: () => null, - Area: () => null, - Line: () => null, - XAxis: () => null, - YAxis: () => null, - CartesianGrid: () => null, -})) - -describe('Chart legend integrations', () => { - beforeEach(async () => { - lastLegendPayload = [] - vi.stubGlobal( - 'IntersectionObserver', - class { - observe() {} - unobserve() {} - disconnect() {} - }, - ) - await initI18n('en') - }) - - it('wraps CostByModel legend labels instead of relying on horizontal scroll', () => { +describe('ChartLegend', () => { + it('wraps legend labels instead of relying on horizontal scroll', () => { const { container } = render( - - - , + , ) - expect(screen.getByText(/GPT-5\.4 \(\$60(?:\.0+)?\)/)).toBeInTheDocument() - expect(screen.getByText(/Claude Sonnet 4\.5 \(\$25(?:\.0+)?\)/)).toBeInTheDocument() + expect(screen.getByText('GPT-5.4 ($60)')).toBeInTheDocument() + expect(screen.getByText('Claude Sonnet 4.5 ($25)')).toBeInTheDocument() expect(container.querySelector('.overflow-x-auto')).toBeNull() expect(container.querySelector('.flex-wrap')).not.toBeNull() }) - it('wraps TokenTypes legend labels instead of relying on horizontal scroll', () => { + it('keeps custom rendered labels in the wrapped legend layout', () => { const { container } = render( - - - , + `${entry.value} token type`} + />, ) - 
expect(screen.getByText('Cache Write (1.2k)')).toBeInTheDocument() - expect(screen.getByText('Cache Read (950)')).toBeInTheDocument() + expect(screen.getByText('Cache Write token type')).toBeInTheDocument() + expect(screen.getByText('Cache Read token type')).toBeInTheDocument() expect(container.querySelector('.overflow-x-auto')).toBeNull() expect(container.querySelector('.flex-wrap')).not.toBeNull() }) - it('wraps RequestsOverTime donut legend labels instead of relying on horizontal scroll', () => { - const { container } = render( - - - , + it('filters hidden entries before rendering the wrapped payload', () => { + render( + entry.value !== 'Hidden model'} + />, ) - expect(screen.getByText('GPT-5.4 (80)')).toBeInTheDocument() - expect(screen.getByText('Claude Sonnet 4.5 (40)')).toBeInTheDocument() - expect(container.querySelector('.overflow-x-auto')).toBeNull() - expect(container.querySelector('.flex-wrap')).not.toBeNull() + expect(screen.getByText('Visible model')).toBeInTheDocument() + expect(screen.queryByText('Hidden model')).not.toBeInTheDocument() }) }) From 04aa7a6d3b079456134db8308de4a83b0470c392 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Fri, 1 May 2026 01:14:57 +0200 Subject: [PATCH 24/42] Cover runner resolution error branches --- docs/review/fixed-findings.md | 26 +++++++++++++++++++ tests/unit/server-helpers-runner-core.test.ts | 25 ++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/docs/review/fixed-findings.md b/docs/review/fixed-findings.md index 7e78d50..7f594da 100644 --- a/docs/review/fixed-findings.md +++ b/docs/review/fixed-findings.md @@ -68,6 +68,32 @@ - The frontend project run was load-distorted at `80.80s`; the refactored Legend test itself completed as a small direct component test at `269ms` inside that run. 
+### test-review.md / Phase 3 - Prozessnahe Tests trennen + +- Status: fixed +- Scope: Die verbleibenden Runner-Resolution-Error-Branches fuer lokale Startfehler und komplett + fehlende Runner sind jetzt im Fake-basierten Unit-Layer abgedeckt. Die echten + `server-helpers-runner-process`-Smokes bleiben auf Shell/PATH/Timeout/stdout-Vertraege begrenzt. +- Fix reference: phase commit `Cover runner resolution error branches` +- Closed findings: + - `test-review.md / Phase 3` - Prozessnahe Tests trennen, Teil Branch-Matrix aus echten + Prozess-Smokes herausziehen. + - `test-review.md / Prozessnahe Tests` - Error-Mapping fuer Runner-Resolution ohne Subprozesse. +- Guardrails: + - Keine Produktionslogik wurde geaendert. + - Die neuen Tests rufen `toAutoImportRunnerResolutionError(...)` direkt mit kontrollierten + Diagnosedaten auf. + - Echte Runner-Smokes fuer lokale Installation, PATH-Fallback, Timeout und stdout-Fehler bleiben + unveraendert. +- Validation: + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/server-helpers-runner-core.test.ts tests/unit/server-helpers-runner-process.test.ts --reporter=verbose` -> passed with `24` passed and `1` skipped. + - `node scripts/run-vitest-project-timings.js --projects=unit --repeat=1` -> passed with unit + median `4.90s`, worst `4.90s`. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - `coderabbit review --agent -t uncommitted -c AGENTS.md -c .coderabbit.yaml -f docs/review/fixed-findings.md -f tests/unit/server-helpers-runner-core.test.ts` -> CodeRabbit raised 0 issues. 
+ ## 2026-04-30 ### test-review.md / Phase 1 - Background-Concurrency stabilisieren diff --git a/tests/unit/server-helpers-runner-core.test.ts b/tests/unit/server-helpers-runner-core.test.ts index 78cab66..d8de98d 100644 --- a/tests/unit/server-helpers-runner-core.test.ts +++ b/tests/unit/server-helpers-runner-core.test.ts @@ -418,6 +418,20 @@ describe('server helper utilities: toktrack runner core behavior', () => { expect(error.message).toContain(TOKTRACK_VERSION) }) + it('prioritizes local startup failures over package runner feedback', () => { + const error = toAutoImportRunnerResolutionError({ + localVersionMismatch: null, + localFailure: 'permission denied', + runnerFailures: [ + { label: 'bunx', message: 'missing', timedOut: false }, + { label: 'npm exec', message: 'missing', timedOut: false }, + ], + }) + + expect(error.messageKey).toBe('localToktrackFailed') + expect(error.message).toContain('permission denied') + }) + it('reports package runner failures when no compatible fallback succeeds', () => { const error = toAutoImportRunnerResolutionError({ localVersionMismatch: null, @@ -447,4 +461,15 @@ describe('server helper utilities: toktrack runner core behavior', () => { expect(error.message).toContain('bunx / npm exec') expect(error.message).toContain('45s') }) + + it('reports a no-runner error when no resolution diagnostics are available', () => { + const error = toAutoImportRunnerResolutionError({ + localVersionMismatch: null, + localFailure: null, + runnerFailures: [], + }) + + expect(error.messageKey).toBe('noRunnerFound') + expect(error.message).toBe('No local toktrack, Bun, or npm exec installation found.') + }) }) From ca29012c57e968d944ce1f4a552360063b36dcf5 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Fri, 1 May 2026 01:19:45 +0200 Subject: [PATCH 25/42] Cover settings and CSV export branches --- docs/review/fixed-findings.md | 33 +++++++++++ tests/unit/app-settings-contract.test.ts | 41 +++++++++++++- tests/unit/csv-export.test.ts | 70 
+++++++++++++++++++++++- 3 files changed, 141 insertions(+), 3 deletions(-) diff --git a/docs/review/fixed-findings.md b/docs/review/fixed-findings.md index 7f594da..5abb3a3 100644 --- a/docs/review/fixed-findings.md +++ b/docs/review/fixed-findings.md @@ -94,6 +94,39 @@ - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - `coderabbit review --agent -t uncommitted -c AGENTS.md -c .coderabbit.yaml -f docs/review/fixed-findings.md -f tests/unit/server-helpers-runner-core.test.ts` -> CodeRabbit raised 0 issues. +### test-review.md / Phase 4 - Coverage risikogewichtet erhoehen + +- Status: fixed +- Scope: Guenstige Node-Unit-Coverage wurde fuer die Low-Coverage-Lib-Module + `src/lib/app-settings.ts` und `src/lib/csv-export.ts` erhoeht. App-Settings-Wrapper, + Timestamp-/Load-Source-Normalisierung, Provider-Limits, leere CSV-Exports, deduplizierte + Modellnamen und der Browser-Download-Branch laufen jetzt ohne jsdom und ohne echte Downloads. +- Fix reference: phase commit `Cover settings and CSV export branches` +- Closed findings: + - `test-review.md / Coverage-Luecken` - `src/lib/app-settings.ts`, Teil Wrapper- und + Normalisierungsbranches. + - `test-review.md / Coverage-Luecken` - `src/lib/csv-export.ts`, Teil leere Daten, + Modell-Dedupe und Download-Side-Effects. + - `test-review.md / Phase 4` - Coverage risikogewichtet erhoehen, kleine Lib-Module als + schnelle Coverage-Wins. +- Guardrails: + - Keine Produktionslogik wurde geaendert. + - `downloadCSV` wird mit Fake-`document`, Fake-`URL`, Fake-Timern und Node `Blob` getestet. + - CSV-Assertions pruefen den extern sichtbaren Dateinamen, Blob-Typ, Link-Click und + Object-URL-Cleanup. +- Validation: + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/app-settings-contract.test.ts tests/unit/csv-export.test.ts --reporter=verbose` -> passed with `11` tests. 
+ - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `150` + files, `642` passed and `1` skipped. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - `coderabbit review --agent -t uncommitted -c AGENTS.md -c .coderabbit.yaml -f docs/review/fixed-findings.md -f tests/unit/app-settings-contract.test.ts -f tests/unit/csv-export.test.ts` -> CodeRabbit raised 0 issues. +- Measurement notes: + - Coverage summary after this phase: Statements `82.87%`, Branches `70.21%`, Functions + `83.06%`, Lines `84.52%`. + - `src/lib/app-settings.ts` reports Lines `100%`, Statements `90.9%`, Functions `100%`. + ## 2026-04-30 ### test-review.md / Phase 1 - Background-Concurrency stabilisieren diff --git a/tests/unit/app-settings-contract.test.ts b/tests/unit/app-settings-contract.test.ts index 00adb52..0fd0162 100644 --- a/tests/unit/app-settings-contract.test.ts +++ b/tests/unit/app-settings-contract.test.ts @@ -1,5 +1,14 @@ import { describe, expect, it } from 'vitest' -import { DEFAULT_APP_SETTINGS, normalizeAppSettings } from '@/lib/app-settings' +import { + DEFAULT_APP_SETTINGS, + normalizeAppLanguage, + normalizeAppSettings, + normalizeAppTheme, + normalizeDataLoadSource, + normalizeReducedMotionPreference, + normalizeStoredProviderLimits, + normalizeStoredTimestamp, +} from '@/lib/app-settings' import { DEFAULT_DASHBOARD_FILTERS, getDefaultDashboardSectionOrder, @@ -68,6 +77,36 @@ describe('shared app settings contract', () => { expect(normalizeAppSettings(payload)).toEqual(normalizeSharedAppSettings(payload)) }) + it('normalizes frontend app-settings fragments through the shared wrappers', () => { + expect(normalizeAppLanguage('en')).toBe('en') + expect(normalizeAppLanguage('fr')).toBe('de') + expect(normalizeAppTheme('light')).toBe('light') + 
expect(normalizeAppTheme('system')).toBe('dark') + expect(normalizeReducedMotionPreference('always')).toBe('always') + expect(normalizeReducedMotionPreference('never')).toBe('never') + expect(normalizeReducedMotionPreference('sometimes')).toBe('system') + expect(normalizeDataLoadSource('file')).toBe('file') + expect(normalizeDataLoadSource('cli-auto-load')).toBe('cli-auto-load') + expect(normalizeDataLoadSource('manual')).toBeNull() + expect(normalizeStoredTimestamp('2026-04-01T12:34:56+02:00')).toBe('2026-04-01T10:34:56.000Z') + expect(normalizeStoredTimestamp('not a timestamp')).toBeNull() + expect( + normalizeStoredProviderLimits({ + OpenAI: { + hasSubscription: true, + subscriptionPrice: 12.345, + monthlyLimit: -1, + }, + }), + ).toEqual({ + OpenAI: { + hasSubscription: true, + subscriptionPrice: 12.35, + monthlyLimit: 0, + }, + }) + }) + it('normalizes dashboard fragments and provider limits through the shared contract', () => { const filters = { viewMode: 'weekly', diff --git a/tests/unit/csv-export.test.ts b/tests/unit/csv-export.test.ts index cbd7bfe..bd76b30 100644 --- a/tests/unit/csv-export.test.ts +++ b/tests/unit/csv-export.test.ts @@ -1,5 +1,5 @@ -import { describe, expect, it } from 'vitest' -import { generateCSV } from '@/lib/csv-export' +import { describe, expect, it, vi } from 'vitest' +import { downloadCSV, generateCSV } from '@/lib/csv-export' import { buildCsvLine, stringifyCsvCell } from '@/lib/csv' import type { DailyUsage } from '@/types' @@ -46,4 +46,70 @@ describe('csv export helpers', () => { expect(csv).toContain('"models"') expect(csv).toContain('"GPT-4 ""test"""') }) + + it('exports only the header row when the usage dataset is empty', () => { + expect(generateCSV([])).toBe( + [ + '"date"', + '"totalCost"', + '"totalTokens"', + '"inputTokens"', + '"outputTokens"', + '"cacheCreationTokens"', + '"cacheReadTokens"', + '"thinkingTokens"', + '"requestCount"', + '"models"', + ].join(','), + ) + }) + + it('deduplicates normalized model names 
in each exported day', () => { + const day = createDay('gpt-4') + const csv = generateCSV([ + { + ...day, + modelBreakdowns: [ + { ...day.modelBreakdowns[0]!, modelName: 'gpt-4' }, + { ...day.modelBreakdowns[0]!, modelName: 'GPT-4' }, + { ...day.modelBreakdowns[0]!, modelName: 'claude sonnet 4.5' }, + ], + }, + ]) + + expect(csv).toContain('"GPT-4; Claude Sonnet 4.5"') + }) + + it('downloads the generated CSV with a local-date filename and revokes the object URL', () => { + vi.useFakeTimers() + vi.setSystemTime(new Date('2026-04-13T10:30:00Z')) + const click = vi.fn() + const anchor = { + click, + download: '', + href: '', + } + const createElement = vi.fn(() => anchor) + const createObjectURL = vi.fn(() => 'blob:ttdash-export') + const revokeObjectURL = vi.fn() + + vi.stubGlobal('document', { createElement }) + vi.stubGlobal('URL', { createObjectURL, revokeObjectURL }) + + try { + downloadCSV([createDay('gpt-4')]) + + const [blob] = createObjectURL.mock.calls[0]! + expect(blob).toBeInstanceOf(Blob) + expect((blob as Blob).type).toBe('text/csv;charset=utf-8;') + expect(createElement).toHaveBeenCalledWith('a') + expect(anchor.href).toBe('blob:ttdash-export') + expect(anchor.download).toBe('ttdash-export-2026-04-13.csv') + expect(click).toHaveBeenCalledTimes(1) + expect(revokeObjectURL).toHaveBeenCalledWith('blob:ttdash-export') + } finally { + vi.useRealTimers() + vi.unstubAllGlobals() + } + }) }) From 0834a070b15441f5e66e8bb534bed99fbfbea174 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Fri, 1 May 2026 01:22:21 +0200 Subject: [PATCH 26/42] Guard parallel gate option errors --- docs/review/fixed-findings.md | 23 +++++++++++++++++++++++ tests/unit/run-parallel-gate.test.ts | 12 ++++++++++++ 2 files changed, 35 insertions(+) diff --git a/docs/review/fixed-findings.md b/docs/review/fixed-findings.md index 5abb3a3..19d03cd 100644 --- a/docs/review/fixed-findings.md +++ b/docs/review/fixed-findings.md @@ -127,6 +127,29 @@ `83.06%`, Lines `84.52%`. 
- `src/lib/app-settings.ts` reports Lines `100%`, Statements `90.9%`, Functions `100%`. +### test-review.md / Phase 5 - Gates datenbasiert schaerfen + +- Status: fixed +- Scope: Das lokale Parallel-Gate hat jetzt einen expliziten Unit-Guardrail fuer unbekannte + CLI-Optionen. Fehlerhafte Gate-Aufrufe brechen vor jedem Task-Spawn ab und schreiben die konkrete + Fehlermeldung nach stderr. +- Fix reference: phase commit `Guard parallel gate option errors` +- Closed findings: + - `test-review.md / Phase 5` - Gates datenbasiert schaerfen, Teil robuste lokale Gate-CLI. + - `test-review.md / Nicht optimale Einstellungen` - direkte Gate-Aufrufe sollen kontrolliert + und diagnosetauglich bleiben. +- Guardrails: + - Keine Produktionslogik wurde geaendert. + - Der bestehende Dry-Run mit deklarierten Outputs bleibt unveraendert. + - Der neue Test stellt sicher, dass bei unbekannten Optionen keine parallelen Tasks starten. +- Validation: + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/run-parallel-gate.test.ts --reporter=verbose` -> passed with `8` tests. + - `node scripts/run-parallel-gate.js --dry-run --e2e` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - `coderabbit review --agent -t uncommitted -c AGENTS.md -c .coderabbit.yaml -f docs/review/fixed-findings.md -f tests/unit/run-parallel-gate.test.ts` -> CodeRabbit raised 0 issues. 
+ ## 2026-04-30 ### test-review.md / Phase 1 - Background-Concurrency stabilisieren diff --git a/tests/unit/run-parallel-gate.test.ts b/tests/unit/run-parallel-gate.test.ts index 5ae65da..c4272c3 100644 --- a/tests/unit/run-parallel-gate.test.ts +++ b/tests/unit/run-parallel-gate.test.ts @@ -111,6 +111,18 @@ describe('parallel verification gate', () => { }) }) + it('rejects unknown options before spawning any gate tasks', async () => { + const stdout = { write: vi.fn() } + const stderr = { write: vi.fn() } + const spawnImpl = vi.fn() + + const status = await parallelGate.run(['--unknown'], { stdout, stderr }, spawnImpl) + + expect(status).toBe(1) + expect(spawnImpl).not.toHaveBeenCalled() + expect(stderr.write).toHaveBeenCalledWith('Unknown option: --unknown\n') + }) + it('prints the task graph without spawning commands in dry-run mode', async () => { const stdout = { write: vi.fn() } const stderr = { write: vi.fn() } From 5a701b7e7bbff0a70f7b03b16ab756cd86f33ff8 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Fri, 1 May 2026 01:35:04 +0200 Subject: [PATCH 27/42] Guard Playwright spec state isolation --- docs/review/fixed-findings.md | 28 ++++++++++++++++++++++++++++ tests/unit/playwright-config.test.ts | 11 +++++++++++ 2 files changed, 39 insertions(+) diff --git a/docs/review/fixed-findings.md b/docs/review/fixed-findings.md index 19d03cd..95e17a8 100644 --- a/docs/review/fixed-findings.md +++ b/docs/review/fixed-findings.md @@ -150,6 +150,34 @@ - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - `coderabbit review --agent -t uncommitted -c AGENTS.md -c .coderabbit.yaml -f docs/review/fixed-findings.md -f tests/unit/run-parallel-gate.test.ts` -> CodeRabbit raised 0 issues. 
+### test-review.md / Phase 6 - Playwright klein und wertvoll halten + +- Status: fixed +- Scope: Playwright-Scope-Guardrails pruefen jetzt zusaetzlich, dass jede E2E-Spec den + worker-isolierten App-Zustand ueber `resetAppState(...)` oder `prepareDashboard(...)` + initialisiert. Damit bleibt die Browser-Suite auf representative Journeys begrenzt und vermeidet + versteckte Reihenfolge-/Worker-Abhaengigkeiten. +- Fix reference: phase commit `Guard Playwright spec state isolation` +- Closed findings: + - `test-review.md / Phase 6` - Playwright klein und wertvoll halten, Teil State-Isolation pro + Spec. + - `test-review.md / Zielarchitektur / E2E` - kurze, worker-isolierte Browser-Journeys ohne + Contract-Matrix. +- Guardrails: + - Keine Produktionslogik wurde geaendert. + - E2E-Specs muessen weiterhin `test`/`expect` aus `tests/e2e/fixtures.ts` importieren. + - Die Browser-Suite bleibt bei `11` Tests und der bestehenden Journey-Liste. + - Jeder Spec muss explizit isolierten Zustand vorbereiten. +- Validation: + - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/playwright-config.test.ts --reporter=verbose` -> passed with `5` tests. + - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e:ci` -> + passed with `11` tests in `12.1s`. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. + - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. + - First CodeRabbit attempt was blocked by a rate limit with retry window `6 minutes and 13 seconds`. + - `coderabbit review --agent -t uncommitted -c AGENTS.md -c .coderabbit.yaml -f docs/review/fixed-findings.md -f tests/unit/playwright-config.test.ts` -> CodeRabbit raised 0 issues after retry. 
+ ## 2026-04-30 ### test-review.md / Phase 1 - Background-Concurrency stabilisieren diff --git a/tests/unit/playwright-config.test.ts b/tests/unit/playwright-config.test.ts index ef046ef..28453ce 100644 --- a/tests/unit/playwright-config.test.ts +++ b/tests/unit/playwright-config.test.ts @@ -106,6 +106,17 @@ describe('playwright config', () => { } }) + it('keeps each browser spec isolated through the shared state-reset helpers', async () => { + const specs = await readE2ESpecs() + + for (const spec of specs) { + expect( + spec.source.includes('resetAppState(') || spec.source.includes('prepareDashboard('), + `${spec.filename} must reset or prepare isolated app state`, + ).toBe(true) + } + }) + it('keeps Playwright as a representative smoke suite instead of a contract matrix', async () => { const specs = await readE2ESpecs() const totalTests = specs.reduce((count, spec) => { From 475aee465c732c41d9f809e5d7ef588e0cc582aa Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Fri, 1 May 2026 09:52:19 +0200 Subject: [PATCH 28/42] Document test gates and untrack review docs --- .gitignore | 2 - AGENTS.md | 3 +- CONTRIBUTING.md | 17 +- README.md | 14 +- RELEASING.md | 9 + docs/review/fixed-findings.md | 1256 --------------------------------- docs/review/test-review.md | 456 ------------ docs/testing.md | 10 +- 8 files changed, 42 insertions(+), 1725 deletions(-) delete mode 100644 docs/review/fixed-findings.md delete mode 100644 docs/review/test-review.md diff --git a/.gitignore b/.gitignore index 848e658..a9c1b0e 100644 --- a/.gitignore +++ b/.gitignore @@ -22,8 +22,6 @@ coverage/ requirements/ docs/security/ docs/review/* -!docs/review/test-review.md -!docs/review/fixed-findings.md docs/application-stack-reference.md /activity-*.png /cache-hit-rate-*.png diff --git a/AGENTS.md b/AGENTS.md index 73e6764..b9be6c0 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -24,7 +24,7 @@ Frontend code is TypeScript + React. 
Follow the existing style: 2-space indentat ## Testing Guidelines -Automated tests are part of the repo now. Before opening a PR, run `npm run verify:full`. If you only need the main local gate without Playwright, run `npm run verify`. If local port `3015` is already in use, run Playwright with `PLAYWRIGHT_TEST_PORT=3016 npm run test:e2e`. Use `npm run test:timings` to inspect the slowest Vitest suites and cases after larger test changes, but do not run it in parallel with another Vitest command that writes the same JUnit report. Architecture and dependency boundaries are guarded by `npm run test:architecture` and `npm run check:deps`; when you add or move modules, keep [`docs/architecture.md`](docs/architecture.md) aligned with the actual structure. +Automated tests are part of the repo now. Before opening a PR, run `npm run verify:full`; on a local machine with enough CPU, `PLAYWRIGHT_TEST_PORT=3016 npm run verify:full:parallel` is the faster non-coverage fast path across the main test surfaces. If you only need the main local gate without Playwright, run `npm run verify`. If local port `3015` is already in use, run Playwright with `PLAYWRIGHT_TEST_PORT=3016 npm run test:e2e`. Use `npm run test:timings` to inspect the slowest Vitest suites and cases after larger test changes, but do not run it in parallel with another Vitest command that writes the same JUnit report. Architecture and dependency boundaries are guarded by `npm run test:architecture` and `npm run check:deps`; when you add or move modules, keep [`docs/architecture.md`](docs/architecture.md) aligned with the actual structure. 
When adding tests: @@ -37,6 +37,7 @@ When adding tests: - use real subprocesses only when shell/PATH/CLI or cross-process behavior is the thing being tested - split files once they mix unrelated concerns like content, navigation, localization, motion, or keyboard behavior - keep `waitFor(...)` for real eventual consistency only, not as the default assertion pattern +- keep Playwright specs on `tests/e2e/fixtures.ts` and reset state with `resetAppState(...)` or `prepareDashboard(...)` - when improving coverage, prefer critical branch-heavy runtime modules like `src/lib/api.ts`, `src/hooks/use-usage-data.ts`, and `src/hooks/use-dashboard-controller.ts` over adding another broad dashboard catch-all test Continue to manually verify the main flows affected by the change: dashboard load, auto-import, JSON upload, filtering, and export actions. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fe85a18..97b8d42 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -41,19 +41,26 @@ For feature requests, explain the user problem first. Suggestions that only desc Make sure the change is small, focused, and aligned with the existing product direction. -Run the main local checks: +Run the full local gate before opening a PR: ```bash -npm run verify -npm run test:e2e:ci +npm run verify:full ``` -`npm run verify` covers formatting, ESLint, `tsc --noEmit`, unit tests, the production bundle, and packaged-artifact verification. If you want the same coverage gate used in release preparation, also run: +On a local machine with enough CPU, the staged parallel gate gives faster feedback across the same +main test surfaces without the coverage-instrumented pass: ```bash -npm run test:unit:coverage +PLAYWRIGHT_TEST_PORT=3016 npm run verify:full:parallel ``` +Do not use the parallel fast path as a replacement for the final coverage gate when a change affects +coverage thresholds; keep `npm run verify:full` or `npm run test:vitest:coverage` in that validation. 
+ +`npm run verify` remains the faster non-browser gate for inner-loop work. It covers formatting, +ESLint, `tsc --noEmit`, Vitest without coverage instrumentation, the production bundle, and +packaged-artifact verification. + If you only need the production bundle without the lint/format gate, use: ```bash diff --git a/README.md b/README.md index 3481e1c..a17014a 100644 --- a/README.md +++ b/README.md @@ -334,18 +334,28 @@ npm run build npm run build:app ``` -Run automated checks: +Run the main local gate without Playwright: ```bash npm run verify ``` -For the full release-style local gate without re-running unit tests or rebuilding twice, use: +For the full serial CI/release-style local gate, including coverage thresholds and Playwright, use: ```bash npm run verify:full ``` +On a local machine with enough CPU, the staged parallel fast path gives quicker feedback across the +same main test surfaces without the coverage-instrumented pass: + +```bash +PLAYWRIGHT_TEST_PORT=3016 npm run verify:full:parallel +``` + +Keep `npm run verify:full` or `npm run test:vitest:coverage` in the final validation path whenever +coverage thresholds matter. + If you want to run the steps individually, use: ```bash diff --git a/RELEASING.md b/RELEASING.md index 8b3bb04..99c942e 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -45,6 +45,15 @@ Optional local confidence check before starting the workflow: npm run verify:full ``` +For faster local behavior-test feedback before the final serial gate, use the staged parallel path: + +```bash +PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel +``` + +It does not replace `npm run verify:full` for release confidence because the serial gate evaluates +the coverage-instrumented pass. 
+ If port `3015` is already occupied locally, keep the same one-pass gate and only override the Playwright port: ```bash diff --git a/docs/review/fixed-findings.md b/docs/review/fixed-findings.md deleted file mode 100644 index 95e17a8..0000000 --- a/docs/review/fixed-findings.md +++ /dev/null @@ -1,1256 +0,0 @@ -# Fixed Findings - -## 2026-05-01 - -### test-review.md / Phase 1 - Background-Concurrency stabilisieren - -- Status: fixed -- Scope: Die Background-Runtime-Branch-Coverage wurde ohne weitere echte Prozesse erweitert. - Stop-Noop, Multi-Instance-Cancel, Stop-Timeout mit Logausgabe und erfolgreicher Background-Start - laufen jetzt im schnellen Fake-basierten Unit-Layer. Die vorhandenen echten - `integration-background`-Smokes bleiben unveraendert und pruefen weiter Concurrent Startup, - selected stop, Registry pruning und custom prefix/log permissions. -- Fix reference: phase commit `Expand background runtime branch coverage` -- Closed findings: - - `test-review.md / Phase 1` - Background-Concurrency stabilisieren, Restabdeckung fuer - Stop-/Start-Branches. - - `test-review.md / Coverage-Luecken` - `server/background-runtime.js`, Teil Stop-Cancel, - Stop-Timeout und erfolgreicher Start. -- Guardrails: - - Keine Produktionslogik wurde geaendert. - - Neue Tests verwenden Fake-FS, Fake-Spawn, Fake-Readline, Fake-Clock und Spy-Ausgaben statt - zusaetzlicher Subprozesse. - - `integration-background.maxWorkers` bleibt unveraendert bei `2`. - - Der dreifache Background-Benchmark lief ohne Concurrency-Flake. -- Validation: - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/background-runtime.test.ts --reporter=verbose` -> passed with `14` tests. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:integration-background` -> - passed with `4` files and `5` tests. - - `node scripts/run-vitest-project-timings.js --projects=integration-background --repeat=3` -> - passed with median `2.74s`, worst `3.12s`. 
- - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:unit` -> passed with `383` - passed and `1` skipped. - - `coderabbit review --agent -t uncommitted -c AGENTS.md -c .coderabbit.yaml -f docs/review/fixed-findings.md -f tests/unit/background-runtime.test.ts` -> CodeRabbit raised 0 issues. - -### test-review.md / Phase 2 - Frontend/jsdom-Kosten senken - -- Status: fixed -- Scope: Der Chart-Legend-Wrapping-Vertrag wird nicht mehr ueber drei schwere Chart-Integrationen - mit Recharts-Mocks und i18n geprueft. `tests/frontend/chart-legend-integration.test.tsx` rendert - jetzt direkt `ChartLegend` und deckt Default-Labels, custom `renderLabel`, `filterEntry`, - `flex-wrap` und den Verzicht auf horizontales Overflow ab. -- Fix reference: phase commit `Lighten chart legend frontend test` -- Closed findings: - - `test-review.md / Phase 2` - Frontend/jsdom-Kosten senken, Teil Reduktion schwerer - Chart-/Recharts-Importe. - - `test-review.md / Bottlenecks / Frontend/jsdom` - pure gemeinsame Legend-Layout-Logik bleibt im - jsdom-Layer, aber ohne die nicht noetigen Chart-Integrationen. -- Guardrails: - - Keine Produktionslogik wurde geaendert. - - Der User-visible Legend-Vertrag bleibt erhalten: Labels rendern, Wrapping bleibt aktiv, - horizontales Scroll-Layout kehrt nicht zurueck. - - Die bestehenden chart-spezifischen Tests fuer `CostByModel`, `RequestsOverTime`, - `TokenTypes`-nahe Datenpfade und `CostOverTime` bleiben separat erhalten. - - `.gitignore` erlaubt nur die getrackten Review-Markdown-Dateien `test-review.md` und - `fixed-findings.md`, damit lokale Review-Notizen nicht in scoped CodeRabbit-Laeufe geraten. 
-- Validation: - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project frontend tests/frontend/chart-legend-integration.test.tsx --reporter=verbose` -> passed with `3` tests; targeted run reported import `25ms`, tests `86ms`. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:frontend` -> passed with `76` - files and `204` tests. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - - `coderabbit review --agent -t uncommitted -c AGENTS.md -c .coderabbit.yaml -f .gitignore -f docs/review/fixed-findings.md -f tests/frontend/chart-legend-integration.test.tsx` -> CodeRabbit raised 0 issues. -- Measurement notes: - - The frontend project run was load-distorted at `80.80s`; the refactored Legend test itself - completed as a small direct component test at `269ms` inside that run. - -### test-review.md / Phase 3 - Prozessnahe Tests trennen - -- Status: fixed -- Scope: Die verbleibenden Runner-Resolution-Error-Branches fuer lokale Startfehler und komplett - fehlende Runner sind jetzt im Fake-basierten Unit-Layer abgedeckt. Die echten - `server-helpers-runner-process`-Smokes bleiben auf Shell/PATH/Timeout/stdout-Vertraege begrenzt. -- Fix reference: phase commit `Cover runner resolution error branches` -- Closed findings: - - `test-review.md / Phase 3` - Prozessnahe Tests trennen, Teil Branch-Matrix aus echten - Prozess-Smokes herausziehen. - - `test-review.md / Prozessnahe Tests` - Error-Mapping fuer Runner-Resolution ohne Subprozesse. -- Guardrails: - - Keine Produktionslogik wurde geaendert. - - Die neuen Tests rufen `toAutoImportRunnerResolutionError(...)` direkt mit kontrollierten - Diagnosedaten auf. - - Echte Runner-Smokes fuer lokale Installation, PATH-Fallback, Timeout und stdout-Fehler bleiben - unveraendert. 
-- Validation: - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/server-helpers-runner-core.test.ts tests/unit/server-helpers-runner-process.test.ts --reporter=verbose` -> passed with `24` passed and `1` skipped. - - `node scripts/run-vitest-project-timings.js --projects=unit --repeat=1` -> passed with unit - median `4.90s`, worst `4.90s`. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - - `coderabbit review --agent -t uncommitted -c AGENTS.md -c .coderabbit.yaml -f docs/review/fixed-findings.md -f tests/unit/server-helpers-runner-core.test.ts` -> CodeRabbit raised 0 issues. - -### test-review.md / Phase 4 - Coverage risikogewichtet erhoehen - -- Status: fixed -- Scope: Guenstige Node-Unit-Coverage wurde fuer die Low-Coverage-Lib-Module - `src/lib/app-settings.ts` und `src/lib/csv-export.ts` erhoeht. App-Settings-Wrapper, - Timestamp-/Load-Source-Normalisierung, Provider-Limits, leere CSV-Exports, deduplizierte - Modellnamen und der Browser-Download-Branch laufen jetzt ohne jsdom und ohne echte Downloads. -- Fix reference: phase commit `Cover settings and CSV export branches` -- Closed findings: - - `test-review.md / Coverage-Luecken` - `src/lib/app-settings.ts`, Teil Wrapper- und - Normalisierungsbranches. - - `test-review.md / Coverage-Luecken` - `src/lib/csv-export.ts`, Teil leere Daten, - Modell-Dedupe und Download-Side-Effects. - - `test-review.md / Phase 4` - Coverage risikogewichtet erhoehen, kleine Lib-Module als - schnelle Coverage-Wins. -- Guardrails: - - Keine Produktionslogik wurde geaendert. - - `downloadCSV` wird mit Fake-`document`, Fake-`URL`, Fake-Timern und Node `Blob` getestet. - - CSV-Assertions pruefen den extern sichtbaren Dateinamen, Blob-Typ, Link-Click und - Object-URL-Cleanup. 
-- Validation: - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/app-settings-contract.test.ts tests/unit/csv-export.test.ts --reporter=verbose` -> passed with `11` tests. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `150` - files, `642` passed and `1` skipped. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - - `coderabbit review --agent -t uncommitted -c AGENTS.md -c .coderabbit.yaml -f docs/review/fixed-findings.md -f tests/unit/app-settings-contract.test.ts -f tests/unit/csv-export.test.ts` -> CodeRabbit raised 0 issues. -- Measurement notes: - - Coverage summary after this phase: Statements `82.87%`, Branches `70.21%`, Functions - `83.06%`, Lines `84.52%`. - - `src/lib/app-settings.ts` reports Lines `100%`, Statements `90.9%`, Functions `100%`. - -### test-review.md / Phase 5 - Gates datenbasiert schaerfen - -- Status: fixed -- Scope: Das lokale Parallel-Gate hat jetzt einen expliziten Unit-Guardrail fuer unbekannte - CLI-Optionen. Fehlerhafte Gate-Aufrufe brechen vor jedem Task-Spawn ab und schreiben die konkrete - Fehlermeldung nach stderr. -- Fix reference: phase commit `Guard parallel gate option errors` -- Closed findings: - - `test-review.md / Phase 5` - Gates datenbasiert schaerfen, Teil robuste lokale Gate-CLI. - - `test-review.md / Nicht optimale Einstellungen` - direkte Gate-Aufrufe sollen kontrolliert - und diagnosetauglich bleiben. -- Guardrails: - - Keine Produktionslogik wurde geaendert. - - Der bestehende Dry-Run mit deklarierten Outputs bleibt unveraendert. - - Der neue Test stellt sicher, dass bei unbekannten Optionen keine parallelen Tasks starten. 
-- Validation: - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/run-parallel-gate.test.ts --reporter=verbose` -> passed with `8` tests. - - `node scripts/run-parallel-gate.js --dry-run --e2e` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - - `coderabbit review --agent -t uncommitted -c AGENTS.md -c .coderabbit.yaml -f docs/review/fixed-findings.md -f tests/unit/run-parallel-gate.test.ts` -> CodeRabbit raised 0 issues. - -### test-review.md / Phase 6 - Playwright klein und wertvoll halten - -- Status: fixed -- Scope: Playwright-Scope-Guardrails pruefen jetzt zusaetzlich, dass jede E2E-Spec den - worker-isolierten App-Zustand ueber `resetAppState(...)` oder `prepareDashboard(...)` - initialisiert. Damit bleibt die Browser-Suite auf representative Journeys begrenzt und vermeidet - versteckte Reihenfolge-/Worker-Abhaengigkeiten. -- Fix reference: phase commit `Guard Playwright spec state isolation` -- Closed findings: - - `test-review.md / Phase 6` - Playwright klein und wertvoll halten, Teil State-Isolation pro - Spec. - - `test-review.md / Zielarchitektur / E2E` - kurze, worker-isolierte Browser-Journeys ohne - Contract-Matrix. -- Guardrails: - - Keine Produktionslogik wurde geaendert. - - E2E-Specs muessen weiterhin `test`/`expect` aus `tests/e2e/fixtures.ts` importieren. - - Die Browser-Suite bleibt bei `11` Tests und der bestehenden Journey-Liste. - - Jeder Spec muss explizit isolierten Zustand vorbereiten. -- Validation: - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/playwright-config.test.ts --reporter=verbose` -> passed with `5` tests. - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e:ci` -> - passed with `11` tests in `12.1s`. 
- - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - - First CodeRabbit attempt was blocked by a rate limit with retry window `6 minutes and 13 seconds`. - - `coderabbit review --agent -t uncommitted -c AGENTS.md -c .coderabbit.yaml -f docs/review/fixed-findings.md -f tests/unit/playwright-config.test.ts` -> CodeRabbit raised 0 issues after retry. - -## 2026-04-30 - -### test-review.md / Phase 1 - Background-Concurrency stabilisieren - -- Status: fixed -- Scope: Background-Concurrency-Smoke behält den echten Cross-Process-Vertrag, liefert aber im - Fehlerfall jetzt verwertbare Diagnose zu CLI-Ausgaben, Registry, Runtime-Root, Ports und - Background-Logs. Background-Registry-/Lock-/Runtime-Identity-Branches sind zusätzlich auf dem - schnellen Unit-Layer abgedeckt. -- Fix reference: `b7fbe80` `Stabilize background concurrency tests` -- Closed findings: - - `test-review.md / H-01` - Background-Concurrency flakt unter wiederholter Last, Teil - Diagnostik und Lock-/Registry-Branch-Abdeckung. - - `test-review.md / Phase 1` - Background-Concurrency stabilisieren. -- Guardrails: - - `runCli` trennt stdout/stderr und liefert Signal, Args und kombinierten Output weiterhin - kompatibel fuer bestehende Tests. - - `waitForBackgroundRegistry` meldet bei Timeout Registry-Inhalt und Background-Log-Snippets. - - Der Concurrent-Startup-Test prueft nach Readiness erneut, dass beide Registry-Eintraege noch - vorhanden sind und eindeutige URLs besitzen. - - Background-Logdateien enthalten jetzt den Parent-PID-Suffix, damit parallele Starts keine - Diagnose-Logs teilen. - - Unit-Tests decken Runtime-Identity Fetch, nicht verfuegbare Runtime-Responses, stale Locks, - Lock-Timeout, invalid Registry Payloads, ESRCH beim Stop und Browser-Open-Env fuer - Background-Kinder ab. 
-- Validation: - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/background-runtime.test.ts --reporter=verbose` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project integration-background tests/integration/server-background-concurrency.test.ts --reporter=verbose` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:integration-background` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:benchmark -- --projects=integration-background` -> passed with median `4.95s`, worst `5.51s` in the final isolated run; an earlier same-session isolated control before extra load measured median `2.52s`, worst `2.66s`. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:unit` -> passed with `361 passed | 1 skipped`. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `151` - files, `616 passed | 1 skipped`. - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel` -> passed with Playwright `11 passed`. -- Measurement notes: - - Coverage summary after this phase: Statements `81.98%`, Branches `69.51%`, Functions - `83.06%`, Lines `83.43%`. - - `server/background-runtime.js` improved to Statements `58.60%`, Branches `53.89%`, Functions - `73.52%`, Lines `59.52%`. - - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> 0 - issues. - - Commit is SSH-signed. - -### test-review.md / Phase 2 - Frontend/jsdom-Kosten senken - -- Status: fixed -- Scope: Command-Palette-Gruppen- und ID-Vertraege laufen jetzt im schnellen - Node-Unit-Layer; ein schmaler Command-Palette-Render-Smoke bleibt in einer bestehenden - i18n-jsdom-Datei. 
Der redundante FilterBar-Preset-Order-DOM-Test wurde entfernt, weil die - gemeinsame Preset-Reihenfolge bereits im Unit-Layer abgesichert ist. -- Fix reference: `1657822` `Move command palette contracts to unit tests` -- Closed findings: - - `test-review.md / H-02` - Frontend/jsdom dominiert die Vitest-Walltime, Teil Command-Palette - Contract-Tests und redundante Preset-Assertion. - - `test-review.md / Phase 2` - Frontend/jsdom-Kosten senken. -- Guardrails: - - Command-IDs fuer Load-Data-, Export- und Maintenance-Aktionen bleiben im Builder-Vertrag - explizit stabil. - - Lokalisierte Command-Gruppen werden im Unit-Layer ueber einen kontrollierten Translator - geprueft. - - Der verbleibende jsdom-Smoke prueft weiterhin, dass die Command Palette gerendert werden kann - und representative Commands in deutscher UI sichtbar sind. - - FilterBar-Preset-Reihenfolge bleibt ueber `DASHBOARD_QUICK_DATE_PRESETS` im Unit-Test - abgesichert. - - Keine Produktionsdateien, kein Layout und kein App-Verhalten wurden geaendert. -- Validation: - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/command-palette-commands.test.ts tests/unit/dashboard-preferences.test.ts --reporter=verbose` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project frontend tests/frontend/filter-bar-presets.test.tsx --reporter=verbose` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project frontend tests/frontend/dashboard-language-regressions.test.tsx --reporter=verbose` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:unit` -> passed with - `363 passed | 1 skipped`. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:frontend` -> passed with `76` - files and `204 passed`. 
- - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:projects -- --projects=frontend` -> passed with `76` files, `204 passed`, and `55.86s` measured under high local system load. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with global - coverage unchanged from Phase 1. - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel` -> passed with Playwright `11 passed`. - - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> - CodeRabbit raised 0 issues. -- Measurement notes: - - Frontend/jsdom files dropped from `77` to `76`. - - Frontend project now runs `204` jsdom tests; removed broad Command-Palette group coverage was - replaced by cheaper Unit-Contract coverage plus one existing-file render smoke. - - Coverage summary after this phase: Statements `81.98%`, Branches `69.51%`, Functions - `83.06%`, Lines `83.43%`. - - Current absolute frontend timings are load-distorted; the structural improvement is lower - jsdom file count with no coverage regression. - - Commit is SSH-signed. - -### test-review.md / Phase 3 - Prozessnahe Tests weiter in Vertraege und Branches trennen - -- Status: fixed -- Scope: Runner-Resolution-, Command-Error-, stderr-streaming-, external-close- und Timeout- - Branches laufen jetzt im Fake-Spawn-Unit-Layer; der echte Cross-Process-File-Lock-Smoke nutzt - ein explizites IPC-Release statt eines fixed `250ms` Sleeps. Shared Dashboard-Preference- - Normalisierungs-Branches sind als billige Node-Unit-Tests stabilisiert, damit Coverage nicht von - teuren jsdom- oder Prozess-Smokes abhaengt. -- Fix reference: `7555c0e` `Harden process-adjacent test coverage` -- Closed findings: - - `test-review.md / H-03` - Prozessnahe Tests sind teilweise zu breit, Teil Runner-Branch- - Coverage und File-Lock-Smoke-Wartezeit. 
- - `test-review.md / H-04` - Kritische Runtime-/Config-Branches sind unterdurchschnittlich - abgedeckt, Teil Auto-Import-Runner und Dashboard-Preference-Normalisierung. - - `test-review.md / Phase 3` - Prozessnahe Tests weiter in Vertraege und Branches trennen. -- Guardrails: - - Echte Runner-Smokes fuer lokale Installation, PATH-Fallback, Timeout und stdout-Fehler bleiben - erhalten; neue Fake-Spawn-Tests decken die branchigen Fehler- und Fallback-Pfade zusaetzlich - deterministisch ab. - - Der Fake-Timer-Timeout-Test beweist, dass `runToktrack` erst nach Child-Exit rejected und nicht - direkt beim Timeout-Alarm settled. - - `signalOnClose`, stderr streaming und Spawn-Error-Wrapping werden ohne echte Subprozesse - getestet. - - Der Cross-Process-File-Lock-Test wartet nicht mehr ueber elapsed time, sondern haelt den - Child-Lock bis zum expliziten Parent-Release. - - Dashboard-Preference-Tests sichern malformed config, defensive default filters, visibility, - ordering und custom active range ohne jsdom ab. -- Validation: - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/dashboard-preferences.test.ts tests/unit/server-helpers-runner-core.test.ts tests/unit/server-helpers-runner-process.test.ts tests/unit/server-helpers-file-locks.test.ts --reporter=verbose` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:projects -- --projects=unit` -> passed, but absolute timing was distorted by local system CPU load. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `150` - files, `623 passed | 1 skipped`. - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel` -> passed with Playwright `11 passed`. 
- - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> - CodeRabbit raised 0 issues. -- Measurement notes: - - Coverage summary after this phase: Statements `82.07%`, Branches `69.58%`, Functions - `83.11%`, Lines `83.52%`. - - `server/auto-import-runtime.js` improved to Statements `69.95%`, Branches `54.72%`, - Functions `79.03%`, Lines `69.43%`. - - `shared/dashboard-preferences.js` improved to Statements `99.13%`, Branches `94.11%`, - Functions `100%`, Lines `99.09%`. - - Full gate timing under high local load: Unit `9.54s`, Frontend `73.51s`, Background - Integration `7.97s`, Playwright `11 passed` in `42.3s`; `ps aux` showed heavy non-test load - from WindowServer, ControlCenter, IDEA, Bitdefender and WebKit, so these absolute timings are - not a stable performance baseline. - - Commit is SSH-signed. - -### test-review.md / Phase 4 - Coverage risikogewichtet erhoehen - -- Status: fixed -- Scope: HTTP-Router-Branch-Coverage wurde auf die produktionskritischen Auto-Import-Stream- und - Report-Request-Fehlerpfade erweitert, ohne echte Server, Browser oder Typst-Prozesse zu starten. -- Fix reference: `f2b3400` `Cover router auto-import error paths` -- Closed findings: - - `test-review.md / H-04` - Kritische Runtime-Branches sind unterdurchschnittlich abgedeckt, - Teil HTTP-Router Auto-Import-Stream, Lease-Handling und Report-Body-Fehler. - - `test-review.md / Phase 4` - Coverage risikogewichtet erhoehen. -- Guardrails: - - Der Router-Mock unterstuetzt jetzt `res.write(...)`, damit Server-Sent-Event-Streams im - Unit-Layer realistisch akkumuliert werden. - - Auto-Import-Erfolg prueft `check`, `progress`, `stderr`, `success` und `done` Events sowie die - Weitergabe der erworbenen Lease an `performAutoImport`. - - Concurrent Auto-Import Starts werden als lokalisierte `409`-Antwort abgebildet. 
- - Strukturierte Auto-Import-Fehler pruefen `error` und `done` Stream-Events und stellen sicher, - dass die Lease auch im Fehlerpfad genau einmal freigegeben wird. - - Malformed Report-Requests liefern `400` mit `Invalid report request`, getrennt von oversized - Request- und fehlendem Typst-Fehlerpfad. -- Validation: - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/http-router-mutations.test.ts --reporter=verbose` -> passed with `21` tests. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:projects -- --projects=unit` - -> passed with unit median `3.69s` in the local timing summary. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `150` - files, `626 passed | 1 skipped`. - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel` -> passed with Playwright `11 passed`. - - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f tests/unit/http-router-mutations.test.ts` -> CodeRabbit raised 0 issues. -- Measurement notes: - - Coverage summary after this phase: Statements `82.46%`, Branches `70.01%`, Functions - `83.11%`, Lines `83.95%`. - - `server/http-router.js` improved to Statements `68%`, Branches `54.63%`, Functions `68%`, - Lines `68%`. - - Full gate timing in the normalized local run: Integration `2.69s`, Unit `2.64s`, Frontend - `22.34s`, Architecture `3.49s`, Integration-Background `1.95s`, Playwright `11 passed` in - `14.7s`. - - Commit is SSH-signed. 
- -### test-review.md / Phase 5 - Lokale und CI-Gates datenbasiert nachschaerfen - -- Status: fixed -- Scope: Das lokale Parallel-Gate deklariert jetzt die Report-/Artefakt-Outputs der parallel - laufenden Tasks, prueft vor dem Spawn auf Output-Kollisionen und gibt nach jedem Run eine - per-Task-Timing-Zusammenfassung aus. Die bestehende serielle Referenz bleibt unveraendert. -- Fix reference: `eab08c5` `Harden parallel gate diagnostics` -- Closed findings: - - `test-review.md / M-02` - Timing-Budgets erkennen Ausreisser, Teil lokale Task-Walltime- - Sichtbarkeit im Full-Gate. - - `test-review.md / Phase 5` - Lokale und CI-Gates datenbasiert nachschaerfen. -- Guardrails: - - `node scripts/run-parallel-gate.js --dry-run --e2e` zeigt jetzt Task-Wellen und deklarierte - Outputs wie `test-results/vitest-frontend.junit.xml`, `playwright-report/` und - `test-results/`. - - `findParallelOutputCollisions(...)` verhindert, dass zwei Tasks in derselben Welle denselben - Output-Pfad deklarieren. - - Erfolgreiche und fehlschlagende Gate-Runs drucken eine Timing-Summary der bereits beendeten - Tasks, damit lokale Engpaesse ohne JUnit-Inspektion sichtbar sind. - - `tests/frontend/metric-ratio-locale.test.tsx` fixiert den aktuellen Monat selbst und stellt - Timer/Globals wieder zurueck; der MonthMetrics-Test ist damit nicht mehr vom echten Datum oder - von fremden Fake-Timer-Zustaenden abhaengig. - - `docs/testing.md` dokumentiert Dry-Run, Output-Pruefung und Timing-Summary als Gate-Runbook. -- Validation: - - `node scripts/run-parallel-gate.js --dry-run --e2e` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/run-parallel-gate.test.ts --reporter=verbose` -> passed with `7` tests. - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project frontend tests/frontend/metric-ratio-locale.test.tsx --reporter=verbose` -> passed with `3` tests. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. 
- - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:projects -- --projects=unit` - -> passed with unit median `3.29s`, worst `3.29s`. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `150` - files, `629 passed | 1 skipped`. - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel` -> passed with Playwright `11 passed`. - - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> - CodeRabbit raised 0 issues. -- Measurement notes: - - Coverage summary after this phase: Statements `82.30%`, Branches `69.78%`, Functions - `83.06%`, Lines `83.80%`. - - Full gate timing summary: Static `6.73s`, Integration `4.01s`, Build `1.07s`, Unit `5.65s`, - Frontend `52.49s`, Architecture `6.24s`, Integration-Background `3.39s`, - Package-Smoke `21.34s`, E2E `25.67s`. - - The Full-Gate bottleneck in this run is still frontend/jsdom, followed by Playwright and the - package smoke. - - Commit is SSH-signed. - -### test-review.md / Phase 6 - Playwright klein und wertvoll halten - -- Status: fixed -- Scope: Playwright-Scope, worker-isolierte Fixtures, CI-Worker-Cap und Reportpfade sind jetzt im - schnellen Unit-Layer abgesichert. Die Browser-Suite bleibt bei den bestehenden `11` - representative Smoke-Flows. -- Fix reference: `efb9cee` `Guard Playwright smoke scope` -- Closed findings: - - `test-review.md / M-03` - E2E-Sharding ist erst nach Report-Isolation sinnvoll, Teil - Reporterpfad-Guardrail. - - `test-review.md / Phase 6` - Playwright klein und wertvoll halten. -- Guardrails: - - `tests/unit/playwright-config.test.ts` prueft `testDir`, `fullyParallel`, CI-Worker-Cap, - fehlenden globalen `webServer`, Trace/Screenshot/Video-Policy und stabile HTML-/JUnit- - Reporterpfade. 
- - E2E-Specs muessen `test`/`expect` aus `tests/e2e/fixtures.ts` importieren und duerfen nicht - direkt `@playwright/test` verwenden; damit bleiben Worker-Port, Runtime-Root und Auth-Session - isoliert. - - Die erlaubte Journey-Liste ist explizit: Command Palette, Forecast/Filters, Load/Upload, - Reporting und Settings/Backups. - - Die Browser-Suite bleibt auf `11` Tests und hat eine obere Guardrail von `12`, damit neue - Contract-Matrizen nicht unbemerkt in Playwright wachsen. - - `docs/testing.md` dokumentiert, dass dieser Vertrag bewusst angepasst werden muss, wenn eine - neue echte Browser-Journey dazukommt. -- Validation: - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/playwright-config.test.ts --reporter=verbose` -> passed with `4` tests. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `150` - files, `632 passed | 1 skipped`. - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel` -> passed with Playwright `11 passed`. - - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> - CodeRabbit raised 0 issues. -- Measurement notes: - - Coverage summary after this phase: Statements `82.30%`, Branches `69.78%`, Functions - `83.06%`, Lines `83.80%`. - - Full gate timing summary: Static `6.12s`, Integration `3.94s`, Build `1.30s`, Unit `3.62s`, - Frontend `23.68s`, Architecture `5.07s`, Integration-Background `2.96s`, - Package-Smoke `13.16s`, E2E `17.02s`. - - Playwright stayed at `11` browser tests and `workers=2` for CI-style runs. - - Commit is SSH-signed. 
- -### test-review.md / Phase 5 - Lokales Full-Gate parallelisieren - -- Status: fixed -- Scope: neues lokales, lastbewusstes Parallel-Gate fuer statische Checks, Vitest-Projekte, Build, - Package-Smoke und optionalen E2E-Full-Gate. -- Fix reference: `67db759` `Add parallel verification gate` -- Closed findings: - - `test-review.md / H-04` - Lokales Full-Gate laeuft unnoetig seriell, obwohl grosse Teile - unabhaengig sind. - - `test-review.md / Phase 5` - Lokale und CI-nahe Verifikation parallel und robuster - orchestrieren. -- Guardrails: - - `verify:parallel` ueberlappt nur die gut kombinierbaren Gates `test:static`, - `test:integration` und `build:app`. - - Die intern bereits stark parallelen oder prozessnahen Suites laufen in eigenen Wellen: - `test:unit`, `test:frontend`, `test:architecture` und `test:integration:background`. - - `verify:package` laeuft erst nach erfolgreichem Build und erfolgreichen Testwellen, damit der - Paket-Smoke weiterhin gegen das frische Produktionsbundle prueft. - - `verify:full:parallel` haengt Playwright an die finale Welle. - - Der E2E-Datepicker-Test fokussiert einen stabilen mittleren Monatstag, damit der Test am - Monatsletzten nicht faelschlich erwartet, dass `ArrowRight` ueber das Monatsende hinaus - navigiert. - - Unit-Tests decken Dry-Run, E2E-Plan, Spawn-Argumente, Fehlerweitergabe und Paket-Smoke- - Abhaengigkeit ab. -- Validation: - - `node scripts/run-parallel-gate.js --dry-run --e2e` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/run-parallel-gate.test.ts tests/unit/test-pipeline-scripts.test.ts --reporter=verbose` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. 
- - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npx playwright test tests/e2e/dashboard-forecast-filters.spec.ts -g 'exposes pressed filter state' --workers=1 --reporter=line` -> passed. - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel` -> passed with Playwright `11 passed`. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `151` - files, `609 passed | 1 skipped`. - - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> 0 - issues. -- Measurement notes: - - Naive "alles parallel" war nicht robust: Architektur konnte unter CPU-Saettigung in das 5s- - Testtimeout laufen; `integration-background` konnte unter voller Last beim parallelen - Background-Start nur eine Registry-Instanz sehen. - - Balancierte finale Messung: Integration `3.06s`, Unit `3.25s`, Frontend `25.89s`, - Architektur `4.44s`, Integration-Background `3.08s`, Playwright `11 passed` in `51.8s`. - - Coverage summary nach dieser Phase: Statements `81.54%`, Branches `69.33%`, Functions - `82.59%`, Lines `82.97%`. - - Commit is SSH-signed. - -### test-review.md / Phase 4 - Runtime-Branch-Coverage - -- Status: fixed -- Scope: gezielte Branch-Tests fuer Prozess-Utilities und Background-Stop-Fehlerpfad. -- Fix reference: `44c073d` `Cover process runtime edge cases` -- Closed findings: - - `test-review.md / H-03` - Coverage-Luecken liegen in kritischen Runtime-Branches, Teil - `server/process-utils.js` und `server/background-runtime.js`. - - `test-review.md / Phase 4` - Coverage risikogewichtet erhoehen. -- Guardrails: - - `process-utils.test.ts` prueft `isProcessRunning` fuer invalid PID, laufenden Prozess, ESRCH - und EPERM sowie `sleep` mit Fake Timers. - - `background-runtime.test.ts` prueft, dass `ttdash stop` bei EPERM den Fehlerpfad und Exitcode - korrekt setzt, ohne echte Prozesse zu signalisieren. 
-- Validation: - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/process-utils.test.ts tests/unit/background-runtime.test.ts --reporter=verbose` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:vitest:coverage` -> passed with `150` - files, `605 passed | 1 skipped`. - - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> 0 - issues. -- Coverage notes: - - Global coverage after this phase: Statements `81.46%`, Branches `69.09%`, Functions `82.54%`, - Lines `82.86%`. - - `server/process-utils.js` now reports Statements `100%`, Functions `100%`, Lines `100%`, - Branches `80%`. - - `server/background-runtime.js` improved to Statements `46.51%`, Branches `47.90%`, - Functions `47.05%`, Lines `47.14%`. - - Commit is SSH-signed. - -### test-review.md / Phase 3 - Prozessnahe Runner-Tests straffen - -- Status: fixed -- Scope: reine Latest-Version Timeout-/Cache-Branches laufen jetzt mit Fake Spawn im Node-Unit- - Test statt mit echten PATH-Skripten und Subprozessen. -- Fix reference: `c4882ae` `Move toktrack cache tests to fake spawn` -- Closed findings: - - `test-review.md / H-02` - Prozessnahe Tests mischen Vertragspruefung und Branch-Coverage, - Teil Toktrack-Latest-Version Cache/Timeout. - - `test-review.md / Phase 3` - Prozessnahe Tests straffen. -- Guardrails: - - Echte Prozess-Smokes bleiben fuer lokale Binary, PATH-Fallback, Runner-Timeout und stdout- - Fehler bestehen. - - Cache-Erfolg, Cache-Fehler und Lookup-Timeout werden ueber Fake Spawn, Fake Timers und - kontrolliertes `Date.now()` abgedeckt. 
-- Validation: - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/server-helpers-runner-core.test.ts tests/unit/server-helpers-runner-process.test.ts --reporter=verbose` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:projects -- --projects=unit` -> - passed. - - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> 0 - issues. -- Measurement notes: - - `tests/unit/server-helpers-runner-process.test.ts` dropped from earlier `3.105s` suite time to - `0.878s` in the unit project timing run. - - Unit project timing summary for this phase: median `6.41s`. - - Commit is SSH-signed. - -### test-review.md / Phase 2 - Frontend/jsdom Worker-Parallelitaet - -- Status: fixed -- Scope: datenbasierte Anpassung der Frontend/jsdom Worker-Obergrenze von `50%` auf `80%`. -- Fix reference: `2932697` `Tune frontend test parallelism` -- Closed findings: - - `test-review.md / H-01` - Frontend/jsdom ist der aktuelle Hauptkostentreiber. - - `test-review.md / Phase 2` - Frontend/jsdom-Kosten senken, Teil Worker-Kalibrierung. -- Guardrails: - - `vitest-coverage-config.test.ts` prueft, dass das Frontend-Projekt `maxWorkers: '80%'` - nutzt und nicht auf volle CPU-Saettigung gestellt wird. - - `docs/testing.md` dokumentiert die gemessene Begruendung und das Re-Benchmark-Kommando. -- Validation: - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/vitest-coverage-config.test.ts --reporter=verbose` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. 
- - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:projects -- --projects=frontend` -> passed with 77 files / 206 tests. - - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> 0 issues. -- Measurement notes: - - Old configured `50%` project run in the same window: `55.97s` after CPU saturation. - - `80%` probe before the 100% stress run: `27.46s`. - - `100%` probe: `84.84s`, confirming full saturation is harmful. - - Configured `80%` validation after the stress run: `48.13s`, still faster than the paired - post-saturation `50%` run. - - Commit is SSH-signed. - -### test-review.md / Phase 1 - Messbarkeit und Benchmarking - -- Status: fixed -- Scope: projektweise Vitest-Timing-Messung mit isolierten JUnit-Reports und wiederholbaren - Benchmark-Laeufen fuer Median/Worst-Run-Auswertung. -- Fix reference: `944a049` `Add project test timing benchmark` -- Closed findings: - - `test-review.md / Phase 1` - Messbarkeit und Benchmarks stabilisieren. - - `test-review.md / M-02` - Timing-Budgets sind vorhanden, aber noch grob. -- Guardrails: - - `test:timings:projects` schreibt projektbezogene Reports wie - `test-results/vitest-frontend.timing.junit.xml`. - - `test:timings:benchmark` wiederholt die Projektmessung dreimal und schreibt `timing-run-N` - Reports, damit keine normalen JUnit-Ausgaben ueberschrieben werden. - - Unit-Tests decken CLI-Parsing, eindeutige Reportpfade, Budget-Kommandos, Dry-Run und Median ab. -- Validation: - - `node scripts/run-vitest-project-timings.js --projects=unit --repeat=1 --dry-run` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npx vitest run --project unit tests/unit/run-vitest-project-timings.test.ts tests/unit/test-pipeline-scripts.test.ts --reporter=verbose` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run format:check` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run lint` -> passed. - - `npm_config_cache=/tmp/ttdash-npm-cache npm run typecheck` -> passed. 
- - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:projects -- --projects=unit` -> - passed. - - `coderabbit review --agent -t committed --base-commit HEAD~1 -c AGENTS.md -f ...` -> 0 - issues. -- Notes: - - Commit is SSH-signed. Local `git log --show-signature` verification still needs - `gpg.ssh.allowedSignersFile`, but `git cat-file -p HEAD` contains the `gpgsig` block. - - Earlier CodeRabbit uncommitted review surfaced the pre-existing `.gitignore`/`docs/review` - policy issue. That was outside this phase and intentionally not mixed into the phase commit. - -## 2026-04-29 - -### test-review.md implementation index - -- Status: fixed -- Scope: phasenweise Umsetzung der Testsystem-Review vom 2026-04-28. Diese Liste referenziert die - konkret geschlossenen Findings aus `docs/review/test-review.md` und die Commits, in denen die - jeweilige Verbesserung gelandet ist. - -| Finding | Status | Fix reference | Validation | -| --------------------------------------------------------------------------------- | ------ | ----------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------- | -| `test-review.md / H-01` - Playwright ist nicht worker-isoliert | fixed | `ab49513` `Isolate Playwright workers` | `npm run test:e2e:ci` mit 2 Workern gruen; final `verify:full` gruen | -| `test-review.md / H-02` - CI ist ein monolithischer serieller Full-Gate | fixed | `b5cac6a` `Split CI test pipeline` | CI-DAG in `.github/workflows/ci.yml`; final `verify:full` gruen | -| `test-review.md / H-03` - Command-Palette-E2E mischt zu viele Testarten | fixed | `cb31d35` `Move command palette contracts to Vitest` | Command-Contract in Vitest, E2E auf Smokes reduziert; final Playwright `11 passed` | -| `test-review.md / H-04` - Report- und Coverage-Ausgaben sind nicht matrix-sicher | fixed | `c4abb2b` `Make Vitest setup and reports 
parallel-safe`, `b5cac6a` `Split CI test pipeline` | Projekt-/Job-spezifische JUnit/Artifact-Pfade; CI-Matrix nutzt getrennte Reports | -| `test-review.md / M-02` - Node-Projekte laden unnoetiges React-Test-Setup | fixed | `c4abb2b` `Make Vitest setup and reports parallel-safe` | `vitest.setup.node.ts` / `vitest.setup.frontend.ts` getrennt; Guardrail in `vitest-coverage-config.test.ts` | -| `test-review.md / M-03` - Coverage ist global gruen, aber risikogewichtet schwach | fixed | `3a4285d` `Improve runtime and dashboard coverage`, `117cfeb` `Fix CodeRabbit test robustness issues` | Runtime-/Dashboard-Coverage gezielt erhoeht; final Coverage-Gate gruen | -| `test-review.md / M-05` - Statische Checks haben Doppelarbeit | fixed | `b5cac6a` `Split CI test pipeline` | `test:static` als ein regulaerer Static-Gate, Cache fuer Prettier/ESLint/TypeScript | -| `test-review.md / M-06` - Package-Smoke gehoert in eigenen CI-Job | fixed | `b5cac6a` `Split CI test pipeline` | `build` erzeugt `production-dist`; `package-smoke` und `e2e` nutzen Artifact parallel | -| `test-review.md / M-07` - Timing-Regressions sind sichtbar, aber nicht budgetiert | fixed | `e9d54fa` `Add test timing budgets` | `test:timings:budget` gruen; CI-Matrix prueft JUnit-Reports gegen `20s` Suite / `12s` Test Budget | - -Nicht als vollstaendig geschlossen markiert: - -- `test-review.md / M-01` - `frontend/jsdom` bleibt der groesste Vitest-Kostentreiber. Die CI-DAG, - Setup-Trennung und der zentrale Frontend-Timeout reduzieren Flakiness und Blockierzeit, aber die - Import-/Environment-Kosten wurden nicht als eigener Optimierungsabschluss entfernt. -- `test-review.md / M-04` - Prozessnahe Tests wurden begrenzt und budgetiert, aber reale Subprozesse - bleiben absichtlich fuer Shell-, PATH-, CLI- und Cross-Process-Verhalten bestehen. -- `test-review.md / N-01` - macOS-Sandbox/Playwright wurde als Agent-Memory dokumentiert; das ist - kein Repository-Fix und bleibt eine Laufzeitumgebungsregel. 
- -- Validation: - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:timings:budget` -> passed with `149` - files and `596 passed | 1 skipped`. - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full` -> - passed, including coverage, build, package verification, and Playwright `11 passed`. - - `coderabbit review --agent -t committed -c AGENTS.md` after Phase 6 -> `0 issues`. - -## 2026-04-28 - -### documentation-review.md / N-01 - -- Status: fixed -- Scope: the stale `security-review.md / H-01` fixed-finding note about remote `?ttdash_token=...` browser bootstrap was corrected. The entry now matches the current runtime and integration test contract: remote API auth accepts Bearer, `X-TTDash-Remote-Token`, or an explicit auth cookie, while automatic query-token-to-cookie bootstrap is a local-session flow. -- Guardrails: `tests/integration/server-remote-auth.test.ts` asserts that remote query bootstrap does not return the local `303` cookie redirect, and `server/remote-auth.js` only resolves query-token bootstrap when local session auth is active. -- Validation: - - `npm run format:check` - - `git diff --check` - -## 2026-04-27 - -### test-review.md / H-01 - -- Status: fixed -- Scope: simple architecture rules no longer depend on repeated ArchUnit project scans. `tests/architecture/source-graph.ts` now owns one cached `src/**` file scan, TypeScript-based import/export parsing, alias resolution for `@/...`, relative import resolution, extension resolution, and `index` module resolution. The frontend layer, unused-hook, hook-naming, and shared-UI-placement tests use this shared graph while the feature-slice diagram remains on ArchUnit where its diagram model adds value. -- Guardrails: `tests/architecture/frontend-layers.test.ts` still blocks hooks from importing components, lib core from importing hooks/components, lib React modules from reaching back into hooks/components, and type modules from depending on components/hooks/lib. 
`tests/architecture/unused-hooks.test.ts`, `hook-naming.test.ts`, and `shared-ui-placement.test.ts` now reuse the same source graph so future test-layer changes do not reintroduce separate slow scans. `tests/architecture/source-graph.test.ts` covers static imports, re-exports, side-effect imports, and dynamic imports with options. -- Follow-up quality fixes during implementation: - - Dynamic `import(...)` calls are parsed alongside static imports and re-exports, so lazy edges stay visible to the architecture guardrails instead of only top-level declarations being checked. - - The formerly near-timeout `hooks must not depend on components` rule dropped from the historical `~4950ms` flake boundary to well below `100ms` in the targeted architecture run, without increasing global or local timeouts. - - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. -- Validation: - - `npm run test:architecture -- --reporter=verbose` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run check:deps` - - `git diff --check` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 1 minor status-line suggestion already satisfied by the current `- Status: fixed` text, round 3: 1 dynamic-import options issue fixed, final round: 0 issues - -### test-review.md / H-02 - -- Status: fixed -- Scope: Vitest coverage now reports against the product runtime instead of the former narrow frontend slice. `vitest.config.ts` includes `src/**/*.{ts,tsx}`, `server.js`, `server/**/*.js`, `shared/**/*.js`, and `usage-normalizer.js`, while excluding only TypeScript declarations, test files, and locale JSON assets from the configured runtime denominator. -- Guardrails: `tests/unit/vitest-coverage-config.test.ts` locks the coverage includes, excludes, and broad-denominator global thresholds. 
The thresholds ratchet the new signal at Statements `70`, Branches `60`, Functions `70`, and Lines `70`, below the measured broad baseline so CI fails on real regressions without making the first honest denominator brittle. -- Follow-up quality fixes during implementation: - - The current `npm run test:unit:coverage` baseline is Statements `72.85%`, Branches `63.01%`, Functions `74.97%`, Lines `73.88%` across the broader runtime scope. - - Server entrypoints, local server modules, shared runtime contracts, App/main entry files, and lazy dashboard sections are now visible in the coverage report instead of being hidden outside the denominator. - - `docs/testing.md` documents the broader denominator and clarifies that subprocess-spawned CLI/server paths remain behaviorally covered by integration, background, and Playwright tests even when V8 main-process line attribution stays lower. - - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. -- Validation: - - `npx vitest run --project unit tests/unit/vitest-coverage-config.test.ts --reporter=verbose` - - `npm run test:unit:coverage` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run check:deps` - - `npx vitest run --project architecture --reporter=verbose` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues - -### test-review.md / M-01 - -- Status: fixed -- Scope: dead hook visibility now has an explicit architecture guardrail. The two originally cited unused hook files, `src/hooks/use-theme.ts` and `src/hooks/use-provider-limits.ts`, had already been removed during the earlier `code-review.md / N-01` cleanup; this phase strengthens the test-review guardrail so the same class of dead runtime hook cannot silently return. 
-- Guardrails: `tests/architecture/unused-hooks.test.ts` now treats a hook as live only when it is reachable from `src/main.tsx`, rather than merely imported by any production file. A focused synthetic regression covers a dead hook cluster where one unused hook imports another, so future dead-code islands cannot satisfy the guardrail by importing each other. -- Follow-up quality fixes during implementation: - - `docs/architecture.md` and `docs/testing.md` now describe the hook rule as app-entrypoint reachability, matching the enforced behavior. - - `dependency-cruiser` remains responsible for dependency boundaries and cycle/orphan visibility, while `npm run test:architecture` owns the precise unused-hook signal that the original finding needed. - - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. -- Validation: - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `git diff --check` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues - -### test-review.md / M-02 - -- Status: fixed -- Scope: the highest-cost and hang-prone test paths were split, made deterministic, and given bounded cleanup without changing production behavior. The auto-import singleton integration test now coordinates the fake Toktrack runner through start/release sentinel files instead of a fixed 2-second delay, and the previous background catch-all suite was split into focused background prefix, selection, concurrency, registry, and startup CLI files. -- Guardrails: the `integration-background` Vitest project now allows file-level scheduling with a capped `maxWorkers: 2`, so independent background process tests can overlap without turning the test run into an unbounded process fan-out. `docs/testing.md` now documents deterministic subprocess coordination and small focused background files as the expected pattern. 
-- Follow-up quality fixes during implementation: - - The final `npm run test:timings` run now reports the auto-import singleton test at `1.371s` instead of the historical `3.326s`, and `tests/integration/server-auto-import.test.ts` at `2.669s` instead of the historical `4.521s`. - - The old `tests/integration/server-background.test.ts` monolith no longer dominates as one `5.785s` suite; its formerly mixed cases now appear as focused files, with the slowest background slice at `3.208s` in the final coverage/timing run. - - Test hangs were addressed at the harness level: server readiness/shutdown probes now have abort timeouts, CLI subprocess helpers have bounded timeouts and SIGKILL fallback, failed standalone server startup cleans up the spawned process, background cleanup force-stops leftover registry PIDs owned by the test root, and shared integration servers now wait for process shutdown in `afterAll`. - - `tests/unit/server-helpers-runner-process.test.ts` remains intentionally subprocess-backed where shell, `PATH`, timeout, and runner fallback behavior are the thing under test. - - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. 
-- Validation: - - `npx vitest run --project integration tests/integration/server-auto-import.test.ts tests/integration/server-startup-cli.test.ts --project integration-background tests/integration/server-background.test.ts tests/integration/server-background-selection.test.ts tests/integration/server-background-concurrency.test.ts tests/integration/server-background-registry.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-auto-import.test.ts tests/integration/server-startup-cli.test.ts tests/integration/server-local-auth.test.ts --project integration-background tests/integration/server-background.test.ts tests/integration/server-background-selection.test.ts tests/integration/server-background-concurrency.test.ts tests/integration/server-background-registry.test.ts --reporter=verbose` - - `npx vitest run --project integration-background tests/integration/server-background-registry.test.ts --reporter=verbose` - - `npx vitest run --project integration-background tests/integration/server-background-selection.test.ts --reporter=verbose` - - `tsc --noEmit` - - `npx vitest run --project integration --project integration-background --reporter=verbose` - - `npm run verify:full` -> final run passed, including Playwright `15 passed` - - `npm run test:timings` -> completed without hanging after the cleanup hardening - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> multiple rounds: 0 issues initially, 4 minor test-harness/test-clarity issues fixed, latest round 0 issues - -### test-review.md / M-03 - -- Status: fixed -- Scope: the Playwright dashboard monolith was split by user journey without changing dashboard runtime behavior. The former `tests/e2e/dashboard.spec.ts` coverage now lives in focused load/upload, forecast/filter, settings/backups, and reporting specs, while command-palette coverage remains separate. 
-- Guardrails: shared E2E auth, state reset, usage seeding, file upload, mocked auto-import/report, download-recording, and dashboard test-hook access now live in `tests/e2e/helpers.ts`. `docs/testing.md` documents journey-based Playwright files so future browser coverage does not grow back into a catch-all suite. -- Follow-up quality fixes during implementation: - - The CSP/browser-error smoke coverage moved to `tests/e2e/dashboard-load-upload.spec.ts`, replacing stale references to the removed dashboard monolith. - - The report-language test now resets both usage and settings before switching locale, making it independent from earlier Playwright file order. - - The largest dashboard-specific E2E files are now focused around settings/backups and command-palette behavior instead of one mixed dashboard file. - - The recurring silent coverage/timing hang was traced to Vitest runs that emitted only the JUnit reporter in this non-interactive gate. `test:unit:coverage` and `test:timings` now keep the JUnit artifact and also emit the `dot` reporter, so long coverage runs show progress and finish cleanly instead of depending on a silent reporter path. - - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. -- Validation: - - `npx vitest run --project unit tests/unit/vitest-coverage-config.test.ts --reporter=verbose` -> passed, including the guardrail for explicit `dot` plus `junit` reporters on coverage-heavy scripts. - - `npm run test:unit:coverage` -> passed with `135` files, `496` tests passed, `1` skipped, and no silent hang after the reporter fix. - - `npm run test:timings` -> passed and printed timing diagnostics; the slowest suites remained existing server integration subprocess paths, while the split E2E work is outside this Vitest-only timing gate. - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` -> passed with `15` Playwright tests after the journey split. 
- - `npm run verify:full` -> final run passed, including format, lint, docstring lint, dependency-cruiser, `tsc --noEmit`, architecture tests, coverage, package verification, production build, and Playwright `15 passed`. - - Targeted Playwright follow-ups passed for `tests/e2e/dashboard-load-upload.spec.ts` and `tests/e2e/dashboard-settings-backups.spec.ts` after CodeRabbit requested locale-resilient label assertions. - - `git diff --check` -> passed after this validation update. - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> rounds 1 and 3 reported minor test/doc clarity issues that were fixed; rounds 2, 4, and 5 reported 0 issues; round 6 reported this missing validation detail and was fixed by this entry; the follow-up review after the validation update reported 0 issues. - -## 2026-04-26 - -### server-review.md / H-01 - -- Status: fixed -- Scope: `server.js` was reduced to dependency/runtime wiring during this phase, then further reduced by `server-review.md / M-01` to an executable CLI/Bin shim. CLI parsing/help moved to `server/cli.js`, startup summaries/browser opening/local auth-session metadata moved to `server/startup-runtime.js`, shared process helpers moved to `server/process-utils.js`, and HTTP server lifecycle, CLI routing, startup sequencing, client errors, and shutdown cleanup moved to `server/server-lifecycle.js`. -- Guardrails: `tests/architecture/server-entrypoint-contract.test.ts` blocks local helper function definitions, `__test__` exports, and direct `http.createServer(...)` calls from returning to `server.js`. `tests/unit/server-cli.test.ts`, `tests/unit/startup-runtime.test.ts`, and `tests/unit/server-lifecycle.test.ts` cover the extracted behavior directly. Existing server helper tests now instantiate `server/data-runtime.js` and `server/auto-import-runtime.js` directly instead of importing `server.js`. 
-- Follow-up quality fixes during implementation: - - The productive `server.js.__test__` helper surface was removed as part of the Entrypoint split; tests now target the owning runtime modules. - - The cross-process file-lock test now loads `server/data-runtime.js` directly in its child process, so it still validates real lock behavior without loading the CLI entrypoint. - - The startup data summary now pluralizes `1 day` versus `N days` correctly without changing the existing cost or token formatting. - - Background-child shutdown now logs unregister failures, suppresses unhandled promise rejections, exits through a finally-style path, and prevents duplicate shutdown completion when graceful close and forced timeout race. - - CLI help now documents the supported `-bg` legacy background alias alongside `-b` and `--background`, so displayed usage matches parser behavior. - - Startup behavior remains intentionally unchanged: auth bootstrap URL output, remote warnings, browser opening, background registration, auto-load logging, package startup, and API routing keep the same runtime contracts. 
-- Validation: - - `npx vitest run --project unit tests/unit/server-cli.test.ts tests/unit/startup-runtime.test.ts tests/unit/server-lifecycle.test.ts tests/unit/server-helpers-network.test.ts tests/unit/server-helpers-runner-core.test.ts tests/unit/server-helpers-runner-process.test.ts tests/unit/server-helpers-file-locks.test.ts --reporter=verbose` - - `npx vitest run --project architecture tests/architecture/server-entrypoint-contract.test.ts --reporter=verbose` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> rounds 1-2: 0 issues; round 3: 2 minor issues fixed; round 4: 1 minor issue fixed - -### server-review.md / M-01 - -- Status: fixed -- Scope: `server.js` is now only the executable CLI/Bin shim. Runtime composition moved to `server/app-runtime.js`, which builds the injected server lifecycle and keeps the background process entrypoint pointed at package-root `server.js`. The undocumented `require('./server.js').bootstrapCli/runCli` import surface was removed; in-process test/server starts now use `createAppRuntime(...)` explicitly. -- Guardrails: `tests/architecture/server-entrypoint-contract.test.ts` now blocks `module.exports`, `exports.*`, local helper functions, `__test__`, direct `http.createServer(...)`, and any `server.js` require target other than `./server/app-runtime`. Playwright's `scripts/start-test-server.js` uses the explicit app-runtime composer so E2E startup still exercises the same server lifecycle without importing the executable shim as a helper module. -- Follow-up quality fixes during implementation: - - Environment-derived CLI/server configuration now lives inside `createAppRuntime(...)`, so test harnesses can set isolated storage, host, and port environment variables before composing the runtime. 
- - `server/app-runtime.js` passes an explicit root `server.js` path to the background runtime, preserving foreground/background CLI behavior after the composition move. - - `docs/architecture.md` documents the new split between the executable shim and the app-runtime composition root. - - Dashboard UI, content, animation, API route behavior, local auth, remote auth, background CLI, packaging, and E2E startup behavior remain unchanged. -- Validation: - - `node -c server.js` - - `node -c server/app-runtime.js` - - `npx vitest run --project architecture tests/architecture/server-entrypoint-contract.test.ts --reporter=verbose` - - `npx vitest run --project unit tests/unit/server-lifecycle.test.ts tests/unit/server-cli.test.ts tests/unit/startup-runtime.test.ts --project integration tests/integration/server-local-auth.test.ts tests/integration/server-remote-auth.test.ts --reporter=verbose` - - `npx vitest run --project integration-background tests/integration/server-background.test.ts --reporter=verbose` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> multiple rounds: 0 issues - -### server-review.md / M-02 - -- Status: fixed -- Scope: mutable server runtime state is now encapsulated in per-runtime services. `server/runtime-state.js` owns the runtime snapshot, startup auto-load flag, singleton runtime lease, and expiring async cache primitives; `server/app-runtime.js`, `server/server-lifecycle.js`, and `server/startup-runtime.js` use the runtime-state service instead of ad hoc mutable flags; `server/auto-import-runtime.js` owns the Auto-Import lease and Toktrack latest-version cache through those services. -- Guardrails: `tests/unit/runtime-state.test.ts` covers runtime snapshots, startup flag isolation, singleton lease behavior, in-flight lookup deduplication, and TTL/reset behavior. 
`tests/architecture/server-runtime-state-contract.test.ts` blocks route-local `autoImportStreamRunning` state and the old free Auto-Import/Toktrack cache variables from returning. -- Follow-up quality fixes during implementation: - - `server/http-router.js` no longer owns an Auto-Import stream flag. It acquires a lease before sending SSE headers, so concurrent starts still return the existing HTTP `409` response without turning into streamed errors. - - `server/auto-import-runtime.js` remains the owner of Auto-Import execution, but the singleton state is now an explicit lease with idempotent release for normal completion, failures, and aborted streams. - - Toktrack latest-version lookups still share one in-flight request and keep separate success/failure TTL behavior, but the cache/promise state is hidden behind `createExpiringAsyncCache(...)`. - - Dashboard UI, content, animation, API paths, response shapes, local auth, remote auth, background startup, and E2E startup behavior remain unchanged. 
-- Validation: - - `node -c server/runtime-state.js` - - `node -c server/app-runtime.js` - - `node -c server/auto-import-runtime.js` - - `node -c server/http-router.js` - - `npx vitest run --project unit tests/unit/runtime-state.test.ts tests/unit/server-lifecycle.test.ts tests/unit/startup-runtime.test.ts tests/unit/server-helpers-runner-process.test.ts --project architecture tests/architecture/server-entrypoint-contract.test.ts tests/architecture/server-runtime-state-contract.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-auto-import.test.ts tests/integration/server-api-routing-runtime.test.ts tests/integration/server-local-auth.test.ts tests/integration/server-remote-auth.test.ts --project integration-background tests/integration/server-background.test.ts --reporter=verbose` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> multiple rounds: 0 issues - -### server-review.md / N-01 - -- Status: fixed -- Scope: Host, Origin, Sec-Fetch-Site, and JSON content-type request policy moved from the broader HTTP utility module into `server/http-request-guards.js`. `server/http-utils.js` remains the compatible facade for router consumers and still owns request body parsing, JSON responses, buffer responses, and API-prefix resolution. -- Guardrails: `tests/unit/http-request-guards.test.ts` covers loopback, wildcard, non-loopback, IPv6, origin, cross-site, and content-type behavior directly. `tests/unit/http-utils.test.ts` now focuses on the HTTP facade and body/response behavior. `tests/architecture/server-http-boundaries.test.ts` keeps request guard policy out of the router and out of generic HTTP utility internals. -- Follow-up quality fixes during implementation: - - The request guard code now tolerates missing `req.headers` defensively while preserving the same rejection path for malformed or missing Host/Origin input. 
- - Wildcard binds now apply the intended socket-local host check before exact bind-host matching, so `Host: 0.0.0.0` is not treated as a trusted client-facing host. - - Body-size and response-header behavior gained focused unit coverage, so the split did not trade broad utility tests for narrower policy-only tests. - - Dashboard UI, content, animation, API paths, response shapes, local auth, remote auth, background startup, and E2E startup behavior remain unchanged. -- Validation: - - `node -c server/http-request-guards.js` - - `node -c server/http-utils.js` - - `npx vitest run --project unit tests/unit/http-request-guards.test.ts tests/unit/http-utils.test.ts --project architecture tests/architecture/server-http-boundaries.test.ts tests/architecture/server-entrypoint-contract.test.ts tests/architecture/server-runtime-state-contract.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-api-guards.test.ts tests/integration/server-remote-auth.test.ts --reporter=verbose` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> multiple rounds: 0 issues - -## 2026-04-25 - -### security-review.md / H-01 - -- Status: fixed -- Scope: explicit non-loopback binding now requires `TTDASH_REMOTE_TOKEN` in addition to `TTDASH_ALLOW_REMOTE=1`. Remote API requests are authenticated centrally before route handling through Bearer auth, `X-TTDash-Remote-Token`, or an HttpOnly same-site cookie; the later `security-review.md / M-01` fix extends the same auth boundary to default loopback sessions. -- Guardrails: `tests/unit/remote-auth.test.ts` covers remote-mode configuration, accepted credential forms, generic rejection paths, timing-safe length-independent comparison behavior, and the browser bootstrap redirect/cookie contract. 
`tests/integration/server-remote-auth.test.ts` covers failed startup without a token, protected remote API reads, cookie bootstrap, and preserved Origin mutation guards. Existing server guard tests continue to cover host validation, malformed paths, oversized payloads, and cross-site rejection. -- Follow-up quality fixes during implementation: - - Remote authentication lives in `server/remote-auth.js` as a focused server boundary, while `server/http-router.js` only applies the injected gate before API routing and static bootstrap handling. - - Remote API access is supported through Bearer auth, `X-TTDash-Remote-Token`, or an explicit `ttdash_auth` cookie. The automatic `?ttdash_token=...` bootstrap-to-cookie redirect is a local-session flow, not the current remote-mode browser flow; `tests/integration/server-remote-auth.test.ts` locks that remote query bootstrap does not set a cookie. - - Background runtime identity checks now send the remote Bearer header when the parent process is running in authenticated remote mode, so remote background management keeps working. - - Startup help and remote warnings now mention the additional token requirement without logging the token value. 
-- Validation: - - `npx vitest run --project unit tests/unit/remote-auth.test.ts tests/unit/http-utils.test.ts tests/unit/background-runtime.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-remote-auth.test.ts tests/integration/server-api-guards.test.ts --reporter=verbose` - - `npx vitest run --project integration-background tests/integration/server-background.test.ts --reporter=verbose` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues, round 4: 0 issues - -### security-review.md / M-01 - -- Status: fixed -- Scope: default loopback servers now generate a per-start local session token and require authentication for every API endpoint, including `/api/usage`, `/api/settings`, `/api/runtime`, and `/api/toktrack/version-status`. Normal dashboard startup stays frictionless because the auto-open URL carries the one-time bootstrap token, which is exchanged for an HttpOnly/SameSite cookie and stripped from the redirected URL. -- Guardrails: `tests/unit/remote-auth.test.ts` covers default local session auth, generated local tokens, explicit test opt-out, shared credential parsing, bootstrap redirects, and generic rejection paths. `tests/integration/server-local-auth.test.ts` covers protected loopback reads, Bearer and cookie access, preserved mutation Origin guards, and restrictive auth-session file permissions. Existing remote, background, persistence, recovery, routing, and E2E tests were updated to authenticate through the same boundary. -- Follow-up quality fixes during implementation: - - The existing `server/remote-auth.js` boundary now owns both local session auth and remote auth so API route handlers stay unaware of the source of the credential. 
- - `server.js` writes the local session metadata to restrictive user config state and prints a `Local Auth URL` only when browser auto-open is disabled. - - Background instance registry entries now carry per-instance auth headers and bootstrap URLs so `ttdash stop`, registry pruning, and no-open background starts remain usable. - - Playwright and integration test helpers bootstrap through the same local session file instead of bypassing the production auth path. -- Residual risk: - - This protects the local HTTP API from unauthenticated loopback access, browser/DNS-rebinding-style access, and other OS users. It does not fully isolate against malware already running as the same OS user, which can target user config files, terminal output, or browser state. -- Validation: - - `npx vitest run --project unit tests/unit/remote-auth.test.ts tests/unit/background-runtime.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-local-auth.test.ts tests/integration/server-api-guards.test.ts tests/integration/server-api-persistence.test.ts tests/integration/server-api-routing-runtime.test.ts tests/integration/server-api-recovery.test.ts tests/integration/server-remote-auth.test.ts --reporter=verbose` - - `npx vitest run --project integration-background tests/integration/server-background.test.ts --reporter=verbose` - - `PLAYWRIGHT_TEST_PORT=3020 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:package` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 2 minor documentation issues fixed, round 4: 0 issues - -### security-review.md / N-01 - -- Status: fixed -- Scope: the server CSP no longer allows `unsafe-inline` styles. 
Shared security headers now live in `server/security-headers.js`, HTML responses get a per-response CSP nonce plus a matching `ttdash-csp-nonce` meta tag, `style-src-elem` is limited to `self` and the nonce, and `style-src-attr 'none'` blocks literal inline style attributes. -- Guardrails: `tests/unit/security-headers.test.ts` covers CSP construction, nonce shape, nonce meta injection, HTML response preparation, and non-HTML header behavior. `tests/integration/server-api-guards.test.ts` checks the strict CSP on authenticated API responses. `tests/e2e/dashboard-load-upload.spec.ts` verifies that the loaded dashboard HTML carries the nonce-backed CSP and that the browser reports no CSP errors while the main dashboard journey runs. -- Follow-up quality fixes during implementation: - - CSP generation moved out of `server.js`, so future header changes have a focused unit-testable boundary. - - `server/http-router.js` now treats HTML static responses separately from other assets, allowing nonce-specific headers without weakening API, JSON, CSS, or JS asset responses. - - React/Recharts/Motion JS-driven style property updates remain allowed because the browser-enforced risk path for this finding is literal inline style attributes or inline style elements; preserving those runtime style properties avoids UI and animation regressions without reintroducing `unsafe-inline`. 
-- Validation: - - `npx vitest run --project unit tests/unit/security-headers.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-api-guards.test.ts --reporter=verbose` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 1 minor grammar issue in `docs/application-stack-reference.md` fixed, round 3: 0 issues - -### performance-review.md / H-01 - -- Status: fixed -- Scope: secondary cost-analysis charts now leave the initial dashboard bundle and load through the section warmup path. Dashboard sections also get an adaptive, deduplicated preload scheduler that starts visible lazy section chunks shortly after the first render and keeps IntersectionObserver preloading far enough ahead of the viewport to avoid visible lazy-loading gaps while scrolling. -- Guardrails: `tests/frontend/dashboard-motion.test.tsx` covers idle/fallback scheduling, deduplication, cancellation, early preloading, and inert hidden content. `tests/unit/dashboard-section-preloading.test.ts` covers visible-section queue ordering, hidden/request-data gating, and duplicate task removal. -- Follow-up quality fixes during implementation: - - Cost-analysis entry charts (`CostOverTime`, `CostByModel`) were moved from static dashboard imports to lazy chunks, reducing the built main `index` chunk from roughly `212 kB` raw / `57 kB` gzip to roughly `198 kB` raw / `53 kB` gzip. - - The warmup scheduler now uses a short idle timeout with bounded parallelism so deeper sections are warmed without waiting for late viewport proximity. - - Section placeholders for deeper analysis/table areas now better match final section heights, preventing scroll-command layout shift while lazy chunks complete above the target section. 
-- Validation: - - `npm run test:unit -- tests/frontend/dashboard-motion.test.tsx tests/unit/dashboard-section-preloading.test.ts` - - `npx playwright test tests/e2e/command-palette.spec.ts -g "executes analysis section navigation commands"` - - `npm run lint` - - `npm run test:architecture` - - `npm run check:deps` - - `tsc --noEmit` - - `npm run build:app` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues - -### performance-review.md / M-01 - -- Status: fixed -- Scope: dashboard filter changes now flow through a centralized `deriveDashboardFilterData(...)` pass that derives date/month filtering, provider/model filtering, available filter options, date range, and view-mode aggregation behind the stable `useDashboardFilters` contract. Computed dashboard summaries now share one normalized breakdown aggregation for model costs, provider metrics, and model options behind the stable `useComputedMetrics` contract. -- Guardrails: `tests/unit/dashboard-filter-data.test.ts` compares the new filter derivation with the previous staged semantics across representative filter combinations and empty states. `tests/unit/dashboard-aggregation.test.ts` locks normalized model/provider aggregation and day counting. `tests/frontend/use-computed-metrics.test.tsx` covers the public computed-metrics hook boundary. -- Follow-up quality fixes during implementation: - - `src/lib/dashboard-filter-data.ts` imports directly from the shared dashboard-domain contract instead of routing through broader frontend transform helpers, keeping the hook dependency graph smaller and the architecture layer test below its timeout budget. - - `src/lib/data-transforms.ts` now fills scalar chart arrays and model-name discovery in one sorted-data pass instead of separate `map(...)` and `flatMap(...)` passes. 
- - `computeModelCosts(...)` and `computeProviderMetrics(...)` now delegate to the shared breakdown summary, so standalone calculation callers and dashboard hook callers use the same aggregation path. - - After CodeRabbit review, computed `allModels` now unions `modelsUsed` and `modelBreakdowns`, so inconsistent imported data can no longer hide a breakdown-backed model from the model-over-time chart while existing modelsUsed-only behavior remains preserved. -- Validation: - - `npm run test:unit -- tests/unit/dashboard-filter-data.test.ts tests/unit/dashboard-aggregation.test.ts tests/frontend/use-computed-metrics.test.tsx tests/frontend/use-dashboard-filters.test.tsx tests/unit/analytics.test.ts tests/unit/data-transforms.test.ts tests/unit/code-rabbit-phase4.test.ts` - - `npm run test:unit -- tests/unit/dashboard-aggregation.test.ts tests/frontend/use-computed-metrics.test.tsx tests/unit/analytics.test.ts` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` -> completed after the CodeRabbit follow-up fix - - `npm run test:timings` -> completed after the CodeRabbit follow-up fix - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 1 minor issue fixed, round 3: 0 issues, round 4: 0 issues - -### performance-review.md / M-02 - -- Status: fixed -- Scope: the Settings dialog no longer starts `/api/toktrack/version-status` when it opens. The dashboard shell now schedules one cancellable, idle-friendly toktrack latest-version warmup per browser session through `src/lib/toktrack-version-status.ts`, and the settings version hook only formats the shared session snapshot for the Maintenance tab. -- Guardrails: `tests/unit/toktrack-version-status.test.ts` covers the pinned initial snapshot, concurrent warmup deduplication, cached failure behavior, scheduled fallback warmup, and cancellation. 
`tests/frontend/settings-modal-version-status.test.tsx` covers warmed status rendering without a dialog-open fetch, the no-fetch dialog-open path, and cached failure reuse across reopen. `tests/frontend/dashboard-filter-visibility.test.tsx` covers that the dashboard shell wires the session warmup scheduler. -- Follow-up quality fixes during implementation: - - `docs/architecture.md` now documents the session-wide toktrack version status cache as the owner of lookup state, while the settings modal hook is only the presentation adapter. - - Dashboard tests mock the warmup scheduler explicitly so future dashboard renders cannot accidentally perform real toktrack version network work during component tests. - - The server `/api/toktrack/version-status` contract and existing server-side success/failure TTL cache remain unchanged, so the fix changes when the UI asks for the status, not how the status is resolved. -- Validation: - - `npm run test:unit -- tests/unit/toktrack-version-status.test.ts tests/frontend/settings-modal-version-status.test.tsx tests/frontend/settings-modal-tabs.test.tsx tests/frontend/dashboard-filter-visibility.test.tsx tests/frontend/dashboard-error-state.test.tsx tests/unit/api.test.ts` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues - -### performance-review.md / N-01 - -- Status: fixed -- Scope: complex dashboard UI islands now separate calculation-heavy view data from rendering, accessibility, and motion. Drilldown details, heatmap grids, request-quality ratios, sortable table rows, recent-day benchmarks, and date-picker calendar navigation are derived through focused `src/lib/*-data.ts` helpers while the existing frontend functionality, visual structure, and animations stay unchanged. 
-- Guardrails: new unit suites cover drilldown aggregation/rankings/token segments, heatmap grid and keyboard target derivation, request-quality ratios/progress, table sort/row derivation, and date-picker calendar actions. Existing frontend suites still cover the DOM, ARIA, keyboard, and motion contracts for the touched components. -- Follow-up quality fixes during implementation: - - React component tests for heatmap and drilldown were narrowed to visible UI/A11y contracts after the derived branches moved to fast unit coverage, and over-broad timeout overrides were removed from sortable table tests. - - `docs/architecture.md` now documents the non-presentational data-derivation helpers as the boundary for complex dashboard UI islands. - - `npm run test:timings` shows the new helper suites stay out of the slowest-suite list; the remaining slow entries are existing integration, motion, settings, and broad table/UI interaction paths. -- Validation: - - `npx vitest run --project unit tests/unit/drill-down-data.test.ts tests/unit/heatmap-calendar-data.test.ts tests/unit/request-quality-data.test.ts tests/unit/sortable-table-data.test.ts tests/unit/filter-date-picker-data.test.ts tests/unit/recent-days-reveal.test.ts --reporter=verbose` - - `npx vitest run --project frontend tests/frontend/drill-down-modal-content.test.tsx tests/frontend/drill-down-modal-motion.test.tsx tests/frontend/heatmap-calendar-accessibility.test.tsx tests/frontend/request-quality.test.tsx tests/frontend/sortable-table-provider-model.test.tsx tests/frontend/sortable-table-recent-days.test.tsx tests/frontend/filter-bar-date-picker.test.tsx --reporter=verbose` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues - -### dashboard-review.md / N-02 - -- Status: fixed -- 
Scope: `src/components/dashboard/DashboardSections.tsx` already consumes a single structured `DashboardSectionsViewModel`, and `src/types/dashboard-view-model.d.ts` keeps the section data split into named section bundles instead of flat dashboard props. This closes the original broad `DashboardSectionsProps` concern without changing visible dashboard functionality, content, UI, or animations. -- Guardrails: `tests/architecture/dashboard-sections-contract.test.ts` now locks the public `DashboardSections` prop contract to one `viewModel` prop and keeps `DashboardSectionsViewModel` split into the intended layout, analysis, table, comparison, and interaction bundles. -- Follow-up quality fixes during implementation: - - No production refactor was needed because the broader view-model boundary had already been introduced by `architecture-review.md / M-01`; this change adds a targeted regression guardrail so future section work does not reintroduce wide flat props. - - `docs/architecture.md` already documents the intended DashboardSections boundary, so no duplicate architecture text was added. 
-- Validation: - - `npm run test:architecture` - - `npm run test:unit -- tests/frontend/dashboard-filter-visibility.test.tsx` - - `tsc --noEmit` - - `npm run lint` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` -> attempted twice; both runs failed under external CPU pressure, hitting 5s timeouts in existing unrelated frontend suites, while `npm run verify:full` had just completed the same coverage test set successfully; the new architecture guardrail itself stayed below 100ms in `npm run test:architecture` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files docs/review/fixed-findings.md tests/architecture/dashboard-sections-contract.test.ts` -> 0 issues - -## 2026-04-24 - -### dashboard-review.md / N-01 - -- Status: fixed -- Scope: the dashboard action landscape now separates everyday data loading, export/use actions, and maintenance actions in the header, while the Command Palette mirrors the same intent split for load data, exports, and maintenance. Existing actions and command IDs remain available. -- Guardrails: `tests/frontend/header-links.test.tsx` covers localized header action groups and preserved button access, while `tests/frontend/command-palette-action-groups.test.tsx` covers localized Command Palette groups and stable command IDs. -- Follow-up quality fixes during implementation: - - Header action rendering is now owned by a private `HeaderActions` composition inside `Header.tsx`, keeping the global header shell separate from action information architecture. - - Command Palette action commands are grouped by intent instead of sharing one broad `Actions` bucket, without changing command handlers or `data-testid` contracts.
-- Validation: - - `npm run test:unit -- tests/frontend/header-links.test.tsx tests/frontend/command-palette-action-groups.test.tsx` - - `npx playwright test tests/e2e/command-palette.spec.ts` - - `tsc --noEmit` - - `npm run lint` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md -f ...` -> only reported unrelated untracked `docs/application-stack-reference.md`; the file was not changed - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir src/components` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir tests/frontend` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir shared/locales` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir docs/review` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files docs/architecture.md` -> 0 issues - -### dashboard-review.md / M-02 - -- Status: fixed -- Scope: `src/components/layout/FilterBar.tsx` was reduced to a composition shell over private filter groups for status, time presets, date range, and provider/model chips. The visible dashboard filtering capabilities remain intact while the bar now shows lightly grouped Time, Date range, Providers, and Models areas. -- Guardrails: `tests/frontend/filter-bar-accessibility.test.tsx` covers localized filter groups, while the existing date-picker and preset/chip suites continue to lock keyboard focus, date clearing, preset highlighting, and chip `aria-pressed`/visual states. `.dependency-cruiser.cjs` now keeps the new FilterBar internals private to the FilterBar shell. -- Follow-up quality fixes during implementation: - - The custom date picker logic now lives in `FilterBarDateRange.tsx`, isolating its portal, overlay positioning, keyboard navigation, and focus restoration from the main filter shell. 
- - Provider and model chip rendering now lives in `FilterBarChipFilters.tsx`, keeping provider badge styling and model color state local to chip filters. - - Provider badge alpha variants now flow through `getProviderBadgeStyle(...)`, so included provider chips no longer parse or mutate CSS strings in the UI. - - Date-picker calendar labels now recompute from the active locale while the picker is open, and weekday cells use stable keys. -- Validation: - - `npm run test:unit -- tests/frontend/filter-bar-accessibility.test.tsx tests/frontend/filter-bar-date-picker.test.tsx tests/frontend/filter-bar-presets.test.tsx tests/unit/model-colors.test.ts` - - `tsc --noEmit` - - `npm run lint` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md -f ...` -> fixed relevant minor findings for weekday keys, provider badge alpha handling, locale-reactive calendar labels, and included-chip style collisions; remaining global uncommitted finding is isolated to unrelated untracked `docs/application-stack-reference.md`. - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir src/components/layout` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir src/lib` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir tests` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir shared` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir docs/review` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files .dependency-cruiser.cjs` -> 0 issues - -### dashboard-review.md / M-01 - -- Status: fixed -- Scope: `src/components/features/settings/SettingsModal.tsx` was changed from one long settings surface into a tabbed workspace with Basics, Layout, Limits, and Maintenance areas. 
Existing settings capabilities remain available, but day-to-day preferences are separated from section layout, provider limits, and backup/version-maintenance actions. -- Guardrails: `tests/frontend/settings-modal-tabs.test.tsx` covers default tab state, section grouping, and keyboard navigation, while the existing focused settings suites now exercise their sections through the relevant tab. `docs/architecture.md` documents the tabbed settings shell and split motion/toktrack version ownership. -- Follow-up quality fixes during implementation: - - `SettingsModalSections.tsx` now separates the reduced-motion card from the toktrack version-status card so Maintenance can own diagnostic/version information without mixing it into daily dashboard behavior settings. - - `SettingsModal.tsx` now resets the active settings tab while the dialog is closed, so reopening the dialog always starts from Basics without a transient stale tab render. - - `tests/e2e/command-palette.spec.ts` now splits the formerly near-timeout section-navigation coverage into smaller scroll, dashboard-section, and analysis-section tests after the full gate exposed the old monolithic test as a reliability/performance risk. 
-- Validation: - - `npm run test:unit -- tests/frontend/settings-modal-tabs.test.tsx tests/frontend/settings-modal-language.test.tsx tests/frontend/settings-modal-version-status.test.tsx tests/frontend/settings-modal-defaults.test.tsx tests/frontend/settings-modal-sections.test.tsx tests/frontend/settings-modal-backups.test.tsx tests/frontend/settings-modal-provider-limits.test.tsx tests/frontend/settings-modal-draft-state.test.tsx` - - `npx playwright test tests/e2e/command-palette.spec.ts` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues - -### code-review.md / N-01 - -- Status: fixed -- Scope: removed the unused `src/hooks/use-theme.ts` and `src/hooks/use-provider-limits.ts` files. Their active responsibilities already live in the persisted settings flow: theme application goes through `applyTheme` and dashboard controller effects, while provider-limit synchronization goes through `useAppSettings` and `syncProviderLimits`. -- Guardrails: `tests/architecture/unused-hooks.test.ts` now fails when a production hook file under `src/hooks/` has no production import, and `docs/architecture.md` plus `docs/testing.md` document that unused hook helpers should be removed instead of retained as speculative code. -- Follow-up quality fixes during implementation: - - The guardrail covers the same dead-hook visibility gap that made the original `0%` coverage files easy to miss, so future unused hook files are caught by `npm run test:architecture`. 
-- Validation: - - `npm run test:architecture` - - `npm run check:deps` - - `npm run test:unit -- tests/frontend/react-query-hooks.test.tsx tests/frontend/dashboard-controller-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the CLI: `47 minutes and 10 seconds`) - -### code-review.md / M-01 - -- Status: fixed -- Scope: dashboard date preset behavior is centralized in the shared dashboard preferences contract. `src/hooks/use-dashboard-filters.ts` applies presets through `resolveDashboardPresetRange`, and `src/components/layout/FilterBar.tsx` derives the active UI state through `resolveDashboardActivePreset`; both flow through `src/lib/dashboard-preferences.ts` to `shared/dashboard-preferences.js`, so applying and displaying presets no longer duplicate the `7d`, `30d`, `month`, `year`, and `all` rules. -- Guardrails: `tests/unit/dashboard-preferences.test.ts` locks the shared/frontend preset contract, while `tests/frontend/use-dashboard-filters.test.tsx` and `tests/frontend/filter-bar-presets.test.tsx` cover applying presets in the hook and highlighting them in the filter bar. -- Follow-up quality fixes during implementation: - - No production or test changes were needed for this finding because the shared preset contract and focused regression coverage already existed; the fix was to document the concrete `code-review.md / M-01` reference as closed. 
-- Validation: - - `npm run test:unit -- tests/unit/dashboard-preferences.test.ts tests/frontend/use-dashboard-filters.test.tsx tests/frontend/filter-bar-presets.test.tsx` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 2 minor documentation issues, fixed - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 2: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the CLI: `57 minutes and 23 seconds`) - -### code-review.md / M-02 - -- Status: fixed -- Scope: `src/components/features/settings/SettingsModal.tsx` was reduced from the former 1000+ line all-in-one dialog into a shell/composition root over extracted settings sections, a draft-state hook, a version-status hook, and modal-local helper logic. The visible settings areas now live behind `src/components/features/settings/SettingsModalSections.tsx`, while the draft/save/reset behavior moved into `use-settings-modal-draft.ts` and the toktrack lookup behavior moved into `use-settings-modal-version-status.ts`. -- Guardrails: `docs/architecture.md` now documents the settings modal shell, internal section bundle, and private helper hooks as a feature-internal composition boundary, and `.dependency-cruiser.cjs` now blocks unrelated frontend modules from importing `SettingsModalSections.tsx`, `use-settings-modal-draft.ts`, `use-settings-modal-version-status.ts`, or `settings-modal-helpers.ts` directly. -- Follow-up quality fixes during implementation: - - `src/components/features/settings/settings-modal-helpers.ts` now owns the extracted number parsing, selection normalization, provider-limit draft building/patching, and section reorder helpers so tests no longer import utility behavior through the UI shell. 
- - `use-settings-modal-draft.ts` now clones incoming section drafts and patches empty provider configs from `DEFAULT_PROVIDER_LIMIT_CONFIG`, preserving the previous provider-limit safety behavior after the refactor. - - `use-settings-modal-draft.ts` now initializes modal drafts once per open session and resets that guard on close, so external prop churn can no longer overwrite in-progress edits while the dialog remains open. - - `tests/unit/settings-modal-helpers.test.ts` now covers the extracted settings helpers directly, and `tests/unit/code-rabbit-phase1.test.ts` was narrowed back to the unrelated chart/auto-import helpers it actually owns. - - `tests/frontend/settings-modal-defaults.test.tsx`, `settings-modal-sections.test.tsx`, `settings-modal-backups.test.tsx`, `settings-modal-provider-limits.test.tsx`, and `settings-modal-draft-state.test.tsx` now split the modal coverage by responsibility instead of adding another broad catch-all dialog suite. -- Validation: - - `npm run test:unit -- tests/unit/settings-modal-helpers.test.ts tests/unit/code-rabbit-phase1.test.ts tests/frontend/settings-modal-language.test.tsx tests/frontend/settings-modal-version-status.test.tsx tests/frontend/settings-modal-defaults.test.tsx tests/frontend/settings-modal-sections.test.tsx tests/frontend/settings-modal-backups.test.tsx tests/frontend/settings-modal-provider-limits.test.tsx` - - `npm run check` - - `npm run test:architecture` - - `npm run test:timings` - - `npm_config_cache=/tmp/ttdash-npm-cache npm run verify:release` - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 1 minor issue, fixed (draft state no longer reinitializes while the modal stays open; `tests/frontend/settings-modal-draft-state.test.tsx` added) - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 2: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the 
CLI: `54 minutes and 32 seconds`) - -### code-review.md / H-01 - -- Status: fixed -- Scope: `src/hooks/use-dashboard-controller.ts` was reduced from the remaining god-hook into a composition root over focused internal controller slices. The heavy derived state, browser IO, dialog ownership, drill-down navigation, shell/load state, and imperative dashboard actions now live in `src/hooks/use-dashboard-controller-actions.ts`, `use-dashboard-controller-browser.ts`, `use-dashboard-controller-derived-state.ts`, `use-dashboard-controller-dialogs.ts`, `use-dashboard-controller-drill-down.ts`, `use-dashboard-controller-effects.ts`, and `use-dashboard-controller-shell-state.ts`, while the public controller contract stayed stable through `src/hooks/use-dashboard-controller.ts`. -- Guardrails: `docs/architecture.md` now documents the internal dashboard controller slices and the browser-IO helper as private implementation details behind the public controller hook, and `.dependency-cruiser.cjs` now blocks component-level fanout to the internal `use-dashboard-controller-*.ts` slices while keeping the intentional type-only controller contract out of the orphan warning path. -- Follow-up quality fixes during implementation: - - `tests/frontend/dashboard-controller-browser.test.tsx` now locks the extracted browser helper responsibilities for JSON downloads, section scrolling, and the test-only `openSettings` bridge. - - `tests/frontend/dashboard-controller-drill-down.test.tsx` now covers the extracted drill-down slice directly, including the edge case where the selected day disappears after filtering changes. - - `src/hooks/use-dashboard-controller-actions.ts` now uses the upload-specific fallback toast (`api.uploadFailed`) after a successful JSON parse when the backend rejects a usage upload without an `Error` instance, and `tests/frontend/dashboard-error-state.test.tsx` covers that regression explicitly. 
- - `npm run test:timings` showed no new performance hotspot in the touched dashboard/controller suites; the new slice tests stay sub-100ms and the existing dashboard controller/public-shell tests remained fast. -- Validation: - - `npm run check` - - `npm run test:architecture` - - `npm run test:timings` - - `npm run test:unit -- tests/frontend/dashboard-controller-browser.test.tsx tests/frontend/dashboard-controller-drill-down.test.tsx tests/frontend/dashboard-controller-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx tests/frontend/dashboard-filter-visibility.test.tsx tests/frontend/dashboard-error-state.test.tsx` - - `npm run test:unit -- tests/frontend/dashboard-error-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx tests/frontend/dashboard-controller-browser.test.tsx` - - `npm_config_cache=/tmp/ttdash-npm-cache npm run verify:release` - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 1 minor issue, fixed (`api.uploadFailed` fallback for backend upload rejection after successful JSON parse) - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 2: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the CLI: `52 minutes and 50 seconds`) - -## 2026-04-23 - -### architecture-review.md / H-01 - -- Status: fixed -- Scope: `server.js` was reduced to the CLI/bootstrap and runtime composition root; subsystem logic moved into `server/data-runtime.js`, `server/background-runtime.js`, `server/auto-import-runtime.js`, and `server/http-router.js`. -- Guardrails: `docs/architecture.md` now documents the server split, `.dependency-cruiser.cjs` prevents runtime modules from coupling back to `server.js`, the router, or each other, and `tests/unit/background-runtime.test.ts` locks the background registry snapshot behavior that was tightened during the review cycle. 
-- Follow-up quality fixes during implementation: - - `server/background-runtime.js`: removed a snapshot TOCTOU re-read so background pruning now decides cleanup from one captured registry read. - - `src/components/layout/FilterBar.tsx`: added cleanup for queued focus-restoration callbacks; `tests/frontend/filter-bar-date-picker.test.tsx` now covers the unmount path that was causing the flaky date-picker teardown in Playwright. -- Validation: - - `npm run test:architecture` - - `npm run check:deps` - - `npm run test:unit -- tests/unit/server-helpers-network.test.ts tests/unit/server-helpers-runner-core.test.ts tests/unit/server-helpers-runner-process.test.ts tests/unit/server-helpers-file-locks.test.ts tests/integration/server-auto-import.test.ts tests/integration/server-background.test.ts tests/integration/server-api-guards.test.ts` - - `npm run verify:full` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 1 minor issue, round 2: 0 issues - -### architecture-review.md / H-02 - -- Status: fixed -- Scope: the duplicated client/server settings contract was consolidated into `shared/app-settings.js` with typed declarations in `shared/app-settings.d.ts`; frontend adapters in `src/lib/app-settings.ts`, `src/lib/dashboard-preferences.ts`, and `src/lib/provider-limits.ts` now consume that shared contract, and `server/data-runtime.js` now normalizes persisted settings through the same source. -- Guardrails: `docs/architecture.md` now documents `shared/app-settings.js` as the single production source for persisted settings defaults and normalization, `.dependency-cruiser.cjs` blocks bypassing that contract from `server.js`, `server/data-runtime.js`, and `src/lib/app-settings.ts`, and `vitest.config.ts` now includes `shared/app-settings.js` in coverage reporting. 
-- Follow-up quality fixes during implementation: - - `tests/unit/app-settings-contract.test.ts`: locks the shared/frontend contract alignment for defaults, fragment normalization, persisted settings normalization, and runtime-only flags. - - `tests/integration/server-api-imports.test.ts`: now asserts the normalized settings-import response instead of only the status code. - - `tests/integration/server-api-persistence.test.ts` and `tests/frontend/settings-modal-test-helpers.tsx`: now derive defaults from the shared settings contract instead of re-hardcoding them in tests. - - `tests/unit/background-runtime.test.ts`: cleaned up type-only imports to keep the repo lint/type gates green after the shared typings were added. -- Validation: - - `npm run check` - - `npm run test:architecture` - - `npm run test:unit -- tests/unit/app-settings-contract.test.ts tests/unit/api.test.ts tests/unit/dashboard-preferences.test.ts tests/integration/server-api-persistence.test.ts tests/integration/server-api-imports.test.ts` - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:unit:coverage` - - `npm run build:app` - - `npm run verify:package` - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues - -### architecture-review.md / M-01 - -- Status: fixed -- Scope: the dashboard orchestration was cut from a broad flat controller surface into focused view-model bundles in `src/hooks/use-dashboard-controller.ts`; `src/components/Dashboard.tsx` now consumes `header`, `filterBar`, `sections`, `settingsModal`, `dialogs`, `commandPalette`, `report`, and shell bundles instead of forwarding dozens of individual fields, and `src/components/dashboard/DashboardSections.tsx` now consumes one structured `DashboardSectionsViewModel`. 
-- Guardrails: `src/types/dashboard-view-model.d.ts` now owns the shared frontend-only dashboard view-model contracts, `docs/architecture.md` documents `Dashboard.tsx` as the controller composition root, and `.dependency-cruiser.cjs` now blocks component-subtree fanout to `src/hooks/use-dashboard-controller.ts`. -- Follow-up quality fixes during implementation: - - `src/components/layout/Header.tsx`, `src/components/layout/FilterBar.tsx`, `src/components/features/command-palette/CommandPalette.tsx`, and `src/components/features/settings/SettingsModal.tsx` now type their props from the shared dashboard view-model contracts instead of re-declaring local prop shapes. - - `tests/frontend/dashboard-controller-test-helpers.ts` now provides bundle-based controller and section factories, so dashboard composition tests no longer rebuild a flat mega-mock. - - `tests/frontend/dashboard-controller-actions.test.tsx` now covers drill-down navigation from the controller-owned dialog bundle, locking the logic that moved out of `Dashboard.tsx`. - - `tests/frontend/dashboard-filter-visibility.test.tsx` now also asserts that `DashboardSections` receives a structured `viewModel` bundle instead of flat section props. 
-- Validation: - - `npm run check` - - `npm run test:architecture` - - `npm run test:unit -- tests/frontend/dashboard-controller-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx tests/frontend/dashboard-filter-visibility.test.tsx` - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:unit:coverage` - - `npm run build:app` - - `npm run verify:package` - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues - -### architecture-review.md / M-02 - -- Status: fixed -- Scope: dashboard preset, section, and settings-adjacent UI rules now flow through `shared/dashboard-preferences.js` with declarations in `shared/dashboard-preferences.d.ts`; `shared/app-settings.js` consumes that shared contract, `src/lib/dashboard-preferences.ts` was reduced to a thin adapter, and the duplicated preset semantics were removed from `src/hooks/use-dashboard-filters.ts` and `src/components/layout/FilterBar.tsx`. -- Guardrails: `docs/architecture.md` now documents the shared dashboard contract separately from the app settings contract, `.dependency-cruiser.cjs` blocks production imports of `shared/dashboard-preferences.json` outside `shared/dashboard-preferences.js`, and `vitest.config.ts` now includes `shared/dashboard-preferences.js` in coverage. -- Follow-up quality fixes during implementation: - - `tests/unit/dashboard-preferences.test.ts` now locks the shared/frontend adapter alignment for config parsing, section metadata, preset-range resolution, and active-preset detection. - - `tests/frontend/use-dashboard-filters.test.tsx` now asserts preset application and reset behavior against the shared preset resolver instead of re-hardcoding date ranges. - - `tests/frontend/filter-bar-presets.test.tsx` now seeds active-preset UI states from the shared resolver while preserving the existing visible quick-select order. 
- - `src/types/dashboard-view-model.d.ts` and `src/hooks/use-dashboard-controller.ts` now type preset actions with `DashboardDatePreset` instead of broad strings. -- Validation: - - `npm run check` - - `npm run test:architecture` - - `npm run check:deps` - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:unit:coverage` - - `npm run build:app` - - `npm run verify:package` - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues diff --git a/docs/review/test-review.md b/docs/review/test-review.md deleted file mode 100644 index 73ba54c..0000000 --- a/docs/review/test-review.md +++ /dev/null @@ -1,456 +0,0 @@ -# Test Review - -## Kurzfazit - -Stand: 2026-05-01, lokale Arbeitskopie nach den bisherigen -Testsystem-Fixes. - -Die Testpipeline ist inzwischen solide strukturiert: Vitest ist in Projekte -getrennt, die CI laeuft als DAG, lokale Timing-Kommandos schreiben -projektbezogene Reports, Playwright nutzt worker-isolierte Server, und das -optionale lokale Parallel-Gate ist lastbewusst. Der naechste grosse -Performance- und Robustheitsschritt liegt deshalb nicht in pauschal mehr -Parallelisierung, sondern in gezielter Reduktion von jsdom-, Prozess-, -Registry- und Coverage-Kosten. - -Groesste verbleibende Hebel: - -- `tests/frontend` ist mit Abstand der groesste Vitest-Walltime-Block. -- `integration-background` ist der kritischste Robustheitsbereich, weil echte - Background-Prozesse parallel starten und Registry-/Port-/Readiness-Zustand - teilen. -- Prozess-, Port-, Lock-, PATH-, Runner- und CLI-Tests sind fachlich wertvoll, - duerfen aber nicht fuer reine Branch-Coverage missbraucht werden. -- Coverage ist global gut, aber die wichtigsten Branch-Luecken liegen weiter - in Server-Runtime, HTTP-Router, Report, Auto-Import und - Dashboard-Controller-Actions. -- Die CI ist schon sinnvoll parallelisiert. 
Weitere Beschleunigung muss anhand - echter Timing-Daten erfolgen, damit keine Report-Kollisionen oder neue - Flakes entstehen. - -Keine der folgenden Verbesserungen darf App-Aussehen, Texte, Layout, -Animationen, User-Flows oder produktives Verhalten aendern. Der Umbau betrifft -Tests, Testhelfer, Testkonfiguration, CI-Orchestrierung und Dokumentation. - -## Aktuelle Messbasis - -Frischer lokaler Lauf: -`node scripts/run-vitest-project-timings.js --projects=architecture,unit,frontend,integration,integration-background --repeat=1` - -| Projekt | Umfang | Walltime | Aufsummierte Kosten | Bewertung | -| --- | ---: | ---: | --- | --- | -| `architecture` | 10 Dateien, 22 Tests | `4.78s` | Import `1.40s`, Tests `1.04s` | Stabil, nicht Hauptproblem. | -| `unit` | 57 Dateien, 380 Tests | `4.40s` | Import `3.58s`, Tests `3.76s` | Schnell; Hotspots sind echte Prozess-/Lock-Smokes. | -| `frontend` | 76 Dateien, 204 Tests | `30.66s` | Setup `17.90s`, Import `31.28s`, Tests `34.66s`, Environment `71.10s` | Groesster Block und wichtigster Performance-Hebel. | -| `integration` | 13 Dateien, 44 Tests | `4.03s` | Tests `9.08s` | Meist gesund; teuer durch echte Server/HTTP/CLI. | -| `integration-background` | 4 Dateien, 5 Tests | `3.07s` | Tests `4.03s` | Klein, aber robustheitskritisch durch echte Background-Prozesse. | - -Aktuelle Coverage aus `coverage/lcov.info`: - -| Kennzahl | Wert | -| --- | ---: | -| Lines | `83.96%` | -| Branches | `70.02%` | -| Functions | `83.12%` | - -Die globale Coverage ist damit gruen. Der relevante naechste Schritt ist nicht, -beliebige einfache UI-Wrapper auf 100% zu bringen, sondern kritische -Fehler-, Timeout-, Permission-, Registry-, Import-, Export- und Report-Branches -zu schliessen. - -## Bottlenecks - -### 1. Frontend/jsdom dominiert die Walltime - -`frontend` braucht lokal rund `30s`; die aufsummierte Environment-Zeit von -`71.10s` und Import-Zeit von `31.28s` zeigt den Kern des Problems. 
Viele -Dateien bezahlen wiederholt jsdom, React Testing Library, i18n, -Browser-API-Shims, Radix, Recharts, cmdk und Framer Motion. Mehr Worker kann -diese Kosten nur teilweise verdecken; ab einem Punkt steigen CPU- und -Speicherdruck. - -Aktuelle Hotspots: - -| Suite/Testbereich | Signal | Ursache | -| --- | ---: | --- | -| `filter-bar-date-picker.test.tsx` | `1.353s` | Kalenderdialog, Focus, Fake-Timer, Keyboard. | -| `drill-down-modal-motion.test.tsx` | `1.322s` | Modal, Motion, Re-Render und Positioning. | -| `settings-modal-sections.test.tsx` | `1.279s` | Settings-Provider, Draft-State, Save-Flow. | -| `settings-modal-tabs.test.tsx` | `1.240s` | Modal-Tabs, i18n, Re-Render. | -| `settings-modal-provider-limits.test.tsx` | `1.215s` | Provider-Limits, Draft-Mapping, Controls. | -| `sortable-table-recent-days.test.tsx` | `1.064s` | Tabelleninteraktion und ARIA-Sort-State. | -| `drill-down-modal-regressions.test.tsx` | `1.040s` | Breite Modal-Regressionen. | -| `request-quality.test.tsx` | `0.930s` | IntersectionObserver, Reveal-Animation, Metric Cards. | - -Die teuersten Einzeltests bestaetigen das Muster: DOM-/ARIA-/Focus-Verhalten -ist wertvoll, aber pure Datenableitung, Sortierung, Labelbildung und -Konfigurationsmapping sollten nicht in jsdom liegen. - -Optimierung: - -- Jede `tests/frontend`-Datei klassifizieren: pure logic, hook state, - component interaction, chart rendering, accessibility/ARIA, motion/reveal, - regression smoke. -- Pure Logik nach `tests/unit` verschieben: Command-Building, - Tabellen-Sortierung, Datepicker-Daten, Settings-Transformationen, - Chart-Datenformung, Label-/Formatierungslogik. -- Komponententests fokussieren: ein User-observable Verhalten pro Test, - keine langen Multi-Regression-Renders. -- Zentrale Mocks/Helper ausbauen fuer Recharts, IntersectionObserver, - ResizeObserver, matchMedia, requestAnimationFrame und Motion. 
-- `initI18n(...)` nur in Dateien explizit aufrufen, die Sprache variieren oder - lokalisierte Ausgabe testen. Der globale Frontend-Setup initialisiert bereits - Deutsch. -- `waitFor(...)` auditieren: behalten fuer React Query, async effects, Timer, - Observer und echte eventual consistency; ersetzen durch direkte Assertions, - `findBy...`, Fake-Timer-Flushing oder explizite User-Event-Schritte, wenn - der Zustand synchron ist. - -Nicht empfohlen: - -- `frontend.maxWorkers` blind ueber `'80%'` erhoehen. -- `happy-dom` als schnellen Drop-in versuchen, bevor Radix/cmdk/Focus- und - Accessibility-Verhalten gezielt validiert ist. -- Breite Dashboard-Smokes in jsdom ergaenzen, wenn ein Node-Unit-Test denselben - Vertrag guenstiger pruefen kann. - -### 2. Background-Concurrency ist der groesste Robustheitsblocker - -`tests/integration/server-background-concurrency.test.ts` prueft einen -wichtigen Vertrag: zwei gleichzeitige `ttdash --background --no-open` Starts -sollen beide in der Registry landen und erreichbar sein. Dieser Bereich ist -klein, aber riskant, weil er echte CLI-Prozesse, Portwahl, Registry-Dateien, -Locks, Logfiles, Auth-Header und Readiness kombiniert. - -Aktuelle Risikoflaechen: - -- Zwei Prozesse konkurrieren um Registry-Read/Write, Portwahl und Pruning. -- Ein Prozess kann vor dem erwarteten Registry-Zustand aussteigen. -- Tests warten auf Registry und URL-Readiness, aber Diagnose muss im Fehlerfall - sofort Port-, Registry-, Startup- und Logursache unterscheidbar machen. -- Weitere Parallelisierung des Background-Projekts wuerde Robustheit eher - verschlechtern als verbessern. - -Optimierung: - -- `integration-background.maxWorkers` bei `2` lassen. -- Background-Fehlerdiagnostik weiter schaerfen: Exitcode, Signal, stdout, - stderr, Runtime-Root, Registry-Datei, Registry-Inhalt, Log-Snippets, - erwartete Ports und relevante Env-Werte. 
-- Registry-, Selection-, Prune-, EPERM-/ESRCH- und Startup-Branches mit - Fake-FS/Fake-Clock/Fake-Spawn als Unit-Tests abdecken. -- Echte Cross-Process-Smokes auf wenige Vertraege begrenzen: concurrent - startup, selected stop, custom prefix/permissions. -- Nach jedem Fix mindestens drei Wiederholungen von - `node scripts/run-vitest-project-timings.js --projects=integration-background --repeat=3` - verlangen. - -### 3. Prozessnahe Tests sind wertvoll, aber teilweise zu breit - -Unit-Hotspots: - -| Suite | Signal | Bewertung | -| --- | ---: | --- | -| `server-helpers-runner-process.test.ts` | `1.215s` | Echte `bunx`/`npx`/PATH-Pfade; als Smoke wertvoll, fuer Branch-Matrix teuer. | -| `server-helpers-file-locks.test.ts` | `0.479s` | Cross-Process-Locking; echte Prozesskoordination nur fuer wenige Faelle noetig. | -| `playwright-config.test.ts` | `0.428s` | Config-Load und worker-scoped Server-Vertrag. | -| `toktrack-version.test.ts` | `0.283s` | Package-/Version-Guardrails. | - -Integration-Hotspots: - -| Suite | Signal | Bewertung | -| --- | ---: | --- | -| `server-auto-import.test.ts` | `1.798s` | Auto-Import-Streaming, Fake Toktrack Runner, Singleton-Start. | -| `server-local-auth.test.ts` | `1.687s` | Echter Server, Bootstrap-Cookie/Bearer, Guards. | -| `server-api-reporting.test.ts` | `0.865s` | PDF/Typst-Pfad und Report-Router. | -| `server-startup-cli.test.ts` | `0.845s` | CLI-Startup, Host/Port/Permission-Vertraege. | -| `server-api-persistence.test.ts` | `0.760s` | Echte Persistenz- und Runtime-Root-Vertraege. | - -Optimierung: - -- Echte Prozesse nur fuer Shell, PATH, Portbindung, Signalhandling, - Cross-Process-Locking, Registry-Koordination und CLI-Startup verwenden. -- Statusmapping, Error-Mapping, Timeout, Retry, Cache, Permission-Branches, - Request-Shaping und malformed payloads mit Fake-Spawn/Fake-FS/Fake-Clock - testen. 
-- Fixed sleeps in Testhelfern weiter durch Readiness-Endpoints, - Sentinel-Dateien, Stream-Events oder explizite Child-Process-Signale - ersetzen. -- `server-api-reporting` in Router-Fehlerpfade mit Fake-Report-Generator und - einen echten PDF-Smoke trennen. -- Auth-Guard-Matrix so weit wie moeglich auf Router-/HTTP-Unit-Ebene testen; - echte Server-Integration nur fuer Cookie/Bearer/Origin-Zusammenspiel - behalten. - -### 4. CI ist gut parallelisiert, aber weitere Daten fehlen - -`.github/workflows/ci.yml` ist bereits als DAG aufgebaut: - -- `static` -- Vitest-Matrix fuer `architecture`, `unit`, `frontend`, `integration`, - `integration-background` -- dedizierter `coverage`-Job -- `build` -- `package-smoke` nach `build` -- `e2e` nach `build` -- `windows-smoke` - -Verbleibende CI-Kosten sind wahrscheinlich: - -- `npm ci --ignore-scripts` pro Job -- Playwright-Browser-Install im E2E-Job -- Coverage-Instrumentierung und HTML/LCOV-Erzeugung -- Artifact upload/download fuer `dist`, Coverage und Reports - -Optimierung: - -- Vor CI-Umbauten echte Job-Timings aus GitHub Actions auswerten. -- Coverage nicht in jede Matrix duplizieren; dedizierten Coverage-Job - beibehalten. -- E2E-Sharding erst einfuehren, wenn HTML-/JUnit-/Artifact-Pfade shard-sicher - sind. -- `verify:full` als serielle Referenz behalten und `verify:full:parallel` als - lokalen Fast Path dokumentieren. -- Jede Worker- oder Shard-Aenderung mit mindestens drei Wiederholungen messen: - Median und Worst Run dokumentieren. - -## Coverage-Luecken - -Aktuelle Low-Coverage-Signale aus `coverage/lcov.info`: - -| Bereich | Lines | Branches | Risiko | -| --- | ---: | ---: | --- | -| `src/App.tsx` | `0.0%` | `0.0%` | Entry-Wiring; nur testen, wenn App-shell-Vertrag nicht anders abgedeckt ist. | -| `src/components/features/risk/ConcentrationRisk.tsx` | `0.0%` | `0.0%` | Feature ist ohne Testsignal; zuerst entscheiden, ob Smoke oder Exclude sinnvoll ist. 
| -| `server/report/index.js` | `40.9%` | `41.7%` | Typst-Erkennung, Compile-Fehler, Cleanup, PDF-Ausgabe. | -| `src/lib/app-settings.ts` | `40.0%` | `50.0%` | Defaults, Settings-Mapping, Recovery-Faelle. | -| `src/components/layout/Header.tsx` | `45.0%` | `35.7%` | Header-Actions, Links, Statusanzeigen. | -| `src/lib/csv-export.ts` | `46.7%` | n/a | Export-Korrektheit, Escaping, leere Daten. | -| `server/background-runtime.js` | `59.5%` | `53.9%` | Background-Start/Stop, Registry, EPERM/ESRCH, Pruning. | -| `server/http-router.js` | `68.0%` | `54.6%` | API-Fehlerpfade, Mutationsschutz, malformed JSON. | -| `server/auto-import-runtime.js` | `69.4%` | `54.7%` | Stream, Kill, Timeout, malformed output. | -| `src/hooks/use-dashboard-controller-actions.ts` | `69.0%` | `42.0%` | Upload/Delete/Export/Backup/PDF/toast side effects. | -| `src/components/features/auto-import/AutoImportModal.tsx` | `66.7%` | `51.4%` | Stream-Status, Cancel, Error, Success. | -| `src/components/features/command-palette/CommandPalette.tsx` | `67.4%` | `58.1%` | Visibility, action dispatch, keyboard/empty states. | -| `server/report/charts.js` | `75.7%` | `37.5%` | Chart asset edge cases and formatting. | - -Prioritaet fuer Coverage-Erhoehung: - -1. `server/background-runtime.js` - - EPERM/ESRCH, stale registry, invalid registry, selected stop, - startup failure, readiness timeout, no-open path, custom prefix. -2. `server/http-router.js` - - malformed JSON, unsupported methods, missing/invalid auth, origin/host - guards, permission denied, corrupt data/settings recovery, report - success/failure. -3. `server/auto-import-runtime.js` - - malformed toktrack output, child error, timeout, kill escalation, stream - tail flush, singleton guard. -4. `server/report/index.js` und `server/report/charts.js` - - missing Typst, compile stderr, cleanup failure tolerance, invalid payload, - chart edge values. -5. 
`src/hooks/use-dashboard-controller-actions.ts` - - upload success/error, delete/reset, backup import/export, PDF request, - blob download, toast side effects, mutation invalidation. -6. `src/components/features/auto-import/AutoImportModal.tsx` - - idle/running/success/error/cancel states, stream event rendering, - disabled controls. -7. Branch-heavy UI-Workflows - - Header, Command Palette, Settings sections, sortable tables, date picker. -8. Kleine Lib-Module - - `app-settings.ts`, `csv-export.ts`, `csv.ts`, `model-utils.ts` mit - parametrisierten Node-Unit-Tests. - -Nicht jede 0%-Datei ist automatisch ein Problem. Primitive Wrapper, -Entry-Points oder rein deklarative UI sollten entweder mit einem guenstigen -Smoke abgedeckt oder bewusst vom Coverage-Ratchet ausgenommen werden. Der -entscheidende Massstab ist Produktionsrisiko, nicht kosmetische Prozentzahl. - -## Nicht optimale Einstellungen - -| Bereich | Aktuell | Bewertung | Empfehlung | -| --- | --- | --- | --- | -| `architecture.fileParallelism` | `false` | Stabil und klein; Parallelisierung bringt wenig und kann Source-Graph-Arbeit duplizieren. | Beibehalten, nur nach 3-run Benchmark aendern. | -| `unit.maxWorkers` | `'80%'` | Schnell; einzelne Prozess-Smokes dominieren. | Beibehalten, Branch-Matrix aus echten Prozess-Smokes entfernen. | -| `frontend.maxWorkers` | `'80%'` | Aktuell sinnvoll, aber varianzreich. | Nicht erhoehen; zuerst jsdom-Dateien und Importkosten reduzieren. | -| `integration.maxWorkers` | `'50%'` | Gute Balance fuer HTTP/I/O. | Erst nach besserer Tempdir-/Port-Isolation neu messen. | -| `integration-background.maxWorkers` | `2` | Richtig wegen echter Background-Prozesse. | Nicht erhoehen, bis Concurrency mehrfach stabil ist. | -| `frontend.testTimeout` | `30_000` | Robust unter Coverage, kann langsame Tests verdecken. | Timing-Budgets schaerfen statt Timeout zuerst senken. | -| Playwright CI workers | `2` | Runnerfreundlich und stabil. 
| `4` oder Sharding nur nach report-sicherem Mehrfachbenchmark. | -| Coverage | dedizierter Lauf | Richtig, aber teuer. | Beibehalten; spaeter gezielte Threshold-Ratchets. | -| Direkte Parallelaufrufe | feste Reportpfade moeglich | Manuelle Parallelitaet kann JUnit/HTML/Coverage ueberschreiben. | Nur scripts mit eindeutigen Output-Pfaden parallel nutzen. | - -## Verbesserungsplan - -### Phase 1 - Background-Concurrency stabilisieren - -Ziel: Den kritischsten Flake-Bereich stabilisieren, bevor mehr -Parallelisierung oder scharfere Budgets eingefuehrt werden. - -Umsetzung: - -- Background-Testhelfer so erweitern, dass jeder CLI-Fehler Registry, - Logfiles, Ports, Runtime-Root und Env-Kontext ausgibt. -- Concurrent-Startup-Test auf konkrete Zwischenzustaende pruefen: - Prozess beendet erfolgreich, Registry enthaelt zwei Eintraege, beide Ports - sind erreichbar, Readiness-Endpoint antwortet mit Auth. -- Registry-/Startup-/Prune-Branches in Unit-Tests mit Fakes abdecken. -- Echte Background-Smokes auf wenige Prozessvertraege begrenzen. - -Akzeptanz: - -- `integration-background` laeuft drei Wiederholungen ohne Flake. -- Fehlerdiagnose reicht aus, um Port-, Registry-, Auth- und Startup-Ursache zu - unterscheiden. -- `maxWorkers` bleibt `2`. - -### Phase 2 - Frontend/jsdom-Kosten senken - -Ziel: Weniger jsdom-Fixkosten pro Assertion, ohne UI-Verhalten zu verlieren. - -Umsetzung: - -- `tests/frontend` klassifizieren und pure Logik in Node-Unit-Tests - verschieben. -- Schwere Settings-, Drilldown-, FilterBar- und Table-Tests in kleinere, - vertragsscharfe Tests schneiden. -- Shared Helper fuer Recharts, Observer, Motion, rAF und App-Provider - konsolidieren. -- Unnoetige i18n-Initialisierung und unnoetige `waitFor(...)`-Nutzung - entfernen. -- Danach `frontend` dreimal benchmarken und Median/Worst Run dokumentieren. - -Akzeptanz: - -- Frontend-Median oder Frontend-Worst-Run sinkt messbar. -- ARIA-, Keyboard-, Focus-, i18n-, Motion- und Chart-Verhalten bleibt - abgedeckt. 
-- Keine Snapshots oder Layout-Goldens einfuehren. - -### Phase 3 - Prozessnahe Tests trennen - -Ziel: Echte Prozesse nur fuer echte Prozessvertraege verwenden; Branch-Logik -billiger und deterministischer testen. - -Umsetzung: - -- Runner-/PATH-Smokes in `server-helpers-runner-process.test.ts` behalten, - Cache-/Timeout-/stderr-/stdout-Branches in Fake-Spawn-Tests verschieben. -- File-Lock-Smokes behalten, stale-lock-, permission- und cleanup-Branches mit - Fake-FS/Fake-Clock testen. -- Auto-Import-Integration um fixed sleeps bereinigen und Branch-Matrix auf - Runtime-/Router-Unit-Tests verlagern. -- Reporting in Fake-Generator-Routertests und einen echten PDF-Smoke trennen. - -Akzeptanz: - -- Integration-Worst-Run steigt nicht durch neue Coverage. -- Jeder echte Prozess-Smoke prueft einen Prozessvertrag, nicht nur - Branch-Logik. -- Keine neuen fixed sleeps ohne konkrete Begruendung. - -### Phase 4 - Coverage risikogewichtet erhoehen - -Ziel: Branch-Coverage dort erhoehen, wo Fehler teuer waeren. - -Umsetzung: - -- Server-Runtime zuerst: Background, HTTP-Router, Auto-Import, Report. -- Danach Controller-Actions und browsernahe Side Effects. -- Danach UI-Workflow-Branches mit echten DOM-Vertraegen. -- Kleine Lib-Module per parametrisierten Node-Unit-Tests abdecken. -- Nach jeder Coverage-Phase `npm run test:vitest:coverage` auswerten und - globale Werte plus betroffene Dateien dokumentieren. - -Akzeptanz: - -- Globale Coverage faellt nicht. -- Branch-Coverage steigt zuerst in `server/` und branch-heavy Hooks. -- Neue Tests liegen auf dem schmalsten sinnvollen Layer. - -### Phase 5 - Gates datenbasiert schaerfen - -Ziel: Lokale und CI-Gates schneller machen, ohne Strenge zu verlieren. - -Umsetzung: - -- `verify:full` als serielle Referenz behalten. -- `verify:full:parallel` als lokalen Fast Path dokumentieren und regelmaessig - gegen CI-DAG spiegeln. -- Timing-Budgets projektweise schaerfen, aber erst nach Background-Stabilitaet. 
-- CI-Timing-Artefakte auswerten, bevor Dependency-Install, Browser-Install, - Coverage oder Artifacts umgebaut werden. -- Sharding nur einfuehren, wenn alle Reportpfade shard-sicher sind. - -Akzeptanz: - -- Keine parallelen Jobs ueberschreiben JUnit, Coverage oder Playwright-HTML. -- CI-DAG bleibt lesbar und liefert aussagekraeftige Failures. -- Lokaler Fast Path ist schneller, aber nicht weniger streng. - -### Phase 6 - Playwright klein und wertvoll halten - -Ziel: Browserzeit nur fuer echte User Journeys verwenden. - -Behaltene E2E-Vertraege: - -- Dashboard load/upload. -- Settings/backups. -- Forecast/filter flow. -- Reporting smoke. -- Representative Command Palette smoke. - -Nicht in E2E ausweiten: - -- Vollstaendige Command-Liste, IDs, Aliases und Visibility. -- Exhaustive Sortier-, Label- und Filter-Matrizen. -- Reine Datenformung oder Settings-Mapping. - -Akzeptanz: - -- Playwright bleibt kurz und prueft echte Browserintegration. -- Worker-scoped Server, Port, Runtime-Root und Auth-Session bleiben erhalten. -- Kein User-Flow wird ausschliesslich durch pure Unit-Tests ersetzt, wenn der - Browser selbst Teil des Risikos ist. - -## Zielarchitektur - -| Testlevel | Zweck | Performance-Regel | -| --- | --- | --- | -| Node Unit | Pure Logik, Branches, Formatierung, Runtime-Factories | Schnell, fake by default. | -| Frontend jsdom | DOM, ARIA, Keyboard, Focus, Motion, Provider-Zusammenspiel | Zielgenau, keine breiten Dashboard-Smokes. | -| Integration Node | Echter Server, HTTP, CLI, FS, Ports, Cross-Process | Wenige echte Vertraege, gute Diagnose. | -| Architecture | Import-, Layer- und Strukturregeln | Klein halten, shared source graph nutzen. | -| E2E | Echte Browser-Journeys | Kurz, worker-isoliert, keine Contract-Matrix. | -| Coverage | Breiter Denominator und Risk Ratchet | Separater Lauf, nicht in jede Matrix duplizieren. | - -## Empfohlene Reihenfolge - -1. Background-Concurrency diagnostizieren und stabilisieren. -2. 
Frontend/jsdom-Testklassifizierung durchfuehren. -3. Pure Logik aus Frontendtests in Node-Unit-Tests verschieben. -4. Prozessnahe Tests in Fake-Branch-Tests und echte Smokes trennen. -5. Server-Runtime-Coverage fuer Background, Router, Auto-Import und Report - erhoehen. -6. Controller-/Hook-Coverage fuer Dashboard Actions, Settings, CSV und Model - Utils erhoehen. -7. Timing-Budgets projektweise schaerfen. -8. CI- und Playwright-Sharding nur nach report-sicherem Mehrfachbenchmark - erweitern. - -## Validierung - -Da diese Datei nur Review und Plan enthaelt: - -- `npx prettier --check docs/review/test-review.md` -- `git diff --check -- docs/review/test-review.md` - -Fuer spaetere Implementierungsphasen: - -- Nach Performance-Aenderungen: - `node scripts/run-vitest-project-timings.js --projects= --repeat=3` -- Nach Coverage-Aenderungen: `npm run test:vitest:coverage` -- Nach Pipeline-Aenderungen: - `PLAYWRIGHT_TEST_PORT=3016 npm run verify:full:parallel` -- Nach E2E-Aenderungen: `npm run test:e2e:ci` und bei Sharding/Worker-Aenderung - mehrere Wiederholungen mit eindeutigen Reportpfaden. diff --git a/docs/testing.md b/docs/testing.md index f4fc49b..bb75b00 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -64,7 +64,8 @@ Architecture constraints are documented separately in [`docs/architecture.md`](. - For large server helper or integration suites, group tests by subsystem so Vitest can schedule them more efficiently. - Keep background-process integration files focused by behavior; the background Vitest project intentionally uses a small worker cap instead of one serial catch-all file or unbounded process fan-out. - Keep Playwright files grouped by end-to-end journey, such as load/upload, forecast/filter interaction, settings/backups, reporting, and command palette behavior. Import `test` and `expect` from `tests/e2e/fixtures.ts` so each worker gets its own server, port, auth session, and runtime directory. 
Share authentication, server reset, seeding, and download helpers through `tests/e2e/helpers.ts` instead of creating new browser catch-all files. -- `tests/unit/playwright-config.test.ts` guards the small E2E journey list, the shared fixture import, CI worker cap, and Playwright reporter paths. Update that contract intentionally when adding a new browser journey. +- Every Playwright spec must reset state through `resetAppState(...)` or prepare an isolated dashboard through `prepareDashboard(...)` before it asserts app behavior. `tests/unit/playwright-config.test.ts` fails when a new spec skips that isolation contract. +- `tests/unit/playwright-config.test.ts` also guards the small E2E journey list, the shared fixture import, CI worker cap, and Playwright reporter paths. Update that contract intentionally when adding a new browser journey. ## Choosing the Right Layer @@ -134,6 +135,7 @@ Prioritize targeted branch coverage in runtime-heavy modules before adding anoth ## Local Commands - Required pre-PR gate: run `npm run verify:full` before opening a PR to ensure all tests and checks pass. 
+- Faster non-coverage fast path on a local machine with enough CPU: `PLAYWRIGHT_TEST_PORT=3016 npm run verify:full:parallel` - Faster inner-loop gate: `npm run verify` - Static gate only: `npm run test:static` - All Vitest projects without coverage: `npm run test:vitest` @@ -146,7 +148,7 @@ Prioritize targeted branch coverage in runtime-heavy modules before adding anoth - CI-style Playwright smoke: `npm run test:e2e:ci` - Serial local mirror of the CI gate: `npm run verify:ci` - Optional parallel local gate without Playwright: `npm run verify:parallel` -- Optional parallel full local gate with Playwright after build: `PLAYWRIGHT_TEST_PORT=3016 npm run verify:full:parallel` +- Optional parallel local fast path including Playwright, without coverage instrumentation: `PLAYWRIGHT_TEST_PORT=3016 npm run verify:full:parallel` ## Architecture Guardrails @@ -191,7 +193,9 @@ parallel Vitest projects from over-subscribing the same cores and keeps real bac from the jsdom/build storm. `verify:package` runs after those waves succeed. `npm run verify:full:parallel` adds `test:e2e:ci` to the final wave. The canonical serial gates stay unchanged; use the parallel gates for local feedback when the machine has enough CPU and the -per-project JUnit report paths must stay isolated. +per-project JUnit report paths must stay isolated. The parallel fast path intentionally avoids a +second coverage-instrumented Vitest pass, so pair it with `npm run test:vitest:coverage` or the +serial `npm run verify:full` when coverage thresholds are part of the validation. Use `node scripts/run-parallel-gate.js --dry-run --e2e` after changing gate scripts to inspect the task waves and their declared report outputs before running the expensive full gate. 
The script From c4e30494e8f135dd11bb33c4998fda46fc356bf1 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Fri, 1 May 2026 10:23:10 +0200 Subject: [PATCH 29/42] Split dashboard section ownership --- .../dashboard/DashboardSections.tsx | 721 +----------------- .../sections/dashboard-analysis-sections.tsx | 209 +++++ .../dashboard-comparison-table-sections.tsx | 129 ++++ .../sections/dashboard-forecast-sections.tsx | 120 +++ .../sections/dashboard-overview-sections.tsx | 128 ++++ .../dashboard-section-lazy-components.ts | 212 +++++ .../sections/dashboard-section-metadata.ts | 28 + .../dashboard-section-renderer-types.ts | 27 + .../sections/dashboard-section-renderers.ts | 25 + .../use-dashboard-controller-derived-state.ts | 5 + src/hooks/use-dashboard-controller.ts | 1 + src/types/dashboard-controller.d.ts | 1 + src/types/dashboard-view-model.d.ts | 1 + .../dashboard-sections-contract.test.ts | 61 ++ .../dashboard-controller-test-helpers.ts | 1 + .../dashboard-sections-rendering.test.tsx | 14 + 16 files changed, 981 insertions(+), 702 deletions(-) create mode 100644 src/components/dashboard/sections/dashboard-analysis-sections.tsx create mode 100644 src/components/dashboard/sections/dashboard-comparison-table-sections.tsx create mode 100644 src/components/dashboard/sections/dashboard-forecast-sections.tsx create mode 100644 src/components/dashboard/sections/dashboard-overview-sections.tsx create mode 100644 src/components/dashboard/sections/dashboard-section-lazy-components.ts create mode 100644 src/components/dashboard/sections/dashboard-section-metadata.ts create mode 100644 src/components/dashboard/sections/dashboard-section-renderer-types.ts create mode 100644 src/components/dashboard/sections/dashboard-section-renderers.ts diff --git a/src/components/dashboard/DashboardSections.tsx b/src/components/dashboard/DashboardSections.tsx index e7c1fa7..8df0e60 100644 --- a/src/components/dashboard/DashboardSections.tsx +++ b/src/components/dashboard/DashboardSections.tsx @@ 
-1,206 +1,19 @@ -import { - Fragment, - Suspense, - lazy, - useEffect, - useMemo, - useState, - type ComponentType, - type LazyExoticComponent, - type ReactNode, -} from 'react' +import { Fragment, Suspense, useEffect, useMemo, useState, type ReactNode } from 'react' import { useTranslation } from 'react-i18next' -import { PrimaryMetrics } from '../cards/PrimaryMetrics' -import { SecondaryMetrics } from '../cards/SecondaryMetrics' -import { TodayMetrics } from '../cards/TodayMetrics' -import { MonthMetrics } from '../cards/MonthMetrics' -import { HeatmapCalendar } from '../features/heatmap/HeatmapCalendar' -import { UsageInsights } from '../features/insights/UsageInsights' -import { ConcentrationRisk } from '../features/risk/ConcentrationRisk' -import { SectionHeader } from '../ui/section-header' -import { ExpandableCard } from '../ui/expandable-card' import { ChartCardSkeleton } from '../ui/skeleton' import { ErrorBoundary } from '../ui/error-boundary' import { AnimatedDashboardSection, scheduleDashboardPreloads } from './DashboardMotion' +import { resolveDashboardSectionPreloadTasks } from './dashboard-section-preloading' +import { dashboardSectionPreloaders } from './sections/dashboard-section-lazy-components' import { - resolveDashboardSectionPreloadTasks, - type DashboardSectionPreloaders, -} from './dashboard-section-preloading' -import { SECTION_HELP } from '@/lib/help-content' + dashboardSectionAnchorMap, + dashboardSectionPlaceholderClassName, +} from './sections/dashboard-section-metadata' +import { renderDashboardSection } from './sections/dashboard-section-renderers' import { cn } from '@/lib/cn' import type { DashboardSectionsViewModel } from '@/types/dashboard-view-model' -import { formatCurrency, formatPercent, formatTokens, periodUnit } from '@/lib/formatters' import type { DashboardSectionId } from '@/types' -// eslint-disable-next-line @typescript-eslint/no-explicit-any -type PreloadableLazyComponent> = LazyExoticComponent & { - preload: () => 
Promise<{ default: T }> -} - -// eslint-disable-next-line @typescript-eslint/no-explicit-any -function lazyWithPreload>( - loader: () => Promise<{ default: T }>, -): PreloadableLazyComponent { - const Component = lazy(loader) as PreloadableLazyComponent - Component.preload = loader - return Component -} - -function preloadComponents( - ...components: Array<{ preload: () => Promise }> -): Promise { - return Promise.all(components.map((component) => component.preload())) -} - -const CostForecast = lazyWithPreload(() => - import('../features/forecast/CostForecast').then((module) => ({ - default: module.CostForecast, - })), -) -const ProviderCostForecast = lazyWithPreload(() => - import('../features/forecast/ProviderCostForecast').then((module) => ({ - default: module.ProviderCostForecast, - })), -) -const ForecastZoomDialog = lazyWithPreload(() => - import('../features/forecast/ForecastZoomDialog').then((module) => ({ - default: module.ForecastZoomDialog, - })), -) -const CostOverTime = lazyWithPreload(() => - import('../charts/CostOverTime').then((module) => ({ - default: module.CostOverTime, - })), -) -const CostByModel = lazyWithPreload(() => - import('../charts/CostByModel').then((module) => ({ - default: module.CostByModel, - })), -) -const CostByModelOverTime = lazyWithPreload(() => - import('../charts/CostByModelOverTime').then((module) => ({ - default: module.CostByModelOverTime, - })), -) -const CumulativeCost = lazyWithPreload(() => - import('../charts/CumulativeCost').then((module) => ({ - default: module.CumulativeCost, - })), -) -const CumulativeCostPerProvider = lazyWithPreload(() => - import('../charts/CumulativeCostPerProvider').then((module) => ({ - default: module.CumulativeCostPerProvider, - })), -) -const CostByWeekday = lazyWithPreload(() => - import('../charts/CostByWeekday').then((module) => ({ - default: module.CostByWeekday, - })), -) -const TokenEfficiency = lazyWithPreload(() => - import('../charts/TokenEfficiency').then((module) => ({ - 
default: module.TokenEfficiency, - })), -) -const ModelMix = lazyWithPreload(() => - import('../charts/ModelMix').then((module) => ({ - default: module.ModelMix, - })), -) -const TokensOverTime = lazyWithPreload(() => - import('../charts/TokensOverTime').then((module) => ({ - default: module.TokensOverTime, - })), -) -const TokenTypes = lazyWithPreload(() => - import('../charts/TokenTypes').then((module) => ({ - default: module.TokenTypes, - })), -) -const RequestsOverTime = lazyWithPreload(() => - import('../charts/RequestsOverTime').then((module) => ({ - default: module.RequestsOverTime, - })), -) -const RequestCacheHitRateByModel = lazyWithPreload(() => - import('../charts/RequestCacheHitRateByModel').then((module) => ({ - default: module.RequestCacheHitRateByModel, - })), -) -const CacheROI = lazyWithPreload(() => - import('../features/cache-roi/CacheROI').then((module) => ({ - default: module.CacheROI, - })), -) -const ProviderLimitsSection = lazyWithPreload(() => - import('../features/limits/ProviderLimitsSection').then((module) => ({ - default: module.ProviderLimitsSection, - })), -) -const RequestQuality = lazyWithPreload(() => - import('../features/request-quality/RequestQuality').then((module) => ({ - default: module.RequestQuality, - })), -) -const DistributionAnalysis = lazyWithPreload(() => - import('../charts/DistributionAnalysis').then((module) => ({ - default: module.DistributionAnalysis, - })), -) -const CorrelationAnalysis = lazyWithPreload(() => - import('../charts/CorrelationAnalysis').then((module) => ({ - default: module.CorrelationAnalysis, - })), -) -const PeriodComparison = lazyWithPreload(() => - import('../features/comparison/PeriodComparison').then((module) => ({ - default: module.PeriodComparison, - })), -) -const AnomalyDetection = lazyWithPreload(() => - import('../features/anomaly/AnomalyDetection').then((module) => ({ - default: module.AnomalyDetection, - })), -) -const ModelEfficiency = lazyWithPreload(() => - 
import('../tables/ModelEfficiency').then((module) => ({ - default: module.ModelEfficiency, - })), -) -const ProviderEfficiency = lazyWithPreload(() => - import('../tables/ProviderEfficiency').then((module) => ({ - default: module.ProviderEfficiency, - })), -) -const RecentDays = lazyWithPreload(() => - import('../tables/RecentDays').then((module) => ({ - default: module.RecentDays, - })), -) - -const dashboardSectionPreloaders = { - forecastCache: () => - preloadComponents(CostForecast, ProviderCostForecast, ForecastZoomDialog, CacheROI), - limits: () => preloadComponents(ProviderLimitsSection), - costAnalysis: () => - preloadComponents( - CostOverTime, - CostByModel, - CumulativeCostPerProvider, - CostByModelOverTime, - CumulativeCost, - CostByWeekday, - TokenEfficiency, - ModelMix, - ), - tokenAnalysis: () => preloadComponents(TokensOverTime, TokenTypes), - requestAnalysis: () => - preloadComponents(RequestsOverTime, RequestCacheHitRateByModel, RequestQuality), - advancedAnalysis: () => preloadComponents(DistributionAnalysis, CorrelationAnalysis), - comparisons: () => preloadComponents(PeriodComparison, AnomalyDetection), - tables: () => preloadComponents(ModelEfficiency, ProviderEfficiency, RecentDays), -} satisfies DashboardSectionPreloaders - interface DashboardSectionsProps { viewModel: DashboardSectionsViewModel } @@ -209,19 +22,7 @@ interface DashboardSectionsProps { export function DashboardSections({ viewModel }: DashboardSectionsProps) { const { t } = useTranslation() const [forecastZoomOpen, setForecastZoomOpen] = useState(false) - const { - layout, - overview, - forecast, - limits, - costAnalysis, - tokenAnalysis, - requestAnalysis, - advancedAnalysis, - comparisons, - tables, - interactions, - } = viewModel + const { layout, requestAnalysis } = viewModel const { sectionOrder, sectionVisibility } = layout const warmupPreloadTasks = useMemo( () => @@ -271,30 +72,6 @@ export function DashboardSections({ viewModel }: DashboardSectionsProps) { ) - const 
sectionPlaceholderClassName: Record = { - insights: 'min-h-[260px]', - metrics: 'min-h-[320px]', - today: 'min-h-[320px]', - currentMonth: 'min-h-[360px]', - activity: 'min-h-[360px]', - forecastCache: 'min-h-[900px]', - limits: 'min-h-[480px]', - costAnalysis: 'min-h-[1460px]', - tokenAnalysis: 'min-h-[430px]', - requestAnalysis: 'min-h-[1040px]', - advancedAnalysis: 'min-h-[760px]', - comparisons: 'min-h-[480px]', - tables: 'min-h-[1100px]', - } - const sectionAnchorMap: Partial> = { - costAnalysis: 'charts', - currentMonth: 'current-month', - forecastCache: 'forecast-cache', - tokenAnalysis: 'token-analysis', - requestAnalysis: 'request-analysis', - advancedAnalysis: 'advanced-analysis', - } - const renderAnimatedSection = ( sectionId: DashboardSectionId, children: ReactNode, @@ -303,13 +80,13 @@ export function DashboardSections({ viewModel }: DashboardSectionsProps) { onPreload, }: { eager?: boolean; onPreload?: () => void | Promise } = {}, ) => { - const sectionAnchorId = sectionAnchorMap[sectionId] ?? sectionId + const sectionAnchorId = dashboardSectionAnchorMap[sectionId] ?? sectionId return ( {children} @@ -317,475 +94,15 @@ export function DashboardSections({ viewModel }: DashboardSectionsProps) { ) } - const renderSection = (sectionId: DashboardSectionId) => { - switch (sectionId) { - case 'insights': - return sectionVisibility.insights - ? renderAnimatedSection( - 'insights', - , - { eager: true }, - ) - : null - case 'metrics': - return sectionVisibility.metrics - ? renderAnimatedSection( - 'metrics', - <> - - -
- entry.totalCost)} - viewMode={overview.viewMode} - /> -
- , - { eager: true }, - ) - : null - case 'today': - return sectionVisibility.today && overview.todayData - ? renderAnimatedSection( - 'today', - , - { - eager: true, - }, - ) - : null - case 'currentMonth': - return sectionVisibility.currentMonth && overview.hasCurrentMonthData - ? renderAnimatedSection( - 'currentMonth', - , - { - eager: true, - }, - ) - : null - case 'activity': - return sectionVisibility.activity - ? renderAnimatedSection( - 'activity', - <> - -
- - - -
- , - ) - : null - case 'forecastCache': - return sectionVisibility.forecastCache - ? renderAnimatedSection( - 'forecastCache', - <> - -
- {renderLazySection( - setForecastZoomOpen(true)} - />, - 'h-[360px]', - )} - {renderLazySection( - - - , - 'h-[360px]', - )} -
-
- {renderLazySection( - setForecastZoomOpen(true)} - />, - 'h-[430px]', - )} -
- - - - - - , - { - onPreload: dashboardSectionPreloaders.forecastCache, - }, - ) - : null - case 'limits': - return sectionVisibility.limits - ? renderAnimatedSection( - 'limits', - renderLazySection( - , - 'h-[420px]', - ), - { - onPreload: dashboardSectionPreloaders.limits, - }, - ) - : null - case 'costAnalysis': - return sectionVisibility.costAnalysis - ? renderAnimatedSection( - 'costAnalysis', - <> - -
-
- {renderLazySection( - , - 'h-[360px]', - )} -
- {renderLazySection(, 'h-[360px]')} -
-
- {renderLazySection( - , - 'h-[320px]', - )} - {renderLazySection( - , - 'h-[320px]', - )} -
-
- {renderLazySection( - , - 'h-[320px]', - )} - {renderLazySection( - , - 'h-[320px]', - )} -
-
- {renderLazySection( - , - 'h-[320px]', - )} - {renderLazySection(, 'h-[320px]')} -
- , - { - onPreload: dashboardSectionPreloaders.costAnalysis, - }, - ) - : null - case 'tokenAnalysis': - return sectionVisibility.tokenAnalysis - ? renderAnimatedSection( - 'tokenAnalysis', - <> - -
- {renderLazySection( - , - 'h-[320px]', - )} - {renderLazySection(, 'h-[320px]')} -
- , - { - onPreload: dashboardSectionPreloaders.tokenAnalysis, - }, - ) - : null - case 'requestAnalysis': - return sectionVisibility.requestAnalysis && requestAnalysis.metrics.hasRequestData - ? renderAnimatedSection( - 'requestAnalysis', - <> - - {renderLazySection( - , - 'h-[320px]', - )} -
- {renderLazySection( - , - 'h-[320px]', - )} -
-
- {renderLazySection( - , - 'h-[280px]', - )} -
- , - { - onPreload: dashboardSectionPreloaders.requestAnalysis, - }, - ) - : null - case 'advancedAnalysis': - return sectionVisibility.advancedAnalysis - ? renderAnimatedSection( - 'advancedAnalysis', - <> - -
- {renderLazySection( - , - 'h-[320px]', - )} - -
-
- {renderLazySection( - , - 'h-[320px]', - )} -
- , - { - onPreload: dashboardSectionPreloaders.advancedAnalysis, - }, - ) - : null - case 'comparisons': - return sectionVisibility.comparisons - ? renderAnimatedSection( - 'comparisons', - <> - -
- {renderLazySection( - - - , - 'h-[360px]', - )} - {renderLazySection( - - - , - 'h-[360px]', - )} -
- , - { - onPreload: dashboardSectionPreloaders.comparisons, - }, - ) - : null - case 'tables': - return sectionVisibility.tables - ? renderAnimatedSection( - 'tables', - <> - - {renderLazySection( - , - 'h-[320px]', - )} -
- {renderLazySection( - , - 'h-[320px]', - )} -
-
- {renderLazySection( - , - 'h-[360px]', - )} -
- , - { - onPreload: dashboardSectionPreloaders.tables, - }, - ) - : null - default: - return null - } - } + const renderSection = (sectionId: DashboardSectionId) => + renderDashboardSection(sectionId, { + viewModel, + t, + forecastZoomOpen, + setForecastZoomOpen, + renderAnimatedSection, + renderLazySection, + }) return ( <> diff --git a/src/components/dashboard/sections/dashboard-analysis-sections.tsx b/src/components/dashboard/sections/dashboard-analysis-sections.tsx new file mode 100644 index 0000000..ce50a04 --- /dev/null +++ b/src/components/dashboard/sections/dashboard-analysis-sections.tsx @@ -0,0 +1,209 @@ +import { ConcentrationRisk } from '../../features/risk/ConcentrationRisk' +import { SectionHeader } from '../../ui/section-header' +import { SECTION_HELP } from '@/lib/help-content' +import type { DashboardSectionId } from '@/types' +import { + dashboardLazySectionComponents, + dashboardSectionPreloaders, +} from './dashboard-section-lazy-components' +import type { DashboardSectionRenderer } from './dashboard-section-renderer-types' + +const { + CorrelationAnalysis, + CostByModel, + CostByModelOverTime, + CostByWeekday, + CostOverTime, + CumulativeCost, + CumulativeCostPerProvider, + DistributionAnalysis, + ModelMix, + RequestCacheHitRateByModel, + RequestQuality, + RequestsOverTime, + TokenEfficiency, + TokenTypes, + TokensOverTime, +} = dashboardLazySectionComponents + +/** Renderers for chart-heavy dashboard analysis sections. */ +export const analysisSectionRenderers = { + costAnalysis: ({ viewModel, t, renderAnimatedSection, renderLazySection }) => { + const { costAnalysis, interactions, layout } = viewModel + + return layout.sectionVisibility.costAnalysis + ? renderAnimatedSection( + 'costAnalysis', + <> + +
+
+ {renderLazySection( + , + 'h-[360px]', + )} +
+ {renderLazySection(, 'h-[360px]')} +
+
+ {renderLazySection( + , + 'h-[320px]', + )} + {renderLazySection( + , + 'h-[320px]', + )} +
+
+ {renderLazySection( + , + 'h-[320px]', + )} + {renderLazySection(, 'h-[320px]')} +
+
+ {renderLazySection(, 'h-[320px]')} + {renderLazySection(, 'h-[320px]')} +
+ , + { + onPreload: dashboardSectionPreloaders.costAnalysis, + }, + ) + : null + }, + tokenAnalysis: ({ viewModel, t, renderAnimatedSection, renderLazySection }) => { + const { tokenAnalysis, interactions, layout } = viewModel + + return layout.sectionVisibility.tokenAnalysis + ? renderAnimatedSection( + 'tokenAnalysis', + <> + +
+ {renderLazySection( + , + 'h-[320px]', + )} + {renderLazySection(, 'h-[320px]')} +
+ , + { + onPreload: dashboardSectionPreloaders.tokenAnalysis, + }, + ) + : null + }, + requestAnalysis: ({ viewModel, t, renderAnimatedSection, renderLazySection }) => { + const { requestAnalysis, interactions, layout } = viewModel + + return layout.sectionVisibility.requestAnalysis && requestAnalysis.metrics.hasRequestData + ? renderAnimatedSection( + 'requestAnalysis', + <> + + {renderLazySection( + , + 'h-[320px]', + )} +
+ {renderLazySection( + , + 'h-[320px]', + )} +
+
+ {renderLazySection( + , + 'h-[280px]', + )} +
+ , + { + onPreload: dashboardSectionPreloaders.requestAnalysis, + }, + ) + : null + }, + advancedAnalysis: ({ viewModel, t, renderAnimatedSection, renderLazySection }) => { + const { advancedAnalysis, layout } = viewModel + + return layout.sectionVisibility.advancedAnalysis + ? renderAnimatedSection( + 'advancedAnalysis', + <> + +
+ {renderLazySection( + , + 'h-[320px]', + )} + {/* Keep this card eager to preserve the existing advanced-analysis first paint. */} + +
+
+ {renderLazySection( + , + 'h-[320px]', + )} +
+ , + { + onPreload: dashboardSectionPreloaders.advancedAnalysis, + }, + ) + : null + }, +} satisfies Partial> diff --git a/src/components/dashboard/sections/dashboard-comparison-table-sections.tsx b/src/components/dashboard/sections/dashboard-comparison-table-sections.tsx new file mode 100644 index 0000000..eea3071 --- /dev/null +++ b/src/components/dashboard/sections/dashboard-comparison-table-sections.tsx @@ -0,0 +1,129 @@ +import { ExpandableCard } from '../../ui/expandable-card' +import { SectionHeader } from '../../ui/section-header' +import { SECTION_HELP } from '@/lib/help-content' +import { formatCurrency, periodUnit } from '@/lib/formatters' +import type { DashboardSectionId } from '@/types' +import { + dashboardLazySectionComponents, + dashboardSectionPreloaders, +} from './dashboard-section-lazy-components' +import type { DashboardSectionRenderer } from './dashboard-section-renderer-types' + +const { AnomalyDetection, ModelEfficiency, PeriodComparison, ProviderEfficiency, RecentDays } = + dashboardLazySectionComponents + +/** Renderers for comparison and table dashboard sections. */ +export const comparisonTableSectionRenderers = { + comparisons: ({ viewModel, t, renderAnimatedSection, renderLazySection }) => { + const { comparisons, interactions, layout } = viewModel + + return layout.sectionVisibility.comparisons + ? renderAnimatedSection( + 'comparisons', + <> + +
+ {renderLazySection( + + + , + 'h-[360px]', + )} + {renderLazySection( + + + , + 'h-[360px]', + )} +
+ , + { + onPreload: dashboardSectionPreloaders.comparisons, + }, + ) + : null + }, + tables: ({ viewModel, t, renderAnimatedSection, renderLazySection }) => { + const { tables, interactions, layout } = viewModel + + return layout.sectionVisibility.tables + ? renderAnimatedSection( + 'tables', + <> + + {renderLazySection( + , + 'h-[320px]', + )} +
+ {renderLazySection( + , + 'h-[320px]', + )} +
+
+ {renderLazySection( + , + 'h-[360px]', + )} +
+ , + { + onPreload: dashboardSectionPreloaders.tables, + }, + ) + : null + }, +} satisfies Partial> diff --git a/src/components/dashboard/sections/dashboard-forecast-sections.tsx b/src/components/dashboard/sections/dashboard-forecast-sections.tsx new file mode 100644 index 0000000..f5f67ce --- /dev/null +++ b/src/components/dashboard/sections/dashboard-forecast-sections.tsx @@ -0,0 +1,120 @@ +import { Suspense } from 'react' +import { ExpandableCard } from '../../ui/expandable-card' +import { ErrorBoundary } from '../../ui/error-boundary' +import { SectionHeader } from '../../ui/section-header' +import { SECTION_HELP } from '@/lib/help-content' +import { formatPercent, formatTokens } from '@/lib/formatters' +import type { DashboardSectionId } from '@/types' +import { + dashboardLazySectionComponents, + dashboardSectionPreloaders, +} from './dashboard-section-lazy-components' +import type { DashboardSectionRenderer } from './dashboard-section-renderer-types' + +const { CacheROI, CostForecast, ForecastZoomDialog, ProviderCostForecast, ProviderLimitsSection } = + dashboardLazySectionComponents + +/** Renderers for forecast, cache, and limit dashboard sections. */ +export const forecastSectionRenderers = { + forecastCache: ({ + viewModel, + t, + forecastZoomOpen, + setForecastZoomOpen, + renderAnimatedSection, + renderLazySection, + }) => { + const { forecast, layout } = viewModel + + return layout.sectionVisibility.forecastCache + ? renderAnimatedSection( + 'forecastCache', + <> + +
+ {renderLazySection( + setForecastZoomOpen(true)} + />, + 'h-[360px]', + )} + {renderLazySection( + + + , + 'h-[360px]', + )} +
+
+ {renderLazySection( + setForecastZoomOpen(true)} + />, + 'h-[430px]', + )} +
+ + + + + + , + { + onPreload: dashboardSectionPreloaders.forecastCache, + }, + ) + : null + }, + limits: ({ viewModel, renderAnimatedSection, renderLazySection }) => { + const { limits, layout } = viewModel + + return layout.sectionVisibility.limits + ? renderAnimatedSection( + 'limits', + renderLazySection( + , + 'h-[420px]', + ), + { + onPreload: dashboardSectionPreloaders.limits, + }, + ) + : null + }, +} satisfies Partial> diff --git a/src/components/dashboard/sections/dashboard-overview-sections.tsx b/src/components/dashboard/sections/dashboard-overview-sections.tsx new file mode 100644 index 0000000..4b5076d --- /dev/null +++ b/src/components/dashboard/sections/dashboard-overview-sections.tsx @@ -0,0 +1,128 @@ +import { PrimaryMetrics } from '../../cards/PrimaryMetrics' +import { SecondaryMetrics } from '../../cards/SecondaryMetrics' +import { TodayMetrics } from '../../cards/TodayMetrics' +import { MonthMetrics } from '../../cards/MonthMetrics' +import { HeatmapCalendar } from '../../features/heatmap/HeatmapCalendar' +import { UsageInsights } from '../../features/insights/UsageInsights' +import { SectionHeader } from '../../ui/section-header' +import { SECTION_HELP } from '@/lib/help-content' +import type { DashboardSectionId } from '@/types' +import type { DashboardSectionRenderer } from './dashboard-section-renderer-types' + +const activityDescriptionKeys = { + daily: 'dashboard.activity.dailyDescription', + monthly: 'dashboard.activity.monthlyDescription', + yearly: 'dashboard.activity.yearlyDescription', +} as const + +/** Renderers for eager overview and activity dashboard sections. */ +export const overviewSectionRenderers = { + insights: ({ viewModel, renderAnimatedSection }) => { + const { overview, layout } = viewModel + + return layout.sectionVisibility.insights + ? 
renderAnimatedSection( + 'insights', + , + { eager: true }, + ) + : null + }, + metrics: ({ viewModel, t, renderAnimatedSection }) => { + const { overview, layout } = viewModel + + return layout.sectionVisibility.metrics + ? renderAnimatedSection( + 'metrics', + <> + + +
+ +
+ , + { eager: true }, + ) + : null + }, + today: ({ viewModel, renderAnimatedSection }) => { + const { overview, layout } = viewModel + + return layout.sectionVisibility.today && overview.todayData + ? renderAnimatedSection( + 'today', + , + { + eager: true, + }, + ) + : null + }, + currentMonth: ({ viewModel, renderAnimatedSection }) => { + const { overview, layout } = viewModel + + return layout.sectionVisibility.currentMonth && overview.hasCurrentMonthData + ? renderAnimatedSection( + 'currentMonth', + , + { + eager: true, + }, + ) + : null + }, + activity: ({ viewModel, t, renderAnimatedSection }) => { + const { overview, layout } = viewModel + + return layout.sectionVisibility.activity + ? renderAnimatedSection( + 'activity', + <> + +
+ + + +
+ , + { eager: false }, + ) + : null + }, +} satisfies Partial> diff --git a/src/components/dashboard/sections/dashboard-section-lazy-components.ts b/src/components/dashboard/sections/dashboard-section-lazy-components.ts new file mode 100644 index 0000000..a4f03fc --- /dev/null +++ b/src/components/dashboard/sections/dashboard-section-lazy-components.ts @@ -0,0 +1,212 @@ +import { lazy, type ComponentType, type LazyExoticComponent } from 'react' +import type { DashboardSectionPreloaders } from '../dashboard-section-preloading' + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +type PreloadableLazyComponent> = LazyExoticComponent & { + preload: () => Promise<{ default: T }> +} + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +function lazyWithPreload>( + loader: () => Promise<{ default: T }>, +): PreloadableLazyComponent { + const Component = lazy(loader) as PreloadableLazyComponent + Component.preload = loader + return Component +} + +async function preloadComponents( + ...components: Array<{ preload: () => Promise }> +): Promise { + const results = await Promise.allSettled(components.map((component) => component.preload())) + const rejectedResult = results.find((result): result is PromiseRejectedResult => { + return result.status === 'rejected' + }) + + if (rejectedResult) { + throw rejectedResult.reason + } + + return results + .filter((result): result is PromiseFulfilledResult => result.status === 'fulfilled') + .map((result) => result.value) +} + +const CostForecast = lazyWithPreload(() => + import('../../features/forecast/CostForecast').then((module) => ({ + default: module.CostForecast, + })), +) +const ProviderCostForecast = lazyWithPreload(() => + import('../../features/forecast/ProviderCostForecast').then((module) => ({ + default: module.ProviderCostForecast, + })), +) +const ForecastZoomDialog = lazyWithPreload(() => + import('../../features/forecast/ForecastZoomDialog').then((module) => ({ + default: 
module.ForecastZoomDialog, + })), +) +const CostOverTime = lazyWithPreload(() => + import('../../charts/CostOverTime').then((module) => ({ + default: module.CostOverTime, + })), +) +const CostByModel = lazyWithPreload(() => + import('../../charts/CostByModel').then((module) => ({ + default: module.CostByModel, + })), +) +const CostByModelOverTime = lazyWithPreload(() => + import('../../charts/CostByModelOverTime').then((module) => ({ + default: module.CostByModelOverTime, + })), +) +const CumulativeCost = lazyWithPreload(() => + import('../../charts/CumulativeCost').then((module) => ({ + default: module.CumulativeCost, + })), +) +const CumulativeCostPerProvider = lazyWithPreload(() => + import('../../charts/CumulativeCostPerProvider').then((module) => ({ + default: module.CumulativeCostPerProvider, + })), +) +const CostByWeekday = lazyWithPreload(() => + import('../../charts/CostByWeekday').then((module) => ({ + default: module.CostByWeekday, + })), +) +const TokenEfficiency = lazyWithPreload(() => + import('../../charts/TokenEfficiency').then((module) => ({ + default: module.TokenEfficiency, + })), +) +const ModelMix = lazyWithPreload(() => + import('../../charts/ModelMix').then((module) => ({ + default: module.ModelMix, + })), +) +const TokensOverTime = lazyWithPreload(() => + import('../../charts/TokensOverTime').then((module) => ({ + default: module.TokensOverTime, + })), +) +const TokenTypes = lazyWithPreload(() => + import('../../charts/TokenTypes').then((module) => ({ + default: module.TokenTypes, + })), +) +const RequestsOverTime = lazyWithPreload(() => + import('../../charts/RequestsOverTime').then((module) => ({ + default: module.RequestsOverTime, + })), +) +const RequestCacheHitRateByModel = lazyWithPreload(() => + import('../../charts/RequestCacheHitRateByModel').then((module) => ({ + default: module.RequestCacheHitRateByModel, + })), +) +const CacheROI = lazyWithPreload(() => + import('../../features/cache-roi/CacheROI').then((module) => ({ + default: 
module.CacheROI, + })), +) +const ProviderLimitsSection = lazyWithPreload(() => + import('../../features/limits/ProviderLimitsSection').then((module) => ({ + default: module.ProviderLimitsSection, + })), +) +const RequestQuality = lazyWithPreload(() => + import('../../features/request-quality/RequestQuality').then((module) => ({ + default: module.RequestQuality, + })), +) +const DistributionAnalysis = lazyWithPreload(() => + import('../../charts/DistributionAnalysis').then((module) => ({ + default: module.DistributionAnalysis, + })), +) +const CorrelationAnalysis = lazyWithPreload(() => + import('../../charts/CorrelationAnalysis').then((module) => ({ + default: module.CorrelationAnalysis, + })), +) +const PeriodComparison = lazyWithPreload(() => + import('../../features/comparison/PeriodComparison').then((module) => ({ + default: module.PeriodComparison, + })), +) +const AnomalyDetection = lazyWithPreload(() => + import('../../features/anomaly/AnomalyDetection').then((module) => ({ + default: module.AnomalyDetection, + })), +) +const ModelEfficiency = lazyWithPreload(() => + import('../../tables/ModelEfficiency').then((module) => ({ + default: module.ModelEfficiency, + })), +) +const ProviderEfficiency = lazyWithPreload(() => + import('../../tables/ProviderEfficiency').then((module) => ({ + default: module.ProviderEfficiency, + })), +) +const RecentDays = lazyWithPreload(() => + import('../../tables/RecentDays').then((module) => ({ + default: module.RecentDays, + })), +) + +/** Lazy dashboard section component registry shared by family renderers and preload plans. 
*/ +export const dashboardLazySectionComponents = { + CostForecast, + ProviderCostForecast, + ForecastZoomDialog, + CostOverTime, + CostByModel, + CostByModelOverTime, + CumulativeCost, + CumulativeCostPerProvider, + CostByWeekday, + TokenEfficiency, + ModelMix, + TokensOverTime, + TokenTypes, + RequestsOverTime, + RequestCacheHitRateByModel, + CacheROI, + ProviderLimitsSection, + RequestQuality, + DistributionAnalysis, + CorrelationAnalysis, + PeriodComparison, + AnomalyDetection, + ModelEfficiency, + ProviderEfficiency, + RecentDays, +} + +/** Preload groups aligned with the dashboard section families. */ +export const dashboardSectionPreloaders = { + forecastCache: () => + preloadComponents(CostForecast, ProviderCostForecast, ForecastZoomDialog, CacheROI), + limits: () => preloadComponents(ProviderLimitsSection), + costAnalysis: () => + preloadComponents( + CostOverTime, + CostByModel, + CumulativeCostPerProvider, + CostByModelOverTime, + CumulativeCost, + CostByWeekday, + TokenEfficiency, + ModelMix, + ), + tokenAnalysis: () => preloadComponents(TokensOverTime, TokenTypes), + requestAnalysis: () => + preloadComponents(RequestsOverTime, RequestCacheHitRateByModel, RequestQuality), + advancedAnalysis: () => preloadComponents(DistributionAnalysis, CorrelationAnalysis), + comparisons: () => preloadComponents(PeriodComparison, AnomalyDetection), + tables: () => preloadComponents(ModelEfficiency, ProviderEfficiency, RecentDays), +} satisfies DashboardSectionPreloaders diff --git a/src/components/dashboard/sections/dashboard-section-metadata.ts b/src/components/dashboard/sections/dashboard-section-metadata.ts new file mode 100644 index 0000000..067ec38 --- /dev/null +++ b/src/components/dashboard/sections/dashboard-section-metadata.ts @@ -0,0 +1,28 @@ +import type { DashboardSectionId } from '@/types' + +/** Reserved placeholder heights used while dashboard sections lazy-render on scroll. 
*/ +export const dashboardSectionPlaceholderClassName: Record = { + insights: 'min-h-[260px]', + metrics: 'min-h-[320px]', + today: 'min-h-[320px]', + currentMonth: 'min-h-[360px]', + activity: 'min-h-[360px]', + forecastCache: 'min-h-[900px]', + limits: 'min-h-[480px]', + costAnalysis: 'min-h-[1460px]', + tokenAnalysis: 'min-h-[430px]', + requestAnalysis: 'min-h-[1040px]', + advancedAnalysis: 'min-h-[760px]', + comparisons: 'min-h-[480px]', + tables: 'min-h-[1100px]', +} + +/** DOM anchor aliases that preserve existing dashboard deep links. */ +export const dashboardSectionAnchorMap: Partial> = { + costAnalysis: 'charts', + currentMonth: 'current-month', + forecastCache: 'forecast-cache', + tokenAnalysis: 'token-analysis', + requestAnalysis: 'request-analysis', + advancedAnalysis: 'advanced-analysis', +} diff --git a/src/components/dashboard/sections/dashboard-section-renderer-types.ts b/src/components/dashboard/sections/dashboard-section-renderer-types.ts new file mode 100644 index 0000000..c961f90 --- /dev/null +++ b/src/components/dashboard/sections/dashboard-section-renderer-types.ts @@ -0,0 +1,27 @@ +import type { Dispatch, ReactNode, SetStateAction } from 'react' +import type { TFunction } from 'i18next' +import type { DashboardSectionId } from '@/types' +import type { DashboardSectionsViewModel } from '@/types/dashboard-view-model' + +/** Options passed through to the animated dashboard section shell. */ +export interface DashboardSectionRenderOptions { + eager?: boolean + onPreload?: () => void | Promise +} + +/** Shared render context injected into dashboard section family renderers. 
*/ +export interface DashboardSectionRenderContext { + viewModel: DashboardSectionsViewModel + t: TFunction + forecastZoomOpen: boolean + setForecastZoomOpen: Dispatch> + renderAnimatedSection: ( + sectionId: DashboardSectionId, + children: ReactNode, + options?: DashboardSectionRenderOptions, + ) => ReactNode + renderLazySection: (content: ReactNode, className?: string) => ReactNode +} + +/** Renders one dashboard section from the shared section render context. */ +export type DashboardSectionRenderer = (context: DashboardSectionRenderContext) => ReactNode diff --git a/src/components/dashboard/sections/dashboard-section-renderers.ts b/src/components/dashboard/sections/dashboard-section-renderers.ts new file mode 100644 index 0000000..da80f6b --- /dev/null +++ b/src/components/dashboard/sections/dashboard-section-renderers.ts @@ -0,0 +1,25 @@ +import type { DashboardSectionId } from '@/types' +import { analysisSectionRenderers } from './dashboard-analysis-sections' +import { comparisonTableSectionRenderers } from './dashboard-comparison-table-sections' +import { forecastSectionRenderers } from './dashboard-forecast-sections' +import { overviewSectionRenderers } from './dashboard-overview-sections' +import type { + DashboardSectionRenderContext, + DashboardSectionRenderer, +} from './dashboard-section-renderer-types' + +/** Complete dashboard section renderer registry assembled from section family owners. */ +export const dashboardSectionRenderers = { + ...overviewSectionRenderers, + ...forecastSectionRenderers, + ...analysisSectionRenderers, + ...comparisonTableSectionRenderers, +} satisfies Record + +/** Renders a dashboard section through the family-owned renderer registry. 
*/ +export function renderDashboardSection( + sectionId: DashboardSectionId, + context: DashboardSectionRenderContext, +) { + return dashboardSectionRenderers[sectionId](context) +} diff --git a/src/hooks/use-dashboard-controller-derived-state.ts b/src/hooks/use-dashboard-controller-derived-state.ts index ee0e6a6..ab1537a 100644 --- a/src/hooks/use-dashboard-controller-derived-state.ts +++ b/src/hooks/use-dashboard-controller-derived-state.ts @@ -28,6 +28,10 @@ export function useDashboardControllerDerivedState({ }: DashboardControllerDerivedStateParams): DashboardControllerDerivedState { const filters = useDashboardFilters(daily, settings.defaultFilters) const computed = useComputedMetrics(filters.filteredData, locale) + const dailyCosts = useMemo( + () => filters.filteredData.map((entry) => entry.totalCost), + [filters.filteredData], + ) const totalCalendarDays = useMemo(() => { if (!filters.dateRange || filters.viewMode !== 'daily') return 0 @@ -99,6 +103,7 @@ export function useDashboardControllerDerivedState({ hasData, filters, computed, + dailyCosts, totalCalendarDays, todayData, hasCurrentMonthData, diff --git a/src/hooks/use-dashboard-controller.ts b/src/hooks/use-dashboard-controller.ts index 35d9e9b..c0b573f 100644 --- a/src/hooks/use-dashboard-controller.ts +++ b/src/hooks/use-dashboard-controller.ts @@ -213,6 +213,7 @@ export function useDashboardControllerWithBootstrap( viewMode: derived.filters.viewMode, totalCalendarDays: derived.totalCalendarDays, filteredData: derived.filters.filteredData, + dailyCosts: derived.dailyCosts, filteredDailyData: derived.filters.filteredDailyData, todayData: derived.todayData, hasCurrentMonthData: derived.hasCurrentMonthData, diff --git a/src/types/dashboard-controller.d.ts b/src/types/dashboard-controller.d.ts index a7dee83..5526720 100644 --- a/src/types/dashboard-controller.d.ts +++ b/src/types/dashboard-controller.d.ts @@ -108,6 +108,7 @@ export interface DashboardControllerDerivedState { hasData: boolean filters: 
DashboardControllerFiltersState computed: DashboardControllerComputedState + dailyCosts: number[] totalCalendarDays: number todayData: DailyUsage | null hasCurrentMonthData: boolean diff --git a/src/types/dashboard-view-model.d.ts b/src/types/dashboard-view-model.d.ts index 9cc9198..de531f7 100644 --- a/src/types/dashboard-view-model.d.ts +++ b/src/types/dashboard-view-model.d.ts @@ -204,6 +204,7 @@ export interface DashboardOverviewSectionsViewModel { viewMode: ViewMode totalCalendarDays: number filteredData: DailyUsage[] + dailyCosts: number[] filteredDailyData: DailyUsage[] todayData: DailyUsage | null hasCurrentMonthData: boolean diff --git a/tests/architecture/dashboard-sections-contract.test.ts b/tests/architecture/dashboard-sections-contract.test.ts index dd70344..b8eb89c 100644 --- a/tests/architecture/dashboard-sections-contract.test.ts +++ b/tests/architecture/dashboard-sections-contract.test.ts @@ -50,6 +50,20 @@ function findFunction(sourceFile: ts.SourceFile, name: string) { return declaration } +function getImportSpecifiers(sourceFile: ts.SourceFile) { + return sourceFile.statements.flatMap((statement) => { + if ( + ts.isImportDeclaration(statement) && + statement.moduleSpecifier && + ts.isStringLiteral(statement.moduleSpecifier) + ) { + return [statement.moduleSpecifier.text] + } + + return [] + }) +} + function getIdentifierText(name: ts.PropertyName | ts.BindingName) { if (ts.isIdentifier(name)) return name.text throw new Error(`Expected identifier, received ${ts.SyntaxKind[name.kind]}`) @@ -92,6 +106,53 @@ describe('dashboard sections contract guardrails', () => { ]) }) + it('keeps DashboardSections as a thin section orchestrator', () => { + const sourceFile = readSourceFile(dashboardSectionsPath) + const source = sourceFile.getFullText() + const imports = getImportSpecifiers(sourceFile) + + expect(source).not.toContain('switch (sectionId)') + expect(source).not.toContain('function lazyWithPreload') + expect(source).not.toContain('const 
dashboardSectionPreloaders =') + for (const forbiddenImport of [ + '../cards/PrimaryMetrics', + '../features/forecast/CostForecast', + '../features/heatmap/HeatmapCalendar', + '../features/risk/ConcentrationRisk', + '../charts/CostOverTime', + '../tables/ModelEfficiency', + ]) { + expect(imports).not.toContain(forbiddenImport) + } + expect(imports).toEqual( + expect.arrayContaining([ + './sections/dashboard-section-lazy-components', + './sections/dashboard-section-metadata', + './sections/dashboard-section-renderers', + ]), + ) + }) + + it('keeps section renderer ownership split by section family', () => { + const rendererRegistryPath = path.resolve( + process.cwd(), + 'src/components/dashboard/sections/dashboard-section-renderers.ts', + ) + const sourceFile = readSourceFile(rendererRegistryPath) + const source = sourceFile.getFullText() + const imports = getImportSpecifiers(sourceFile) + + expect(source).not.toContain('switch (sectionId)') + expect(imports).toEqual( + expect.arrayContaining([ + './dashboard-overview-sections', + './dashboard-forecast-sections', + './dashboard-analysis-sections', + './dashboard-comparison-table-sections', + ]), + ) + }) + it('keeps DashboardSectionsViewModel split into section bundles', () => { const sourceFile = readSourceFile(dashboardViewModelPath) diff --git a/tests/frontend/dashboard-controller-test-helpers.ts b/tests/frontend/dashboard-controller-test-helpers.ts index 771c865..7e5603b 100644 --- a/tests/frontend/dashboard-controller-test-helpers.ts +++ b/tests/frontend/dashboard-controller-test-helpers.ts @@ -121,6 +121,7 @@ export function createDashboardSectionsViewModel( viewMode: 'daily', totalCalendarDays: 0, filteredData: [], + dailyCosts: [], filteredDailyData: [], todayData: null, hasCurrentMonthData: false, diff --git a/tests/frontend/dashboard-sections-rendering.test.tsx b/tests/frontend/dashboard-sections-rendering.test.tsx index 1f57122..069b270 100644 --- a/tests/frontend/dashboard-sections-rendering.test.tsx +++ 
b/tests/frontend/dashboard-sections-rendering.test.tsx @@ -253,6 +253,20 @@ describe('DashboardSections rendering contracts', () => { expect(dashboardMotionMocks.cancelPreloads).toHaveBeenCalledTimes(1) }) + it('keeps the activity heatmap section viewport lazy while overview metrics render eagerly', () => { + const viewModel = createDashboardSectionsViewModel({ + layout: { + sectionOrder: ['metrics', 'activity'], + sectionVisibility: createVisibility(), + }, + }) + + render() + + expect(screen.getByTestId('section-metrics')).toHaveAttribute('data-eager', 'true') + expect(screen.getByTestId('section-activity')).toHaveAttribute('data-eager', 'false') + }) + it('opens the forecast zoom dialog from lazy forecast cards', async () => { const viewModel = createDashboardSectionsViewModel({ layout: { From 892d551cfcb99a9ec640594ec07816db45729a9b Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Fri, 1 May 2026 10:58:51 +0200 Subject: [PATCH 30/42] Split HTTP router route ownership --- server/http-router.js | 638 ++---------------- server/routes/auto-import-routes.js | 109 +++ server/routes/http-route-utils.js | 55 ++ server/routes/report-routes.js | 82 +++ server/routes/runtime-routes.js | 41 ++ server/routes/settings-routes.js | 125 ++++ server/routes/static-routes.js | 128 ++++ server/routes/usage-routes.js | 185 +++++ .../server-http-boundaries.test.ts | 34 + .../server-runtime-state-contract.test.ts | 6 +- tests/unit/http-router-mutations.test.ts | 4 +- 11 files changed, 838 insertions(+), 569 deletions(-) create mode 100644 server/routes/auto-import-routes.js create mode 100644 server/routes/http-route-utils.js create mode 100644 server/routes/report-routes.js create mode 100644 server/routes/runtime-routes.js create mode 100644 server/routes/settings-routes.js create mode 100644 server/routes/static-routes.js create mode 100644 server/routes/usage-routes.js diff --git a/server/http-router.js b/server/http-router.js index 8524681..fa79575 100644 --- a/server/http-router.js 
+++ b/server/http-router.js @@ -1,3 +1,12 @@ +const { createAutoImportRoutes } = require('./routes/auto-import-routes'); +const { createReportRoutes } = require('./routes/report-routes'); +const { createRuntimeRoutes } = require('./routes/runtime-routes'); +const { createSettingsRoutes } = require('./routes/settings-routes'); +const { createStaticRouteHandler } = require('./routes/static-routes'); +const { createUsageRoutes } = require('./routes/usage-routes'); +const { readMutationBody, sendSSE } = require('./routes/http-route-utils'); + +/** Creates the HTTP router that validates requests and dispatches route groups. */ function createHttpRouter({ fs, path, @@ -19,164 +28,69 @@ function createHttpRouter({ validateMutationRequest, validateRequestHost, } = httpUtils; - const { - extractSettingsImportPayload, - extractUsageImportPayload, - isPayloadTooLargeError, - isPersistedStateError, - mergeUsageData, - readData, - readSettings, - unlinkIfExists, - _updateDataLoadStateUnlocked, - updateSettings, - withFileMutationLock, - withSettingsAndDataMutationLock, - writeData, - writeSettings, - normalizeSettings, - paths: { dataFile, settingsFile }, - } = dataRuntime; - const { - createAutoImportMessageEvent, - formatAutoImportMessageEvent, - acquireAutoImportLease, - lookupLatestToktrackVersion, - performAutoImport, - toAutoImportErrorEvent, - } = autoImportRuntime; - - const mimeTypes = { - '.html': 'text/html; charset=utf-8', - '.css': 'text/css; charset=utf-8', - '.js': 'application/javascript; charset=utf-8', - '.json': 'application/json; charset=utf-8', - '.png': 'image/png', - '.jpg': 'image/jpeg', - '.jpeg': 'image/jpeg', - '.gif': 'image/gif', - '.svg': 'image/svg+xml', - '.ico': 'image/x-icon', - '.woff': 'font/woff', - '.woff2': 'font/woff2', - }; - - function sendSSE(res, event, data) { - res.write(`event: ${event}\ndata: ${JSON.stringify(data)}\n\n`); - } - - function getCacheControl(filePath) { - if (filePath.includes(path.sep + 'assets' + path.sep)) { - 
return 'public, max-age=31536000, immutable'; - } - if (filePath.endsWith('.html')) { - return 'no-cache'; - } - return 'public, max-age=86400'; - } - - function writeStaticErrorResponse(res, status, message) { - res.writeHead(status, { - 'Content-Type': 'application/json; charset=utf-8', - ...securityHeaders, + const routeReadMutationBody = (req, res, messages) => + readMutationBody(req, res, { + readBody, + json, + isPayloadTooLargeError: dataRuntime.isPayloadTooLargeError, + ...messages, }); - res.end(JSON.stringify({ message })); - } - - function getErrorMessage(error, fallback) { - return error && typeof error.message === 'string' && error.message ? error.message : fallback; - } + const usageRoutes = createUsageRoutes({ + json, + validateMutationRequest, + readMutationBody: routeReadMutationBody, + dataRuntime, + }); + const settingsRoutes = createSettingsRoutes({ + json, + validateMutationRequest, + readMutationBody: routeReadMutationBody, + dataRuntime, + }); + const autoImportRoutes = createAutoImportRoutes({ + json, + validateMutationRequest, + securityHeaders, + autoImportRuntime, + sendSSE, + }); + const runtimeRoutes = createRuntimeRoutes({ + json, + getRuntimeSnapshot, + autoImportRuntime, + }); + const reportRoutes = createReportRoutes({ + json, + readMutationBody: routeReadMutationBody, + sendBuffer, + dataRuntime, + generatePdfReport, + }); + const staticRoutes = createStaticRouteHandler({ + fs, + path, + staticRoot, + securityHeaders, + prepareHtmlResponse, + json, + }); + const apiRouteHandlers = [ + usageRoutes.handleUsageRoutes, + settingsRoutes.handleSettingsRoutes, + autoImportRoutes.handleAutoImportRoutes, + runtimeRoutes.handleRuntimeRoutes, + reportRoutes.handleReportRoutes, + ]; - async function readMutationBody(req, res, { tooLargeMessage, invalidMessage }) { - try { - return { ok: true, body: await readBody(req) }; - } catch (error) { - if (isPayloadTooLargeError(error)) { - json(res, 413, { message: tooLargeMessage }); - return { ok: 
false }; + async function dispatchApiRoute(apiPath, req, res) { + for (const handleApiRoute of apiRouteHandlers) { + const routeResult = await handleApiRoute(apiPath, req, res); + if (routeResult !== false) { + return true; } - json(res, 400, { message: getErrorMessage(error, invalidMessage) }); - return { ok: false }; } - } - function writeMutationServerError(res) { - return json(res, 500, { message: 'Server error' }); - } - - function sendStaticFile(res, reqPath, data) { - const ext = path.extname(reqPath).toLowerCase(); - const contentType = mimeTypes[ext] || 'application/octet-stream'; - const isHtml = ext === '.html'; - const htmlResponse = isHtml ? prepareHtmlResponse(data.toString('utf8')) : null; - const responseBody = htmlResponse ? htmlResponse.body : data; - const responseSecurityHeaders = htmlResponse ? htmlResponse.headers : securityHeaders; - - res.writeHead(200, { - 'Content-Type': contentType, - 'Cache-Control': getCacheControl(reqPath), - ...responseSecurityHeaders, - }); - res.end(responseBody); - } - - function readStaticFile(reqPath) { - if (fs.promises && typeof fs.promises.readFile === 'function') { - return fs.promises.readFile(reqPath); - } - - return new Promise((resolve, reject) => { - fs.readFile(reqPath, (error, data) => { - if (error) { - reject(error); - return; - } - resolve(data); - }); - }); - } - - function shouldServeSpaFallback(req, safePath) { - const acceptHeader = req.headers?.accept || req.headers?.Accept || ''; - const acceptsHtml = String( - Array.isArray(acceptHeader) ? 
acceptHeader[0] : acceptHeader, - ).includes('text/html'); - return acceptsHtml || safePath.endsWith('/') || path.extname(safePath) === ''; - } - - async function serveFile(req, res, reqPath, safePath) { - try { - const data = await readStaticFile(reqPath); - sendStaticFile(res, reqPath, data); - } catch (error) { - if (error && error.code === 'ENOENT') { - if (!shouldServeSpaFallback(req, safePath)) { - writeStaticErrorResponse(res, 404, 'Not Found'); - return; - } - - try { - const indexPath = path.join(staticRoot, 'index.html'); - const html = await readStaticFile(indexPath); - sendStaticFile(res, indexPath, html); - } catch { - writeStaticErrorResponse(res, 500, 'Internal Server Error'); - } - return; - } - - const invalidPath = error && error.code === 'ERR_INVALID_ARG_VALUE'; - const directoryRead = error && error.code === 'EISDIR'; - writeStaticErrorResponse( - res, - invalidPath ? 400 : directoryRead ? 403 : 500, - invalidPath - ? 'Invalid request path' - : directoryRead - ? 'Access denied' - : 'Internal Server Error', - ); - } + return false; } async function handleServerRequest(req, res) { @@ -208,407 +122,12 @@ function createHttpRouter({ return json(res, 404, { message: 'Not Found' }); } - if (apiPath === '/usage') { - if (req.method === 'GET') { - let data; - try { - data = readData(); - } catch (error) { - if (isPersistedStateError(error, 'usage')) { - return json(res, 500, { message: error.message }); - } - throw error; - } - return json( - res, - 200, - data || { - daily: [], - totals: { - inputTokens: 0, - outputTokens: 0, - cacheCreationTokens: 0, - cacheReadTokens: 0, - thinkingTokens: 0, - totalCost: 0, - totalTokens: 0, - requestCount: 0, - }, - }, - ); - } - if (req.method === 'DELETE') { - const validationError = validateMutationRequest(req); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - try { - await withSettingsAndDataMutationLock(async () => { - await 
unlinkIfExists(dataFile); - await _updateDataLoadStateUnlocked({ - lastLoadedAt: null, - lastLoadSource: null, - }); - }); - } catch (error) { - if (isPersistedStateError(error, 'usage') || isPersistedStateError(error, 'settings')) { - return json(res, 500, { message: error.message }); - } - return writeMutationServerError(res); - } - return json(res, 200, { success: true }); - } - return json(res, 405, { message: 'Method Not Allowed' }); - } - - if (apiPath === '/runtime') { - if (req.method !== 'GET') { - return json(res, 405, { message: 'Method Not Allowed' }); - } - - return json(res, 200, getRuntimeSnapshot()); - } - - if (apiPath === '/settings') { - if (req.method === 'GET') { - try { - return json(res, 200, readSettings()); - } catch (error) { - if (isPersistedStateError(error, 'settings')) { - return json(res, 500, { message: error.message }); - } - throw error; - } - } - - if (req.method === 'DELETE') { - const validationError = validateMutationRequest(req); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - try { - const settings = await withFileMutationLock(settingsFile, async () => { - await unlinkIfExists(settingsFile); - return readSettings(); - }); - return json(res, 200, { success: true, settings }); - } catch (error) { - if (isPersistedStateError(error, 'settings')) { - return json(res, 500, { message: error.message }); - } - return writeMutationServerError(res); - } - } - - if (req.method === 'PATCH') { - const validationError = validateMutationRequest(req, { requiresJsonContentType: true }); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - - const bodyResult = await readMutationBody(req, res, { - tooLargeMessage: 'Settings request too large', - invalidMessage: 'Invalid settings request', - }); - if (!bodyResult.ok) { - return; - } - - try { - return json(res, 200, await updateSettings(bodyResult.body)); - } catch (error) 
{ - if (isPersistedStateError(error, 'settings')) { - return json(res, 500, { message: error.message }); - } - return writeMutationServerError(res); - } - } - - return json(res, 405, { message: 'Method Not Allowed' }); - } - - if (apiPath === '/settings/import') { - if (req.method !== 'POST') { - return json(res, 405, { message: 'Method Not Allowed' }); - } - - const validationError = validateMutationRequest(req, { requiresJsonContentType: true }); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - - const bodyResult = await readMutationBody(req, res, { - tooLargeMessage: 'Settings file too large', - invalidMessage: 'Invalid settings file', - }); - if (!bodyResult.ok) { - return; - } - - let importedSettings; - try { - importedSettings = normalizeSettings(extractSettingsImportPayload(bodyResult.body)); - } catch (error) { - return json(res, 400, { message: getErrorMessage(error, 'Invalid settings file') }); - } - - try { - const settings = await withFileMutationLock(settingsFile, async () => { - await writeSettings(importedSettings); - return readSettings(); - }); - return json(res, 200, settings); - } catch (error) { - if (isPersistedStateError(error, 'settings')) { - return json(res, 500, { message: error.message }); - } - return writeMutationServerError(res); - } - } - - if (apiPath === '/upload') { - if (req.method === 'POST') { - const validationError = validateMutationRequest(req, { requiresJsonContentType: true }); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - - const bodyResult = await readMutationBody(req, res, { - tooLargeMessage: 'File too large (max. 10 MB)', - invalidMessage: 'Invalid JSON', - }); - if (!bodyResult.ok) { - return; - } - - let nextData; - try { - const normalized = dataRuntime.normalizeIncomingData - ? 
dataRuntime.normalizeIncomingData(bodyResult.body) - : null; - nextData = normalized || bodyResult.body; - } catch (error) { - return json(res, 400, { message: getErrorMessage(error, 'Invalid JSON') }); - } - - try { - await withSettingsAndDataMutationLock(async () => { - await writeData(nextData); - await _updateDataLoadStateUnlocked({ - lastLoadedAt: new Date().toISOString(), - lastLoadSource: 'file', - }); - }); - return json(res, 200, { - days: nextData.daily.length, - totalCost: nextData.totals.totalCost, - }); - } catch (error) { - if (isPersistedStateError(error, 'settings') || isPersistedStateError(error, 'usage')) { - return json(res, 500, { message: error.message }); - } - return writeMutationServerError(res); - } - } - return json(res, 405, { message: 'Method Not Allowed' }); - } - - if (apiPath === '/usage/import') { - if (req.method !== 'POST') { - return json(res, 405, { message: 'Method Not Allowed' }); - } - - const validationError = validateMutationRequest(req, { requiresJsonContentType: true }); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - - const bodyResult = await readMutationBody(req, res, { - tooLargeMessage: 'Usage backup file too large', - invalidMessage: 'Invalid usage backup file', - }); - if (!bodyResult.ok) { + if (apiPath !== null) { + const handled = await dispatchApiRoute(apiPath, req, res); + if (handled) { return; } - let importedData; - try { - const usagePayload = extractUsageImportPayload(bodyResult.body); - importedData = dataRuntime.normalizeIncomingData - ? 
dataRuntime.normalizeIncomingData(usagePayload) - : usagePayload; - } catch (error) { - return json(res, 400, { message: getErrorMessage(error, 'Invalid usage backup file') }); - } - - try { - const result = await withSettingsAndDataMutationLock(async () => { - const currentData = readData(); - const merged = mergeUsageData(currentData, importedData); - await writeData(merged.data); - await _updateDataLoadStateUnlocked({ - lastLoadedAt: new Date().toISOString(), - lastLoadSource: 'file', - }); - return merged; - }); - return json(res, 200, result.summary); - } catch (error) { - if (isPersistedStateError(error, 'usage') || isPersistedStateError(error, 'settings')) { - return json(res, 500, { message: error.message }); - } - return writeMutationServerError(res); - } - } - - if (apiPath === '/auto-import/stream') { - if (req.method !== 'POST') { - return json(res, 405, { message: 'Method Not Allowed' }); - } - - const validationError = validateMutationRequest(req); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - - let autoImportLease; - try { - autoImportLease = acquireAutoImportLease(); - } catch (error) { - if (error?.messageKey !== 'autoImportRunning') { - throw error; - } - return json(res, 409, { - message: formatAutoImportMessageEvent(createAutoImportMessageEvent('autoImportRunning')), - }); - } - - res.writeHead(200, { - 'Content-Type': 'text/event-stream', - 'Cache-Control': 'no-cache', - Connection: 'keep-alive', - 'X-Accel-Buffering': 'no', - ...securityHeaders, - }); - - let aborted = false; - req.on('close', () => { - aborted = true; - }); - - try { - const result = await performAutoImport({ - source: 'auto-import', - onCheck: (event) => { - if (!aborted) { - sendSSE(res, 'check', event); - } - }, - onProgress: (event) => { - if (!aborted) { - sendSSE(res, 'progress', event); - } - }, - onOutput: (line) => { - if (!aborted) { - sendSSE(res, 'stderr', { line }); - } - }, - signalOnClose: 
(close) => { - req.on('close', close); - }, - lease: autoImportLease, - }); - - if (aborted) { - return; - } - - sendSSE(res, 'success', result); - sendSSE(res, 'done', {}); - res.end(); - } catch (error) { - if (aborted) { - return; - } - sendSSE(res, 'error', toAutoImportErrorEvent(error)); - sendSSE(res, 'done', {}); - res.end(); - } finally { - autoImportLease.release(); - } - return; - } - - if (apiPath === '/toktrack/version-status') { - if (req.method !== 'GET') { - return json(res, 405, { message: 'Method Not Allowed' }); - } - - try { - return json(res, 200, await lookupLatestToktrackVersion()); - } catch (error) { - return json(res, 503, { - message: 'Service Unavailable', - detail: getErrorMessage(error, 'Could not determine the latest toktrack version.'), - }); - } - } - - if (apiPath === '/report/pdf') { - if (req.method !== 'POST') { - return json(res, 405, { message: 'Method Not Allowed' }); - } - - const validationError = validateMutationRequest(req, { requiresJsonContentType: true }); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - - let data; - try { - data = readData(); - } catch (error) { - if (isPersistedStateError(error, 'usage')) { - return json(res, 500, { message: error.message }); - } - throw error; - } - if (!data || !Array.isArray(data.daily) || data.daily.length === 0) { - return json(res, 400, { message: 'No data available for the report.' }); - } - - let body; - try { - body = await readBody(req); - } catch (error) { - const status = isPayloadTooLargeError(error) ? 413 : 400; - return json(res, status, { - message: isPayloadTooLargeError(error) - ? 
'Report request too large' - : 'Invalid report request', - }); - } - - try { - const result = await generatePdfReport(data.daily, body || {}); - return sendBuffer( - res, - 200, - { - 'Content-Type': 'application/pdf', - 'Content-Disposition': `attachment; filename="${result.filename}"`, - }, - result.buffer, - ); - } catch (error) { - const message = error && error.message ? error.message : 'PDF generation failed'; - const status = error && error.code === 'TYPST_MISSING' ? 503 : 500; - return json(res, status, { message }); - } - } - - if (apiPath !== null) { return json(res, 404, { message: 'API endpoint not found' }); } @@ -622,22 +141,7 @@ function createHttpRouter({ return; } - const safePath = pathname === '/' ? '/index.html' : pathname; - if (safePath.includes('\0')) { - return json(res, 400, { message: 'Invalid request path' }); - } - const resolvedStaticRoot = path.resolve(staticRoot); - const filePath = path.resolve(staticRoot, `.${safePath}`); - - if ( - filePath === resolvedStaticRoot || - (!filePath.startsWith(resolvedStaticRoot + path.sep) && - filePath !== path.resolve(staticRoot, 'index.html')) - ) { - return json(res, 403, { message: 'Access denied' }); - } - - await serveFile(req, res, filePath, safePath); + await staticRoutes.handleStaticRequest(req, res, pathname); } return { diff --git a/server/routes/auto-import-routes.js b/server/routes/auto-import-routes.js new file mode 100644 index 0000000..ba28353 --- /dev/null +++ b/server/routes/auto-import-routes.js @@ -0,0 +1,109 @@ +/** Creates the auto-import streaming API route handler. 
*/ +function createAutoImportRoutes({ + json, + validateMutationRequest, + securityHeaders, + autoImportRuntime, + sendSSE, +}) { + const { + createAutoImportMessageEvent, + formatAutoImportMessageEvent, + acquireAutoImportLease, + performAutoImport, + toAutoImportErrorEvent, + } = autoImportRuntime; + + async function handleAutoImportRoutes(apiPath, req, res) { + if (apiPath !== '/auto-import/stream') { + return false; + } + + if (req.method !== 'POST') { + return json(res, 405, { message: 'Method Not Allowed' }); + } + + const validationError = validateMutationRequest(req); + if (validationError) { + return json(res, validationError.status, { message: validationError.message }); + } + + let autoImportLease; + let aborted = false; + try { + autoImportLease = acquireAutoImportLease(); + } catch (error) { + if (error?.messageKey !== 'autoImportRunning') { + throw error; + } + return json(res, 409, { + message: formatAutoImportMessageEvent(createAutoImportMessageEvent('autoImportRunning')), + }); + } + + try { + res.writeHead(200, { + 'Content-Type': 'text/event-stream', + 'Cache-Control': 'no-cache', + Connection: 'keep-alive', + 'X-Accel-Buffering': 'no', + ...securityHeaders, + }); + + // Local abort state gates SSE writes; performAutoImport gets its own close signal below. + req.on('close', () => { + aborted = true; + }); + + const result = await performAutoImport({ + source: 'auto-import', + onCheck: (event) => { + if (!aborted) { + sendSSE(res, 'check', event); + } + }, + onProgress: (event) => { + if (!aborted) { + sendSSE(res, 'progress', event); + } + }, + onOutput: (line) => { + if (!aborted) { + sendSSE(res, 'stderr', { line }); + } + }, + signalOnClose: (close) => { + // This second close listener aborts the long-running import command itself. 
+ req.on('close', close); + }, + lease: autoImportLease, + }); + + if (aborted) { + return true; + } + + sendSSE(res, 'success', result); + sendSSE(res, 'done', {}); + res.end(); + } catch (error) { + if (aborted) { + return true; + } + sendSSE(res, 'error', toAutoImportErrorEvent(error)); + sendSSE(res, 'done', {}); + res.end(); + } finally { + autoImportLease.release(); + } + return true; + } + + return { + handleAutoImportRoutes, + }; +} + +module.exports = { + createAutoImportRoutes, +}; diff --git a/server/routes/http-route-utils.js b/server/routes/http-route-utils.js new file mode 100644 index 0000000..f74d5a5 --- /dev/null +++ b/server/routes/http-route-utils.js @@ -0,0 +1,55 @@ +/** Returns a stable fallback message for unknown thrown values. */ +function getErrorMessage(error, fallback) { + return error && typeof error.message === 'string' && error.message ? error.message : fallback; +} + +/** Reads a mutation body and writes the matching client error response on parse/size failures. */ +async function readMutationBody( + req, + res, + { + readBody, + json, + isPayloadTooLargeError, + tooLargeMessage, + invalidMessage, + suppressErrorDetails = false, + }, +) { + try { + return { ok: true, body: await readBody(req) }; + } catch (error) { + if (isPayloadTooLargeError(error)) { + json(res, 413, { message: tooLargeMessage }); + return { ok: false }; + } + json(res, 400, { + message: suppressErrorDetails ? invalidMessage : getErrorMessage(error, invalidMessage), + }); + return { ok: false }; + } +} + +/** Writes the generic mutation error response used by persistence-backed routes. */ +function writeMutationServerError(json, res) { + return json(res, 500, { message: 'Server error' }); +} + +/** Writes one Server-Sent Event frame. 
*/ +function sendSSE(res, event, data) { + let payload; + try { + payload = JSON.stringify(data); + } catch (error) { + console.error(`Failed to serialize SSE payload for event "${event}":`, error); + payload = JSON.stringify({ message: 'Serialization error', event }); + } + res.write(`event: ${event}\ndata: ${payload}\n\n`); +} + +module.exports = { + getErrorMessage, + readMutationBody, + sendSSE, + writeMutationServerError, +}; diff --git a/server/routes/report-routes.js b/server/routes/report-routes.js new file mode 100644 index 0000000..c1167d1 --- /dev/null +++ b/server/routes/report-routes.js @@ -0,0 +1,82 @@ +const { getErrorMessage } = require('./http-route-utils'); + +function formatReportContentDisposition(filename) { + const rawFilename = String(filename || 'ttdash-report.pdf'); + const asciiFallback = rawFilename + .replace(/["\\;]/g, '_') + .replace(/[^\x20-\x7E]/g, '_') + .trim(); + const safeAsciiFilename = asciiFallback || 'ttdash-report.pdf'; + const encodedFilename = encodeURIComponent(rawFilename); + + return `attachment; filename="${safeAsciiFilename}"; filename*=UTF-8''${encodedFilename}`; +} + +/** Creates PDF report API route handlers. 
*/ +function createReportRoutes({ + json, + readMutationBody, + sendBuffer, + dataRuntime, + generatePdfReport, +}) { + const { isPersistedStateError, readData } = dataRuntime; + + async function handleReportRoutes(apiPath, req, res) { + if (apiPath !== '/report/pdf') { + return false; + } + + if (req.method !== 'POST') { + return json(res, 405, { message: 'Method Not Allowed' }); + } + + let data; + try { + data = readData(); + } catch (error) { + if (isPersistedStateError(error, 'usage')) { + return json(res, 500, { message: error.message }); + } + console.error('Unexpected report route usage read error:', error); + throw new Error('report-routes: unexpected error during usage handling', { cause: error }); + } + if (!data || !Array.isArray(data.daily) || data.daily.length === 0) { + return json(res, 400, { message: 'No data available for the report.' }); + } + + const bodyResult = await readMutationBody(req, res, { + tooLargeMessage: 'Report request too large', + invalidMessage: 'Invalid report request', + suppressErrorDetails: true, + }); + if (!bodyResult.ok) { + return true; + } + + try { + const result = await generatePdfReport(data.daily, bodyResult.body || {}); + return sendBuffer( + res, + 200, + { + 'Content-Type': 'application/pdf', + 'Content-Disposition': formatReportContentDisposition(result.filename), + }, + result.buffer, + ); + } catch (error) { + const message = getErrorMessage(error, 'PDF generation failed'); + const status = error && error.code === 'TYPST_MISSING' ? 503 : 500; + return json(res, status, { message }); + } + } + + return { + handleReportRoutes, + }; +} + +module.exports = { + createReportRoutes, +}; diff --git a/server/routes/runtime-routes.js b/server/routes/runtime-routes.js new file mode 100644 index 0000000..1bd9397 --- /dev/null +++ b/server/routes/runtime-routes.js @@ -0,0 +1,41 @@ +const { getErrorMessage } = require('./http-route-utils'); + +/** Creates runtime metadata and toktrack version-status API route handlers. 
*/ +function createRuntimeRoutes({ json, getRuntimeSnapshot, autoImportRuntime }) { + const { lookupLatestToktrackVersion } = autoImportRuntime; + + async function handleRuntimeRoutes(apiPath, req, res) { + if (apiPath === '/runtime') { + if (req.method !== 'GET') { + return json(res, 405, { message: 'Method Not Allowed' }); + } + + return json(res, 200, getRuntimeSnapshot()); + } + + if (apiPath === '/toktrack/version-status') { + if (req.method !== 'GET') { + return json(res, 405, { message: 'Method Not Allowed' }); + } + + try { + return json(res, 200, await lookupLatestToktrackVersion()); + } catch (error) { + return json(res, 503, { + message: 'Service Unavailable', + detail: getErrorMessage(error, 'Could not determine the latest toktrack version.'), + }); + } + } + + return false; + } + + return { + handleRuntimeRoutes, + }; +} + +module.exports = { + createRuntimeRoutes, +}; diff --git a/server/routes/settings-routes.js b/server/routes/settings-routes.js new file mode 100644 index 0000000..c461c9a --- /dev/null +++ b/server/routes/settings-routes.js @@ -0,0 +1,125 @@ +const { getErrorMessage, writeMutationServerError } = require('./http-route-utils'); + +/** Creates settings and settings-import API route handlers. 
*/ +function createSettingsRoutes({ json, validateMutationRequest, readMutationBody, dataRuntime }) { + const { + extractSettingsImportPayload, + isPersistedStateError, + readSettings, + unlinkIfExists, + updateSettings, + withFileMutationLock, + writeSettings, + normalizeSettings, + paths: { settingsFile }, + } = dataRuntime; + + async function handleSettingsRoutes(apiPath, req, res) { + if (apiPath === '/settings') { + if (req.method === 'GET') { + try { + return json(res, 200, readSettings()); + } catch (error) { + if (isPersistedStateError(error, 'settings')) { + return json(res, 500, { message: error.message }); + } + throw error; + } + } + + if (req.method === 'DELETE') { + const validationError = validateMutationRequest(req); + if (validationError) { + return json(res, validationError.status, { message: validationError.message }); + } + try { + const settings = await withFileMutationLock(settingsFile, async () => { + await unlinkIfExists(settingsFile); + return readSettings(); + }); + return json(res, 200, { success: true, settings }); + } catch (error) { + if (isPersistedStateError(error, 'settings')) { + return json(res, 500, { message: error.message }); + } + return writeMutationServerError(json, res); + } + } + + if (req.method === 'PATCH') { + const validationError = validateMutationRequest(req, { requiresJsonContentType: true }); + if (validationError) { + return json(res, validationError.status, { message: validationError.message }); + } + + const bodyResult = await readMutationBody(req, res, { + tooLargeMessage: 'Settings request too large', + invalidMessage: 'Invalid settings request', + }); + if (!bodyResult.ok) { + return true; + } + + try { + return json(res, 200, await updateSettings(bodyResult.body)); + } catch (error) { + if (isPersistedStateError(error, 'settings')) { + return json(res, 500, { message: error.message }); + } + return writeMutationServerError(json, res); + } + } + + return json(res, 405, { message: 'Method Not Allowed' }); + } 
+ + if (apiPath === '/settings/import') { + if (req.method !== 'POST') { + return json(res, 405, { message: 'Method Not Allowed' }); + } + + const validationError = validateMutationRequest(req, { requiresJsonContentType: true }); + if (validationError) { + return json(res, validationError.status, { message: validationError.message }); + } + + const bodyResult = await readMutationBody(req, res, { + tooLargeMessage: 'Settings file too large', + invalidMessage: 'Invalid settings file', + }); + if (!bodyResult.ok) { + return true; + } + + let importedSettings; + try { + importedSettings = normalizeSettings(extractSettingsImportPayload(bodyResult.body)); + } catch (error) { + return json(res, 400, { message: getErrorMessage(error, 'Invalid settings file') }); + } + + try { + const settings = await withFileMutationLock(settingsFile, async () => { + await writeSettings(importedSettings); + return readSettings(); + }); + return json(res, 200, settings); + } catch (error) { + if (isPersistedStateError(error, 'settings')) { + return json(res, 500, { message: error.message }); + } + return writeMutationServerError(json, res); + } + } + + return false; + } + + return { + handleSettingsRoutes, + }; +} + +module.exports = { + createSettingsRoutes, +}; diff --git a/server/routes/static-routes.js b/server/routes/static-routes.js new file mode 100644 index 0000000..8604700 --- /dev/null +++ b/server/routes/static-routes.js @@ -0,0 +1,128 @@ +/** Creates the static asset and SPA fallback route handler. 
*/ +function createStaticRouteHandler({ + fs, + path, + staticRoot, + securityHeaders, + prepareHtmlResponse, + json, +}) { + const mimeTypes = { + '.html': 'text/html; charset=utf-8', + '.css': 'text/css; charset=utf-8', + '.js': 'application/javascript; charset=utf-8', + '.json': 'application/json; charset=utf-8', + '.png': 'image/png', + '.jpg': 'image/jpeg', + '.jpeg': 'image/jpeg', + '.gif': 'image/gif', + '.svg': 'image/svg+xml', + '.ico': 'image/x-icon', + '.woff': 'font/woff', + '.woff2': 'font/woff2', + }; + + function getCacheControl(filePath) { + if (filePath.includes(path.sep + 'assets' + path.sep)) { + return 'public, max-age=31536000, immutable'; + } + if (filePath.endsWith('.html')) { + return 'no-cache'; + } + return 'public, max-age=86400'; + } + + function writeStaticErrorResponse(res, status, message) { + res.writeHead(status, { + 'Content-Type': 'application/json; charset=utf-8', + ...securityHeaders, + }); + res.end(JSON.stringify({ message })); + } + + function sendStaticFile(res, reqPath, data) { + const ext = path.extname(reqPath).toLowerCase(); + const contentType = mimeTypes[ext] || 'application/octet-stream'; + const isHtml = ext === '.html'; + const htmlResponse = isHtml ? prepareHtmlResponse(data.toString('utf8')) : null; + const responseBody = htmlResponse ? htmlResponse.body : data; + const responseSecurityHeaders = htmlResponse ? htmlResponse.headers : securityHeaders; + + res.writeHead(200, { + 'Content-Type': contentType, + 'Cache-Control': getCacheControl(reqPath), + ...responseSecurityHeaders, + }); + res.end(responseBody); + } + + function readStaticFile(reqPath) { + return fs.promises.readFile(reqPath); + } + + function shouldServeSpaFallback(req, safePath) { + const acceptHeader = req.headers?.accept || ''; + const acceptsHtml = String( + Array.isArray(acceptHeader) ? acceptHeader[0] : acceptHeader, + ).includes('text/html'); + // Preserve direct local navigation and simple test clients that omit Accept for app routes. 
+ return acceptsHtml || safePath.endsWith('/') || path.extname(safePath) === ''; + } + + async function serveFile(req, res, reqPath, safePath) { + try { + const data = await readStaticFile(reqPath); + sendStaticFile(res, reqPath, data); + } catch (error) { + if (error && error.code === 'ENOENT') { + if (!shouldServeSpaFallback(req, safePath)) { + writeStaticErrorResponse(res, 404, 'Not Found'); + return; + } + + try { + const indexPath = path.join(staticRoot, 'index.html'); + const html = await readStaticFile(indexPath); + sendStaticFile(res, indexPath, html); + } catch { + writeStaticErrorResponse(res, 500, 'Internal Server Error'); + } + return; + } + + const errorCode = error?.code; + const staticErrorResponses = { + ERR_INVALID_ARG_VALUE: { status: 400, message: 'Invalid request path' }, + EISDIR: { status: 403, message: 'Access denied' }, + }; + const response = staticErrorResponses[errorCode] || { + status: 500, + message: 'Internal Server Error', + }; + writeStaticErrorResponse(res, response.status, response.message); + } + } + + async function handleStaticRequest(req, res, pathname) { + const safePath = pathname === '/' ? 
'/index.html' : pathname; + if (safePath.includes('\0')) { + return json(res, 400, { message: 'Invalid request path' }); + } + const resolvedStaticRoot = path.resolve(staticRoot); + const filePath = path.resolve(staticRoot, `.${safePath}`); + + if (filePath === resolvedStaticRoot || !filePath.startsWith(resolvedStaticRoot + path.sep)) { + return json(res, 403, { message: 'Access denied' }); + } + + await serveFile(req, res, filePath, safePath); + } + + return { + handleStaticRequest, + }; +} + +module.exports = { + createStaticRouteHandler, +}; diff --git a/server/routes/usage-routes.js b/server/routes/usage-routes.js new file mode 100644 index 0000000..95a9dc3 --- /dev/null +++ b/server/routes/usage-routes.js @@ -0,0 +1,185 @@ +const { getErrorMessage, writeMutationServerError } = require('./http-route-utils'); + +const EMPTY_USAGE_RESPONSE = { + daily: [], + totals: { + inputTokens: 0, + outputTokens: 0, + cacheCreationTokens: 0, + cacheReadTokens: 0, + thinkingTokens: 0, + totalCost: 0, + totalTokens: 0, + requestCount: 0, + }, +}; + +/** Creates usage, upload, and usage-import API route handlers. */ +function createUsageRoutes({ json, validateMutationRequest, readMutationBody, dataRuntime }) { + const { + extractUsageImportPayload, + isPersistedStateError, + mergeUsageData, + readData, + unlinkIfExists, + _updateDataLoadStateUnlocked, + withSettingsAndDataMutationLock, + writeData, + paths: { dataFile }, + } = dataRuntime; + + function normalizeIncomingUsagePayload(payload, invalidMessage) { + if (typeof dataRuntime.normalizeIncomingData !== 'function') { + return payload; + } + + const normalized = dataRuntime.normalizeIncomingData(payload); + // normalizeIncomingData may return undefined to keep the original payload; null is invalid. 
+ if (normalized === undefined) { + return payload; + } + if (normalized === null) { + throw new Error(invalidMessage); + } + return normalized; + } + + async function handleUsageRoutes(apiPath, req, res) { + if (apiPath === '/usage') { + if (req.method === 'GET') { + let data; + try { + data = readData(); + } catch (error) { + if (isPersistedStateError(error, 'usage')) { + return json(res, 500, { message: error.message }); + } + throw error; + } + return json(res, 200, data || EMPTY_USAGE_RESPONSE); + } + if (req.method === 'DELETE') { + const validationError = validateMutationRequest(req); + if (validationError) { + return json(res, validationError.status, { message: validationError.message }); + } + try { + await withSettingsAndDataMutationLock(async () => { + await unlinkIfExists(dataFile); + await _updateDataLoadStateUnlocked({ + lastLoadedAt: null, + lastLoadSource: null, + }); + }); + } catch (error) { + if (isPersistedStateError(error, 'usage') || isPersistedStateError(error, 'settings')) { + return json(res, 500, { message: error.message }); + } + return writeMutationServerError(json, res); + } + return json(res, 200, { success: true }); + } + return json(res, 405, { message: 'Method Not Allowed' }); + } + + if (apiPath === '/upload') { + if (req.method === 'POST') { + const validationError = validateMutationRequest(req, { requiresJsonContentType: true }); + if (validationError) { + return json(res, validationError.status, { message: validationError.message }); + } + + const bodyResult = await readMutationBody(req, res, { + tooLargeMessage: 'File too large (max. 
10 MB)', + invalidMessage: 'Invalid JSON', + }); + if (!bodyResult.ok) { + return true; + } + + let nextData; + try { + nextData = normalizeIncomingUsagePayload(bodyResult.body, 'Invalid JSON'); + } catch (error) { + return json(res, 400, { message: getErrorMessage(error, 'Invalid JSON') }); + } + + try { + await withSettingsAndDataMutationLock(async () => { + await writeData(nextData); + await _updateDataLoadStateUnlocked({ + lastLoadedAt: new Date().toISOString(), + lastLoadSource: 'file', + }); + }); + return json(res, 200, { + days: nextData.daily.length, + totalCost: nextData.totals.totalCost, + }); + } catch (error) { + if (isPersistedStateError(error, 'settings') || isPersistedStateError(error, 'usage')) { + return json(res, 500, { message: error.message }); + } + return writeMutationServerError(json, res); + } + } + return json(res, 405, { message: 'Method Not Allowed' }); + } + + if (apiPath === '/usage/import') { + if (req.method !== 'POST') { + return json(res, 405, { message: 'Method Not Allowed' }); + } + + const validationError = validateMutationRequest(req, { requiresJsonContentType: true }); + if (validationError) { + return json(res, validationError.status, { message: validationError.message }); + } + + const bodyResult = await readMutationBody(req, res, { + tooLargeMessage: 'Usage backup file too large', + invalidMessage: 'Invalid usage backup file', + }); + if (!bodyResult.ok) { + return true; + } + + let importedData; + try { + const usagePayload = extractUsageImportPayload(bodyResult.body); + importedData = normalizeIncomingUsagePayload(usagePayload, 'Invalid usage backup file'); + } catch (error) { + return json(res, 400, { message: getErrorMessage(error, 'Invalid usage backup file') }); + } + + try { + const result = await withSettingsAndDataMutationLock(async () => { + const currentData = readData(); + const merged = mergeUsageData(currentData, importedData); + await writeData(merged.data); + await _updateDataLoadStateUnlocked({ + 
lastLoadedAt: new Date().toISOString(), + lastLoadSource: 'file', + }); + return merged; + }); + return json(res, 200, result.summary); + } catch (error) { + if (isPersistedStateError(error, 'usage') || isPersistedStateError(error, 'settings')) { + return json(res, 500, { message: error.message }); + } + return writeMutationServerError(json, res); + } + } + + return false; + } + + return { + handleUsageRoutes, + }; +} + +module.exports = { + createUsageRoutes, +}; diff --git a/tests/architecture/server-http-boundaries.test.ts b/tests/architecture/server-http-boundaries.test.ts index f959c33..0f88194 100644 --- a/tests/architecture/server-http-boundaries.test.ts +++ b/tests/architecture/server-http-boundaries.test.ts @@ -5,6 +5,16 @@ function readRepoFile(relativePath: string) { return readFileSync(path.resolve(process.cwd(), relativePath), 'utf8') } +const serverRouteFiles = [ + 'server/routes/auto-import-routes.js', + 'server/routes/http-route-utils.js', + 'server/routes/report-routes.js', + 'server/routes/runtime-routes.js', + 'server/routes/settings-routes.js', + 'server/routes/static-routes.js', + 'server/routes/usage-routes.js', +] + describe('server HTTP boundary contract', () => { it('keeps request guard policy behind the HTTP utils facade', () => { const routerSource = readRepoFile('server/http-router.js') @@ -31,4 +41,28 @@ describe('server HTTP boundary contract', () => { expect(requestGuardsSource).toContain('function getHostHeaderHost') expect(requestGuardsSource).toContain('function hasJsonContentType') }) + + it('keeps the HTTP router as a route-group composition shell', () => { + const routerSource = readRepoFile('server/http-router.js') + + expect(routerSource).toContain("require('./routes/usage-routes')") + expect(routerSource).toContain("require('./routes/settings-routes')") + expect(routerSource).toContain("require('./routes/auto-import-routes')") + expect(routerSource).toContain("require('./routes/report-routes')") + 
expect(routerSource).toContain("require('./routes/static-routes')") + expect(routerSource).not.toContain("apiPath === '/settings'") + expect(routerSource).not.toContain("apiPath === '/upload'") + expect(routerSource).not.toContain("apiPath === '/report/pdf'") + }) + + it('keeps route groups dependent on injected services instead of runtime implementations', () => { + for (const routeFile of serverRouteFiles) { + const routeSource = readRepoFile(routeFile) + + expect(routeSource).not.toContain("require('../data-runtime") + expect(routeSource).not.toContain("require('../auto-import-runtime") + expect(routeSource).not.toContain("require('../background-runtime") + expect(routeSource).not.toContain("require('../http-router") + } + }) }) diff --git a/tests/architecture/server-runtime-state-contract.test.ts b/tests/architecture/server-runtime-state-contract.test.ts index b4d108f..1d31ea9 100644 --- a/tests/architecture/server-runtime-state-contract.test.ts +++ b/tests/architecture/server-runtime-state-contract.test.ts @@ -8,9 +8,13 @@ function readRepoFile(relativePath: string) { describe('server runtime state contract', () => { it('keeps auto-import stream state outside the HTTP router', () => { const routerSource = readRepoFile('server/http-router.js') + const autoImportRoutesSource = readRepoFile('server/routes/auto-import-routes.js') expect(routerSource).not.toContain('autoImportStreamRunning') - expect(routerSource).toContain('acquireAutoImportLease') + expect(routerSource).not.toContain('acquireAutoImportLease') + expect(routerSource).toContain('createAutoImportRoutes') + expect(autoImportRoutesSource).not.toContain('autoImportStreamRunning') + expect(autoImportRoutesSource).toContain('acquireAutoImportLease') }) it('keeps mutable runtime flags behind runtime-state services', () => { diff --git a/tests/unit/http-router-mutations.test.ts b/tests/unit/http-router-mutations.test.ts index e078668..f513afe 100644 --- a/tests/unit/http-router-mutations.test.ts +++ 
b/tests/unit/http-router-mutations.test.ts @@ -619,7 +619,9 @@ describe('HTTP router mutation errors', () => { expect(res.status).toBe(200) expect(res.headers['Content-Type']).toBe('application/pdf') - expect(res.headers['Content-Disposition']).toBe('attachment; filename="ttdash-report.pdf"') + expect(res.headers['Content-Disposition']).toBe( + 'attachment; filename="ttdash-report.pdf"; filename*=UTF-8\'\'ttdash-report.pdf', + ) expect(res.body).toBe('%PDF-1.4') expect(generatePdfReport).toHaveBeenCalledWith(usageData.daily, { locale: 'de-CH' }) }) From aec28b2a9599385a57b10bfa68105ea9165eaa49 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Fri, 1 May 2026 13:25:00 +0200 Subject: [PATCH 31/42] Split data runtime services --- docs/architecture.md | 8 +- server/data-runtime.js | 626 ++---------------- server/data-runtime/app-paths.js | 67 ++ server/data-runtime/file-io.js | 122 ++++ server/data-runtime/file-locks.js | 340 ++++++++++ server/data-runtime/import-merge.js | 291 ++++++++ src/types/index.ts | 1 + .../server-data-runtime-boundaries.test.ts | 55 ++ .../dashboard-controller-actions.test.tsx | 1 + tests/frontend/dashboard-error-state.test.tsx | 1 + tests/integration/server-api-imports.test.ts | 1 + tests/unit/data-runtime-contract.test.ts | 54 +- tests/unit/http-router-mutations.test.ts | 1 + tests/unit/server-helpers-file-locks.test.ts | 150 ++++- vitest.setup.frontend.ts | 27 +- 15 files changed, 1179 insertions(+), 566 deletions(-) create mode 100644 server/data-runtime/app-paths.js create mode 100644 server/data-runtime/file-io.js create mode 100644 server/data-runtime/file-locks.js create mode 100644 server/data-runtime/import-merge.js create mode 100644 tests/architecture/server-data-runtime-boundaries.test.ts diff --git a/docs/architecture.md b/docs/architecture.md index c244503..ed40c49 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -52,8 +52,14 @@ The server runtime is intentionally split so `server.js` stays an executable shi - owns 
local mutable runtime state services such as runtime snapshots, singleton runtime leases, and expiring async caches - keeps startup flags, auto-import leases, and toktrack version lookup cache state scoped to one composed app runtime - `server/data-runtime.js` - - owns app-path resolution, persisted usage/settings IO, migration, and file-mutation locks + - is the facade for app-path resolution, persisted usage/settings IO, migration, import merging, and file-mutation locks - consumes the shared settings contract instead of defining local settings defaults or normalizers + - composes focused services from `server/data-runtime/**`: + - `app-paths.js` exposes `resolveDataRuntimeAppPaths` for platform and env override path selection + - `file-io.js` exposes `createDataRuntimeFileIo` for secure directory setup, atomic JSON writes, file modes, and missing-file handling + - `file-locks.js` exposes `createDataRuntimeFileLocks` for in-process and cross-process mutation locks around data/settings files + - `import-merge.js` exposes `createDataRuntimeImportMerge` for backup payload extraction, usage-day equivalence, and non-destructive merge summaries + - wiring model: `server/data-runtime.js` imports these four submodules, injects filesystem/path/runtime configuration, and returns one public data-runtime facade to `server/app-runtime.js` - `server/cli.js` - owns CLI alias normalization, help text, positional command parsing, and port validation - `server/background-runtime.js` diff --git a/server/data-runtime.js b/server/data-runtime.js index 77cd1cd..9a20c48 100644 --- a/server/data-runtime.js +++ b/server/data-runtime.js @@ -4,7 +4,12 @@ const { normalizePersistedAppSettings, normalizeProviderLimits: normalizeSharedProviderLimits, } = require('../shared/app-settings.js'); +const { resolveDataRuntimeAppPaths } = require('./data-runtime/app-paths'); +const { createDataRuntimeFileIo } = require('./data-runtime/file-io'); +const { createDataRuntimeFileLocks } = 
require('./data-runtime/file-locks'); +const { createDataRuntimeImportMerge } = require('./data-runtime/import-merge'); +/** Creates the persistence, locking, import, and settings runtime facade. */ function createDataRuntime({ fs, fsPromises, @@ -23,344 +28,69 @@ function createDataRuntime({ secureFileMode, fileMutationLockTimeoutMs, fileMutationLockStaleMs, + fileMutationLockInstanceMismatchStaleMs = fileMutationLockTimeoutMs, + debugLog = console.debug, getCliAutoLoadActive = () => false, }) { - function resolveAppPaths() { - const homeDir = os.homedir(); - const explicitPaths = { - dataDir: processObject.env.TTDASH_DATA_DIR, - configDir: processObject.env.TTDASH_CONFIG_DIR, - cacheDir: processObject.env.TTDASH_CACHE_DIR, - }; - let platformPaths; - - if (processObject.platform === 'darwin') { - const appSupportDir = path.join(homeDir, 'Library', 'Application Support', appDirName); - platformPaths = { - dataDir: appSupportDir, - configDir: appSupportDir, - cacheDir: path.join(homeDir, 'Library', 'Caches', appDirName), - }; - } else if (isWindows) { - platformPaths = { - dataDir: path.join( - processObject.env.LOCALAPPDATA || path.join(homeDir, 'AppData', 'Local'), - appDirName, - ), - configDir: path.join( - processObject.env.APPDATA || path.join(homeDir, 'AppData', 'Roaming'), - appDirName, - ), - cacheDir: path.join( - processObject.env.LOCALAPPDATA || path.join(homeDir, 'AppData', 'Local'), - appDirName, - 'Cache', - ), - }; - } else { - platformPaths = { - dataDir: path.join( - processObject.env.XDG_DATA_HOME || path.join(homeDir, '.local', 'share'), - appDirNameLinux, - ), - configDir: path.join( - processObject.env.XDG_CONFIG_HOME || path.join(homeDir, '.config'), - appDirNameLinux, - ), - cacheDir: path.join( - processObject.env.XDG_CACHE_HOME || path.join(homeDir, '.cache'), - appDirNameLinux, - ), - }; - } - - return { - dataDir: explicitPaths.dataDir || platformPaths.dataDir, - configDir: explicitPaths.configDir || platformPaths.configDir, - cacheDir: 
explicitPaths.cacheDir || platformPaths.cacheDir, - }; - } - - const appPaths = resolveAppPaths(); + const appPaths = resolveDataRuntimeAppPaths({ + os, + path, + processObject, + appDirName, + appDirNameLinux, + isWindows, + }); const dataFile = path.join(appPaths.dataDir, 'data.json'); const settingsFile = path.join(appPaths.configDir, 'settings.json'); const npxCacheDir = path.join(appPaths.cacheDir, 'npx-cache'); - const fileMutationLocks = new Map(); - - function ensureDir(dirPath) { - fs.mkdirSync(dirPath, { recursive: true, mode: secureDirMode }); - if (!isWindows) { - fs.chmodSync(dirPath, secureDirMode); - } - } - - function ensureAppDirs(extraDirs = []) { - ensureDir(appPaths.dataDir); - ensureDir(appPaths.configDir); - ensureDir(appPaths.cacheDir); - ensureDir(npxCacheDir); - extraDirs.forEach((dirPath) => ensureDir(dirPath)); - } - - function applySecureFileMode(filePath) { - if (!isWindows) { - fs.chmodSync(filePath, secureFileMode); - } - } - - function isMissingFileError(error) { - return error?.code === 'ENOENT' || /no such file/i.test(String(error?.message || error)); - } - - function writeJsonAtomic(filePath, data) { - ensureDir(path.dirname(filePath)); - const tempPath = `${filePath}.${processObject.pid}.${Date.now()}.tmp`; - try { - fs.writeFileSync(tempPath, JSON.stringify(data, null, 2), { - mode: secureFileMode, - }); - applySecureFileMode(tempPath); - fs.renameSync(tempPath, filePath); - } catch (error) { - try { - if (fs.existsSync(tempPath)) { - fs.unlinkSync(tempPath); - } - } catch { - // Ignore cleanup failures so the original write error is preserved. 
- } - throw error; - } - } - - async function writeJsonAtomicAsync(filePath, data) { - const tempPath = `${filePath}.${processObject.pid}.${Date.now()}.tmp`; - let tempPathCreated = false; - const parentDir = path.dirname(filePath); - - try { - await fsPromises.mkdir(parentDir, { recursive: true, mode: secureDirMode }); - if (!isWindows) { - await fsPromises.chmod(parentDir, secureDirMode); - } - tempPathCreated = true; - await fsPromises.writeFile(tempPath, JSON.stringify(data, null, 2), { - mode: secureFileMode, - }); - - if (!isWindows) { - await fsPromises.chmod(tempPath, secureFileMode); - } - - await fsPromises.rename(tempPath, filePath); - } catch (error) { - if (tempPathCreated) { - try { - await fsPromises.unlink(tempPath); - } catch (unlinkError) { - if (unlinkError?.code !== 'ENOENT') { - // Ignore temp-file cleanup failures so the original error wins. - } - } - } - throw error; - } - } - - async function unlinkIfExists(filePath) { - try { - await fsPromises.unlink(filePath); - } catch (error) { - if (error?.code !== 'ENOENT') { - throw error; - } - } - } - - function getFileMutationLockDir(filePath) { - return `${filePath}.lock`; - } - - function getFileMutationLockOwnerPath(lockDir) { - return path.join(lockDir, 'owner.json'); - } - - async function removeFileMutationLockDir(lockDir) { - try { - await fsPromises.rm(lockDir, { recursive: true, force: true }); - } catch (error) { - if (error?.code !== 'ENOENT') { - throw error; - } - } - } - - async function writeFileMutationLockOwner(lockDir) { - const ownerPath = getFileMutationLockOwnerPath(lockDir); - const owner = { - pid: processObject.pid, - createdAt: new Date().toISOString(), - instanceId: runtimeInstanceId, - }; - await fsPromises.writeFile(ownerPath, JSON.stringify(owner, null, 2), { - mode: secureFileMode, - }); - if (!isWindows) { - await fsPromises.chmod(ownerPath, secureFileMode); - } - } - - function isProcessRunning(pid) { - if (!Number.isInteger(pid) || pid <= 0) { - return false; - } - 
- try { - processObject.kill(pid, 0); - return true; - } catch (error) { - return error && error.code === 'EPERM'; - } - } - - async function sleep(ms) { - return new Promise((resolve) => setTimeout(resolve, ms)); - } - - async function shouldReapFileMutationLock(lockDir) { - const ownerPath = getFileMutationLockOwnerPath(lockDir); - let owner = null; - - try { - const rawOwner = await fsPromises.readFile(ownerPath, 'utf-8'); - owner = JSON.parse(rawOwner); - } catch (error) { - if (error?.code !== 'ENOENT') { - // Fall back to age-based cleanup if the owner metadata is missing or malformed. - } - } - - try { - if (Number.isInteger(owner?.pid)) { - return !isProcessRunning(owner.pid); - } - - const ownerCreatedAt = owner?.createdAt ? Date.parse(owner.createdAt) : Number.NaN; - const stats = await fsPromises.stat(lockDir); - const lockAgeMs = Number.isFinite(ownerCreatedAt) - ? Date.now() - ownerCreatedAt - : Date.now() - stats.mtimeMs; - - if (lockAgeMs > fileMutationLockStaleMs) { - return true; - } - - return false; - } catch (error) { - if (error?.code === 'ENOENT') { - return false; - } - throw error; - } - } - - async function withCrossProcessFileMutationLock( - filePath, - operation, - timeoutMs = fileMutationLockTimeoutMs, - ) { - const lockDir = getFileMutationLockDir(filePath); - const startedAt = Date.now(); - - while (true) { - try { - await fsPromises.mkdir(path.dirname(lockDir), { - recursive: true, - mode: secureDirMode, - }); - await fsPromises.mkdir(lockDir, { mode: secureDirMode }); - if (!isWindows) { - await fsPromises.chmod(lockDir, secureDirMode); - } - - try { - await writeFileMutationLockOwner(lockDir); - } catch (error) { - await removeFileMutationLockDir(lockDir).catch(() => undefined); - throw error; - } - - break; - } catch (error) { - if (!error || error.code !== 'EEXIST') { - throw error; - } - - if (await shouldReapFileMutationLock(lockDir)) { - await removeFileMutationLockDir(lockDir).catch(() => undefined); - continue; - } - - if 
(Date.now() - startedAt >= timeoutMs) { - throw new Error(`Could not acquire file mutation lock for ${path.basename(filePath)}.`, { - cause: error, - }); - } - - await sleep(50); - } - } - - try { - return await operation(); - } finally { - try { - await removeFileMutationLockDir(lockDir); - } catch { - // Ignore cleanup races so the original operation result wins. - } - } - } - - async function withFileMutationLock(filePath, operation) { - const previous = fileMutationLocks.get(filePath) || Promise.resolve(); - let releaseCurrent; - const current = new Promise((resolve) => { - releaseCurrent = resolve; - }); - - fileMutationLocks.set(filePath, current); - - await previous.catch(() => undefined); - - try { - return await withCrossProcessFileMutationLock(filePath, operation); - } finally { - releaseCurrent(); - if (fileMutationLocks.get(filePath) === current) { - fileMutationLocks.delete(filePath); - } - } - } - - async function withOrderedFileMutationLocks(filePaths, operation) { - const uniquePaths = Array.from(new Set(filePaths)).sort(); - - const runWithLock = async (index) => { - if (index >= uniquePaths.length) { - return operation(); - } - - const filePath = uniquePaths[index]; - return withFileMutationLock(filePath, () => runWithLock(index + 1)); - }; - - return runWithLock(0); - } - - async function withSettingsAndDataMutationLock(operation) { - return withOrderedFileMutationLocks([settingsFile, dataFile], operation); - } + const fileIo = createDataRuntimeFileIo({ + fs, + fsPromises, + path, + processObject, + appPaths, + npxCacheDir, + isWindows, + secureDirMode, + secureFileMode, + }); + const fileLocks = createDataRuntimeFileLocks({ + fsPromises, + path, + processObject, + runtimeInstanceId, + isWindows, + secureDirMode, + secureFileMode, + fileMutationLockTimeoutMs, + fileMutationLockStaleMs, + fileMutationLockInstanceMismatchStaleMs, + settingsFile, + dataFile, + debugLog, + }); + const importMerge = createDataRuntimeImportMerge({ + 
normalizeIncomingData, + settingsBackupKind, + usageBackupKind, + }); + const { + ensureDir, + ensureAppDirs, + applySecureFileMode, + isMissingFileError, + writeJsonAtomic, + writeJsonAtomicAsync, + unlinkIfExists, + } = fileIo; + const { + getFileMutationLockDir, + withFileMutationLock, + withOrderedFileMutationLocks, + withSettingsAndDataMutationLock, + getPendingFileMutationLockCount, + } = fileLocks; + const { extractSettingsImportPayload, extractUsageImportPayload, mergeUsageData } = importMerge; const normalizeIsoTimestamp = normalizeSharedIsoTimestamp; const normalizeProviderLimits = normalizeSharedProviderLimits; const normalizeSettings = normalizePersistedAppSettings; @@ -405,234 +135,6 @@ function createDataRuntime({ } } - function isPlainObject(value) { - return Boolean(value) && typeof value === 'object' && !Array.isArray(value); - } - - function sortStrings(values) { - return [ - ...new Set( - (Array.isArray(values) ? values : []) - .filter((value) => typeof value === 'string') - .map((value) => value.trim()) - .filter(Boolean), - ), - ].sort((left, right) => left.localeCompare(right)); - } - - function canonicalizeModelBreakdown(entry) { - return { - modelName: typeof entry?.modelName === 'string' ? entry.modelName.trim() : '', - inputTokens: Number(entry?.inputTokens) || 0, - outputTokens: Number(entry?.outputTokens) || 0, - cacheCreationTokens: Number(entry?.cacheCreationTokens) || 0, - cacheReadTokens: Number(entry?.cacheReadTokens) || 0, - thinkingTokens: Number(entry?.thinkingTokens) || 0, - cost: Number(entry?.cost) || 0, - requestCount: Number(entry?.requestCount) || 0, - }; - } - - function canonicalizeUsageDay(day) { - return { - date: typeof day?.date === 'string' ? 
day.date : '', - inputTokens: Number(day?.inputTokens) || 0, - outputTokens: Number(day?.outputTokens) || 0, - cacheCreationTokens: Number(day?.cacheCreationTokens) || 0, - cacheReadTokens: Number(day?.cacheReadTokens) || 0, - thinkingTokens: Number(day?.thinkingTokens) || 0, - totalTokens: Number(day?.totalTokens) || 0, - totalCost: Number(day?.totalCost) || 0, - requestCount: Number(day?.requestCount) || 0, - modelsUsed: sortStrings(day?.modelsUsed), - modelBreakdowns: (Array.isArray(day?.modelBreakdowns) ? day.modelBreakdowns : []) - .map(canonicalizeModelBreakdown) - .sort((left, right) => left.modelName.localeCompare(right.modelName)), - }; - } - - function areUsageDaysEquivalent(left, right) { - const leftDay = canonicalizeUsageDay(left); - const rightDay = canonicalizeUsageDay(right); - const scalarFields = [ - 'date', - 'inputTokens', - 'outputTokens', - 'cacheCreationTokens', - 'cacheReadTokens', - 'thinkingTokens', - 'totalTokens', - 'totalCost', - 'requestCount', - ]; - - for (const field of scalarFields) { - if (leftDay[field] !== rightDay[field]) { - return false; - } - } - - if (leftDay.modelsUsed.length !== rightDay.modelsUsed.length) { - return false; - } - for (let index = 0; index < leftDay.modelsUsed.length; index += 1) { - if (leftDay.modelsUsed[index] !== rightDay.modelsUsed[index]) { - return false; - } - } - - if (leftDay.modelBreakdowns.length !== rightDay.modelBreakdowns.length) { - return false; - } - - const breakdownFields = [ - 'modelName', - 'inputTokens', - 'outputTokens', - 'cacheCreationTokens', - 'cacheReadTokens', - 'thinkingTokens', - 'cost', - 'requestCount', - ]; - for (let index = 0; index < leftDay.modelBreakdowns.length; index += 1) { - const leftBreakdown = leftDay.modelBreakdowns[index]; - const rightBreakdown = rightDay.modelBreakdowns[index]; - for (const field of breakdownFields) { - if (leftBreakdown[field] !== rightBreakdown[field]) { - return false; - } - } - } - - return true; - } - - function 
extractSettingsImportPayload(payload) { - if (!isPlainObject(payload)) { - throw new Error('Uploaded JSON is not a settings backup file.'); - } - - if (payload.kind === settingsBackupKind) { - if (!Object.prototype.hasOwnProperty.call(payload, 'settings')) { - throw new Error('The settings backup file does not contain any settings.'); - } - if (!isPlainObject(payload.settings)) { - throw new Error('The settings backup file has an invalid settings payload.'); - } - return payload.settings; - } - - if (typeof payload.kind === 'string' && payload.kind === usageBackupKind) { - throw new Error('This is a data backup file, not a settings file.'); - } - - throw new Error('Uploaded JSON is not a settings backup file.'); - } - - function extractUsageImportPayload(payload) { - if (!isPlainObject(payload)) { - return payload; - } - - if (payload.kind === usageBackupKind) { - if (!Object.prototype.hasOwnProperty.call(payload, 'data')) { - throw new Error('The usage backup file does not contain any usage data.'); - } - return payload.data; - } - - if (typeof payload.kind === 'string' && payload.kind === settingsBackupKind) { - throw new Error('This is a settings backup file, not a data file.'); - } - - return payload; - } - - function computeUsageTotals(daily) { - return daily.reduce( - (totals, day) => ({ - inputTokens: totals.inputTokens + (day.inputTokens || 0), - outputTokens: totals.outputTokens + (day.outputTokens || 0), - cacheCreationTokens: totals.cacheCreationTokens + (day.cacheCreationTokens || 0), - cacheReadTokens: totals.cacheReadTokens + (day.cacheReadTokens || 0), - thinkingTokens: totals.thinkingTokens + (day.thinkingTokens || 0), - totalCost: totals.totalCost + (day.totalCost || 0), - totalTokens: totals.totalTokens + (day.totalTokens || 0), - requestCount: totals.requestCount + (day.requestCount || 0), - }), - { - inputTokens: 0, - outputTokens: 0, - cacheCreationTokens: 0, - cacheReadTokens: 0, - thinkingTokens: 0, - totalCost: 0, - totalTokens: 0, - 
requestCount: 0, - }, - ); - } - - function mergeUsageData(currentData, importedData) { - const current = - currentData && Array.isArray(currentData.daily) && currentData.daily.length > 0 - ? normalizeIncomingData(currentData) - : null; - - if (!current) { - return { - data: importedData, - summary: { - importedDays: importedData.daily.length, - addedDays: importedData.daily.length, - unchangedDays: 0, - conflictingDays: 0, - totalDays: importedData.daily.length, - }, - }; - } - - const currentByDate = new Map(current.daily.map((day) => [day.date, day])); - let addedDays = 0; - let unchangedDays = 0; - let conflictingDays = 0; - - for (const importedDay of importedData.daily) { - const existingDay = currentByDate.get(importedDay.date); - if (!existingDay) { - currentByDate.set(importedDay.date, importedDay); - addedDays += 1; - continue; - } - - if (areUsageDaysEquivalent(existingDay, importedDay)) { - unchangedDays += 1; - continue; - } - - conflictingDays += 1; - } - - const mergedDaily = [...currentByDate.values()].sort((left, right) => - left.date.localeCompare(right.date), - ); - - return { - data: { - daily: mergedDaily, - totals: computeUsageTotals(mergedDaily), - }, - summary: { - importedDays: importedData.daily.length, - addedDays, - unchangedDays, - conflictingDays, - totalDays: mergedDaily.length, - }, - }; - } - function toSettingsResponse(settings) { return { ...normalizeSettings(settings), @@ -766,7 +268,7 @@ function createDataRuntime({ withFileMutationLock, withOrderedFileMutationLocks, withSettingsAndDataMutationLock, - getPendingFileMutationLockCount: () => fileMutationLocks.size, + getPendingFileMutationLockCount, migrateLegacyDataFile, normalizeIsoTimestamp, normalizeProviderLimits, diff --git a/server/data-runtime/app-paths.js b/server/data-runtime/app-paths.js new file mode 100644 index 0000000..2873f42 --- /dev/null +++ b/server/data-runtime/app-paths.js @@ -0,0 +1,67 @@ +/** Resolves platform-specific data, config, and cache directories for 
the local app runtime. */ +function resolveDataRuntimeAppPaths({ + os, + path, + processObject, + appDirName, + appDirNameLinux, + isWindows, +}) { + const homeDir = os.homedir(); + const explicitPaths = { + dataDir: processObject.env.TTDASH_DATA_DIR, + configDir: processObject.env.TTDASH_CONFIG_DIR, + cacheDir: processObject.env.TTDASH_CACHE_DIR, + }; + let platformPaths; + + if (processObject.platform === 'darwin') { + const appSupportDir = path.join(homeDir, 'Library', 'Application Support', appDirName); + platformPaths = { + dataDir: appSupportDir, + configDir: appSupportDir, + cacheDir: path.join(homeDir, 'Library', 'Caches', appDirName), + }; + } else if (isWindows) { + platformPaths = { + dataDir: path.join( + processObject.env.LOCALAPPDATA || path.join(homeDir, 'AppData', 'Local'), + appDirName, + ), + configDir: path.join( + processObject.env.APPDATA || path.join(homeDir, 'AppData', 'Roaming'), + appDirName, + ), + cacheDir: path.join( + processObject.env.LOCALAPPDATA || path.join(homeDir, 'AppData', 'Local'), + appDirName, + 'Cache', + ), + }; + } else { + platformPaths = { + dataDir: path.join( + processObject.env.XDG_DATA_HOME || path.join(homeDir, '.local', 'share'), + appDirNameLinux, + ), + configDir: path.join( + processObject.env.XDG_CONFIG_HOME || path.join(homeDir, '.config'), + appDirNameLinux, + ), + cacheDir: path.join( + processObject.env.XDG_CACHE_HOME || path.join(homeDir, '.cache'), + appDirNameLinux, + ), + }; + } + + return { + dataDir: explicitPaths.dataDir || platformPaths.dataDir, + configDir: explicitPaths.configDir || platformPaths.configDir, + cacheDir: explicitPaths.cacheDir || platformPaths.cacheDir, + }; +} + +module.exports = { + resolveDataRuntimeAppPaths, +}; diff --git a/server/data-runtime/file-io.js b/server/data-runtime/file-io.js new file mode 100644 index 0000000..8a8d463 --- /dev/null +++ b/server/data-runtime/file-io.js @@ -0,0 +1,122 @@ +/** Creates secure JSON file I/O helpers for the data runtime. 
*/ +function createDataRuntimeFileIo({ + fs, + fsPromises, + path, + processObject, + appPaths, + npxCacheDir, + isWindows, + secureDirMode, + secureFileMode, +}) { + function ensureDir(dirPath) { + fs.mkdirSync(dirPath, { recursive: true, mode: secureDirMode }); + if (!isWindows) { + fs.chmodSync(dirPath, secureDirMode); + } + } + + async function ensureDirAsync(dirPath) { + await fsPromises.mkdir(dirPath, { recursive: true, mode: secureDirMode }); + if (!isWindows) { + await fsPromises.chmod(dirPath, secureDirMode); + } + } + + function ensureAppDirs(extraDirs = []) { + ensureDir(appPaths.dataDir); + ensureDir(appPaths.configDir); + ensureDir(appPaths.cacheDir); + ensureDir(npxCacheDir); + extraDirs.forEach((dirPath) => ensureDir(dirPath)); + } + + function applySecureFileMode(filePath) { + if (!isWindows) { + fs.chmodSync(filePath, secureFileMode); + } + } + + function isMissingFileError(error) { + return error?.code === 'ENOENT'; + } + + function writeJsonAtomic(filePath, data) { + ensureDir(path.dirname(filePath)); + const tempPath = `${filePath}.${processObject.pid}.${Date.now()}.tmp`; + try { + fs.writeFileSync(tempPath, JSON.stringify(data, null, 2), { + mode: secureFileMode, + }); + applySecureFileMode(tempPath); + fs.renameSync(tempPath, filePath); + } catch (error) { + try { + fs.unlinkSync(tempPath); + } catch (cleanupError) { + if (cleanupError?.code !== 'ENOENT') { + throw new AggregateError( + [error, cleanupError], + `Failed atomic JSON write and temp-file cleanup for ${path.basename(filePath)}.`, + ); + } + } + throw error; + } + } + + async function writeJsonAtomicAsync(filePath, data) { + const tempPath = `${filePath}.${processObject.pid}.${Date.now()}.tmp`; + const parentDir = path.dirname(filePath); + + try { + await ensureDirAsync(parentDir); + await fsPromises.writeFile(tempPath, JSON.stringify(data, null, 2), { + mode: secureFileMode, + }); + + if (!isWindows) { + await fsPromises.chmod(tempPath, secureFileMode); + } + + await 
fsPromises.rename(tempPath, filePath); + } catch (error) { + try { + await fsPromises.unlink(tempPath); + } catch (unlinkError) { + if (unlinkError?.code !== 'ENOENT') { + throw new AggregateError( + [error, unlinkError], + `Failed atomic JSON write and temp-file cleanup for ${path.basename(filePath)}.`, + ); + } + } + throw error; + } + } + + async function unlinkIfExists(filePath) { + try { + await fsPromises.unlink(filePath); + } catch (error) { + if (error?.code !== 'ENOENT') { + throw error; + } + } + } + + return { + ensureDir, + ensureAppDirs, + applySecureFileMode, + isMissingFileError, + writeJsonAtomic, + writeJsonAtomicAsync, + unlinkIfExists, + }; +} + +module.exports = { + createDataRuntimeFileIo, +}; diff --git a/server/data-runtime/file-locks.js b/server/data-runtime/file-locks.js new file mode 100644 index 0000000..3b3baca --- /dev/null +++ b/server/data-runtime/file-locks.js @@ -0,0 +1,340 @@ +/** Creates in-process and cross-process file mutation locks for data runtime writes. 
*/ +function getInvalidFileLockOptionNames({ + fsPromises, + path, + processObject, + runtimeInstanceId, + isWindows, + secureDirMode, + secureFileMode, + fileMutationLockTimeoutMs, + fileMutationLockStaleMs, + fileMutationLockInstanceMismatchStaleMs, + settingsFile, + dataFile, + debugLog, +}) { + const invalidOptions = []; + + if (!fsPromises) invalidOptions.push('fsPromises'); + if (!path) invalidOptions.push('path'); + if (!processObject) invalidOptions.push('processObject'); + if (typeof runtimeInstanceId !== 'string' || !runtimeInstanceId) { + invalidOptions.push('runtimeInstanceId'); + } + if (typeof isWindows !== 'boolean') invalidOptions.push('isWindows'); + if ( + secureDirMode != null && + (!Number.isInteger(secureDirMode) || secureDirMode < 0 || secureDirMode > 0o777) + ) { + invalidOptions.push('secureDirMode'); + } + if ( + secureFileMode != null && + (!Number.isInteger(secureFileMode) || secureFileMode < 0 || secureFileMode > 0o777) + ) { + invalidOptions.push('secureFileMode'); + } + if ( + typeof fileMutationLockTimeoutMs !== 'number' || + !Number.isFinite(fileMutationLockTimeoutMs) || + fileMutationLockTimeoutMs < 0 + ) { + invalidOptions.push('fileMutationLockTimeoutMs'); + } + if ( + typeof fileMutationLockStaleMs !== 'number' || + !Number.isFinite(fileMutationLockStaleMs) || + fileMutationLockStaleMs < 0 + ) { + invalidOptions.push('fileMutationLockStaleMs'); + } + if ( + typeof fileMutationLockInstanceMismatchStaleMs !== 'number' || + !Number.isFinite(fileMutationLockInstanceMismatchStaleMs) || + fileMutationLockInstanceMismatchStaleMs < 0 + ) { + invalidOptions.push('fileMutationLockInstanceMismatchStaleMs'); + } + if (typeof settingsFile !== 'string' || !settingsFile) invalidOptions.push('settingsFile'); + if (typeof dataFile !== 'string' || !dataFile) invalidOptions.push('dataFile'); + if (typeof debugLog !== 'function') invalidOptions.push('debugLog'); + + return invalidOptions; +} + +function createDataRuntimeFileLocks({ + fsPromises, + 
path, + processObject, + runtimeInstanceId, + isWindows, + secureDirMode, + secureFileMode, + fileMutationLockTimeoutMs, + fileMutationLockStaleMs, + fileMutationLockInstanceMismatchStaleMs = fileMutationLockTimeoutMs, + settingsFile, + dataFile, + debugLog = console.debug, +}) { + const invalidOptions = getInvalidFileLockOptionNames({ + fsPromises, + path, + processObject, + runtimeInstanceId, + isWindows, + secureDirMode, + secureFileMode, + fileMutationLockTimeoutMs, + fileMutationLockStaleMs, + fileMutationLockInstanceMismatchStaleMs, + settingsFile, + dataFile, + debugLog, + }); + + if (invalidOptions.length > 0) { + throw new TypeError( + `createDataRuntimeFileLocks received invalid options: ${invalidOptions.join(', ')}`, + ); + } + + const fileMutationLocks = new Map(); + + function getFileMutationLockDir(filePath) { + return `${filePath}.lock`; + } + + function getFileMutationLockOwnerPath(lockDir) { + return path.join(lockDir, 'owner.json'); + } + + async function removeFileMutationLockDir(lockDir) { + try { + await fsPromises.rm(lockDir, { recursive: true, force: true }); + } catch (error) { + if (error?.code !== 'ENOENT') { + throw error; + } + } + } + + async function writeFileMutationLockOwner(lockDir) { + const ownerPath = getFileMutationLockOwnerPath(lockDir); + const owner = { + pid: processObject.pid, + createdAt: new Date().toISOString(), + instanceId: runtimeInstanceId, + }; + await fsPromises.writeFile(ownerPath, JSON.stringify(owner, null, 2), { + mode: secureFileMode, + }); + if (!isWindows) { + await fsPromises.chmod(ownerPath, secureFileMode); + } + } + + function isProcessRunning(pid) { + if (!Number.isInteger(pid) || pid <= 0) { + return false; + } + + try { + processObject.kill(pid, 0); + return true; + } catch (error) { + return error && error.code === 'EPERM'; + } + } + + async function sleep(ms) { + return new Promise((resolve) => setTimeout(resolve, ms)); + } + + async function shouldReapFileMutationLock(lockDir) { + const ownerPath = 
getFileMutationLockOwnerPath(lockDir); + let owner = null; + + try { + const rawOwner = await fsPromises.readFile(ownerPath, 'utf-8'); + owner = JSON.parse(rawOwner); + } catch (error) { + if (error?.code !== 'ENOENT') { + debugLog('Failed to read file mutation lock owner', { ownerPath, error }); + // Continue through the stale-lock fallback chain: owner fields first, age last. + } + } + + try { + const ownerCreatedAt = owner?.createdAt ? Date.parse(owner.createdAt) : Number.NaN; + if ( + typeof owner?.instanceId === 'string' && + owner.instanceId !== runtimeInstanceId && + Number.isFinite(ownerCreatedAt) && + Date.now() - ownerCreatedAt > fileMutationLockInstanceMismatchStaleMs + ) { + return true; + } + + if (Number.isInteger(owner?.pid)) { + return !isProcessRunning(owner.pid); + } + + const stats = await fsPromises.stat(lockDir); + const lockAgeMs = Number.isFinite(ownerCreatedAt) + ? Date.now() - ownerCreatedAt + : Date.now() - stats.mtimeMs; + + if (lockAgeMs > fileMutationLockStaleMs) { + return true; + } + + return false; + } catch (error) { + if (error?.code === 'ENOENT') { + return false; + } + throw error; + } + } + + function createFileMutationLockQueueTimeoutError(filePath) { + return new Error( + `Timed out waiting for previous file mutation lock for ${path.basename(filePath)}.`, + ); + } + + async function waitForPreviousFileMutationLock(previous, filePath) { + let timeoutId; + const timeout = new Promise((_, reject) => { + timeoutId = setTimeout(() => { + reject(createFileMutationLockQueueTimeoutError(filePath)); + }, fileMutationLockTimeoutMs); + }); + + try { + await Promise.race([previous.catch(() => undefined), timeout]); + } finally { + clearTimeout(timeoutId); + } + } + + async function withCrossProcessFileMutationLock( + filePath, + operation, + timeoutMs = fileMutationLockTimeoutMs, + ) { + const lockDir = getFileMutationLockDir(filePath); + const startedAt = Date.now(); + let backoffMs = 50; + const maxBackoffMs = 1600; + + while (true) { + 
try { + await fsPromises.mkdir(path.dirname(lockDir), { + recursive: true, + mode: secureDirMode, + }); + await fsPromises.mkdir(lockDir, { mode: secureDirMode }); + if (!isWindows) { + await fsPromises.chmod(lockDir, secureDirMode); + } + + try { + await writeFileMutationLockOwner(lockDir); + } catch (error) { + try { + await removeFileMutationLockDir(lockDir); + } catch (cleanupError) { + debugLog('Failed to remove mutation lock dir', { lockDir, error: cleanupError }); + } + throw error; + } + + break; + } catch (error) { + if (!error || error.code !== 'EEXIST') { + throw error; + } + + if (await shouldReapFileMutationLock(lockDir)) { + await removeFileMutationLockDir(lockDir).catch(() => undefined); + backoffMs = 50; + continue; + } + + if (Date.now() - startedAt >= timeoutMs) { + throw new Error(`Could not acquire file mutation lock for ${path.basename(filePath)}.`, { + cause: error, + }); + } + + await sleep(backoffMs); + backoffMs = Math.min(backoffMs * 2, maxBackoffMs); + } + } + + try { + return await operation(); + } finally { + try { + await removeFileMutationLockDir(lockDir); + } catch (error) { + debugLog('failed cleaning lockDir', error); + // Ignore cleanup races so the original operation result wins. 
+ } + } + } + + async function withFileMutationLock(filePath, operation) { + const previous = fileMutationLocks.get(filePath) || Promise.resolve(); + let releaseCurrent = () => {}; + const current = new Promise((resolve) => { + releaseCurrent = resolve; + }); + + fileMutationLocks.set(filePath, current); + + try { + await waitForPreviousFileMutationLock(previous, filePath); + return await withCrossProcessFileMutationLock(filePath, operation); + } finally { + releaseCurrent(); + if (fileMutationLocks.get(filePath) === current) { + fileMutationLocks.delete(filePath); + } + } + } + + async function withOrderedFileMutationLocks(filePaths, operation) { + const uniquePaths = Array.from(new Set(filePaths)).sort(); + + const runWithLock = async (index) => { + if (index >= uniquePaths.length) { + return operation(); + } + + const filePath = uniquePaths[index]; + return withFileMutationLock(filePath, () => runWithLock(index + 1)); + }; + + return runWithLock(0); + } + + async function withSettingsAndDataMutationLock(operation) { + return withOrderedFileMutationLocks([settingsFile, dataFile], operation); + } + + return { + getFileMutationLockDir, + withFileMutationLock, + withOrderedFileMutationLocks, + withSettingsAndDataMutationLock, + getPendingFileMutationLockCount: () => fileMutationLocks.size, + }; +} + +module.exports = { + createDataRuntimeFileLocks, +}; diff --git a/server/data-runtime/import-merge.js b/server/data-runtime/import-merge.js new file mode 100644 index 0000000..f928eaa --- /dev/null +++ b/server/data-runtime/import-merge.js @@ -0,0 +1,291 @@ +const COST_COMPARISON_TOLERANCE = 1e-9; + +function isPlainObject(value) { + return Boolean(value) && typeof value === 'object' && !Array.isArray(value); +} + +function areNumbersEquivalent(left, right, tolerance = COST_COMPARISON_TOLERANCE) { + return Math.abs(left - right) <= tolerance; +} + +function hasValidUsageDayDate(day) { + return typeof day?.date === 'string' && day.date.trim().length > 0; +} + +function 
sortStrings(values) { + return [ + ...new Set( + (Array.isArray(values) ? values : []) + .filter((value) => typeof value === 'string') + .map((value) => value.trim()) + .filter(Boolean), + ), + ].sort((left, right) => left.localeCompare(right)); +} + +function canonicalizeModelBreakdown(entry) { + return { + modelName: typeof entry?.modelName === 'string' ? entry.modelName.trim() : '', + inputTokens: Number(entry?.inputTokens) || 0, + outputTokens: Number(entry?.outputTokens) || 0, + cacheCreationTokens: Number(entry?.cacheCreationTokens) || 0, + cacheReadTokens: Number(entry?.cacheReadTokens) || 0, + thinkingTokens: Number(entry?.thinkingTokens) || 0, + cost: Number(entry?.cost) || 0, + requestCount: Number(entry?.requestCount) || 0, + }; +} + +function canonicalizeUsageDay(day) { + return { + date: typeof day?.date === 'string' ? day.date : '', + inputTokens: Number(day?.inputTokens) || 0, + outputTokens: Number(day?.outputTokens) || 0, + cacheCreationTokens: Number(day?.cacheCreationTokens) || 0, + cacheReadTokens: Number(day?.cacheReadTokens) || 0, + thinkingTokens: Number(day?.thinkingTokens) || 0, + totalTokens: Number(day?.totalTokens) || 0, + totalCost: Number(day?.totalCost) || 0, + requestCount: Number(day?.requestCount) || 0, + modelsUsed: sortStrings(day?.modelsUsed), + modelBreakdowns: (Array.isArray(day?.modelBreakdowns) ? 
day.modelBreakdowns : []) + .map(canonicalizeModelBreakdown) + .sort((left, right) => left.modelName.localeCompare(right.modelName)), + }; +} + +function areUsageDaysEquivalent(left, right) { + const leftDay = canonicalizeUsageDay(left); + const rightDay = canonicalizeUsageDay(right); + const scalarFields = [ + 'date', + 'inputTokens', + 'outputTokens', + 'cacheCreationTokens', + 'cacheReadTokens', + 'thinkingTokens', + 'totalTokens', + 'totalCost', + 'requestCount', + ]; + + for (const field of scalarFields) { + if (field === 'totalCost') { + if (!areNumbersEquivalent(leftDay[field], rightDay[field])) { + return false; + } + continue; + } + + if (leftDay[field] !== rightDay[field]) { + return false; + } + } + + if (leftDay.modelsUsed.length !== rightDay.modelsUsed.length) { + return false; + } + for (let index = 0; index < leftDay.modelsUsed.length; index += 1) { + if (leftDay.modelsUsed[index] !== rightDay.modelsUsed[index]) { + return false; + } + } + + if (leftDay.modelBreakdowns.length !== rightDay.modelBreakdowns.length) { + return false; + } + + const breakdownFields = [ + 'modelName', + 'inputTokens', + 'outputTokens', + 'cacheCreationTokens', + 'cacheReadTokens', + 'thinkingTokens', + 'cost', + 'requestCount', + ]; + for (let index = 0; index < leftDay.modelBreakdowns.length; index += 1) { + const leftBreakdown = leftDay.modelBreakdowns[index]; + const rightBreakdown = rightDay.modelBreakdowns[index]; + for (const field of breakdownFields) { + if (field === 'cost') { + if (!areNumbersEquivalent(leftBreakdown[field], rightBreakdown[field])) { + return false; + } + continue; + } + + if (leftBreakdown[field] !== rightBreakdown[field]) { + return false; + } + } + } + + return true; +} + +function computeUsageTotals(daily) { + return daily.reduce( + (totals, day) => ({ + inputTokens: totals.inputTokens + (day.inputTokens || 0), + outputTokens: totals.outputTokens + (day.outputTokens || 0), + cacheCreationTokens: totals.cacheCreationTokens + 
(day.cacheCreationTokens || 0), + cacheReadTokens: totals.cacheReadTokens + (day.cacheReadTokens || 0), + thinkingTokens: totals.thinkingTokens + (day.thinkingTokens || 0), + totalCost: totals.totalCost + (day.totalCost || 0), + totalTokens: totals.totalTokens + (day.totalTokens || 0), + requestCount: totals.requestCount + (day.requestCount || 0), + }), + { + inputTokens: 0, + outputTokens: 0, + cacheCreationTokens: 0, + cacheReadTokens: 0, + thinkingTokens: 0, + totalCost: 0, + totalTokens: 0, + requestCount: 0, + }, + ); +} + +/** Creates import payload extraction and usage merge helpers. */ +function createDataRuntimeImportMerge({ + normalizeIncomingData, + settingsBackupKind, + usageBackupKind, +}) { + function extractSettingsImportPayload(payload) { + if (!isPlainObject(payload)) { + throw new Error('Uploaded JSON is not a settings backup file.'); + } + + if (payload.kind === settingsBackupKind) { + if (!Object.prototype.hasOwnProperty.call(payload, 'settings')) { + throw new Error('The settings backup file does not contain any settings.'); + } + if (!isPlainObject(payload.settings)) { + throw new Error('The settings backup file has an invalid settings payload.'); + } + return payload.settings; + } + + if (typeof payload.kind === 'string' && payload.kind === usageBackupKind) { + throw new Error('This is a data backup file, not a settings file.'); + } + + throw new Error('Uploaded JSON is not a settings backup file.'); + } + + /** + * Extracts usage data from backup files while keeping legacy raw imports working. + * Non-object payloads are returned verbatim for backwards compatibility with raw arrays + * or pre-validated data. Object payloads with usageBackupKind must contain data, + * while settingsBackupKind objects are rejected as the wrong backup type. 
+ */ + function extractUsageImportPayload(payload) { + if (!isPlainObject(payload)) { + return payload; + } + + if (payload.kind === usageBackupKind) { + if (!Object.prototype.hasOwnProperty.call(payload, 'data')) { + throw new Error('The usage backup file does not contain any usage data.'); + } + return payload.data; + } + + if (typeof payload.kind === 'string' && payload.kind === settingsBackupKind) { + throw new Error('This is a settings backup file, not a data file.'); + } + + return payload; + } + + function mergeUsageData(currentData, importedData) { + if (!importedData || !Array.isArray(importedData.daily)) { + throw new Error('Imported data must contain a daily array.'); + } + + const validImportedDaily = importedData.daily.filter(hasValidUsageDayDate); + const skippedDays = importedData.daily.length - validImportedDaily.length; + const current = + currentData && Array.isArray(currentData.daily) && currentData.daily.length > 0 + ? normalizeIncomingData(currentData) + : null; + + if (!current) { + const data = + skippedDays > 0 + ? { + daily: validImportedDaily, + totals: computeUsageTotals(validImportedDaily), + } + : importedData; + + return { + data, + summary: { + importedDays: importedData.daily.length, + addedDays: validImportedDaily.length, + unchangedDays: 0, + conflictingDays: 0, + skippedDays, + totalDays: validImportedDaily.length, + }, + }; + } + + const currentByDate = new Map(current.daily.map((day) => [day.date, day])); + let addedDays = 0; + let unchangedDays = 0; + let conflictingDays = 0; + + for (const importedDay of validImportedDaily) { + const existingDay = currentByDate.get(importedDay.date); + if (!existingDay) { + currentByDate.set(importedDay.date, importedDay); + addedDays += 1; + continue; + } + + if (areUsageDaysEquivalent(existingDay, importedDay)) { + unchangedDays += 1; + continue; + } + + // Preserve local data on conflicts and report the day so users can resolve it explicitly. 
+ conflictingDays += 1; + } + + const mergedDaily = [...currentByDate.values()].sort((left, right) => + left.date.localeCompare(right.date), + ); + + return { + data: { + daily: mergedDaily, + totals: computeUsageTotals(mergedDaily), + }, + summary: { + importedDays: importedData.daily.length, + addedDays, + unchangedDays, + conflictingDays, + skippedDays, + totalDays: mergedDaily.length, + }, + }; + } + + return { + extractSettingsImportPayload, + extractUsageImportPayload, + mergeUsageData, + }; +} + +module.exports = { + createDataRuntimeImportMerge, +}; diff --git a/src/types/index.ts b/src/types/index.ts index 688b943..02f995d 100644 --- a/src/types/index.ts +++ b/src/types/index.ts @@ -48,6 +48,7 @@ export interface UsageImportSummary { addedDays: number unchangedDays: number conflictingDays: number + skippedDays: number totalDays: number } diff --git a/tests/architecture/server-data-runtime-boundaries.test.ts b/tests/architecture/server-data-runtime-boundaries.test.ts new file mode 100644 index 0000000..28cb0bc --- /dev/null +++ b/tests/architecture/server-data-runtime-boundaries.test.ts @@ -0,0 +1,55 @@ +import { readFileSync } from 'node:fs' +import path from 'node:path' +import { describe, expect, it } from 'vitest' + +function readRepoFile(relativePath: string) { + return readFileSync(path.resolve(process.cwd(), relativePath), 'utf8') +} + +const dataRuntimeServiceFiles = [ + 'server/data-runtime/app-paths.js', + 'server/data-runtime/file-io.js', + 'server/data-runtime/file-locks.js', + 'server/data-runtime/import-merge.js', +] + +const forbiddenRuntimeImports = [ + '../http-router', + '../routes/', + '../auto-import-runtime', + '../background-runtime', +] + +function escapeRegex(value: string) { + return value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&') +} + +function createForbiddenImportPattern(importPath: string) { + const escapedPath = escapeRegex(importPath) + + return new RegExp( + 
String.raw`(?:require\s*\(\s*['"]${escapedPath}(?=(?:['"/]|$))|from\s*['"]${escapedPath}(?=(?:['"/]|$)))`, + ) +} + +describe('server data runtime boundary contract', () => { + it('keeps data-runtime.js as the persistence composition facade', () => { + const source = readRepoFile('server/data-runtime.js') + + expect(source).toContain("require('./data-runtime/app-paths')") + expect(source).toContain("require('./data-runtime/file-io')") + expect(source).toContain("require('./data-runtime/file-locks')") + expect(source).toContain("require('./data-runtime/import-merge')") + expect(source).toContain('function createDataRuntime') + }) + + it('keeps data runtime services independent from HTTP and other runtime modules', () => { + for (const serviceFile of dataRuntimeServiceFiles) { + const source = readRepoFile(serviceFile) + + for (const importPath of forbiddenRuntimeImports) { + expect(source).not.toMatch(createForbiddenImportPattern(importPath)) + } + } + }) +}) diff --git a/tests/frontend/dashboard-controller-actions.test.tsx b/tests/frontend/dashboard-controller-actions.test.tsx index 3187acb..8dd9feb 100644 --- a/tests/frontend/dashboard-controller-actions.test.tsx +++ b/tests/frontend/dashboard-controller-actions.test.tsx @@ -189,6 +189,7 @@ describe('useDashboardControllerWithBootstrap actions', () => { addedDays: 2, unchangedDays: 0, conflictingDays: 0, + skippedDays: 0, totalDays: 2, }) diff --git a/tests/frontend/dashboard-error-state.test.tsx b/tests/frontend/dashboard-error-state.test.tsx index b84ff40..0832475 100644 --- a/tests/frontend/dashboard-error-state.test.tsx +++ b/tests/frontend/dashboard-error-state.test.tsx @@ -109,6 +109,7 @@ describe('Dashboard fatal load state', () => { addedDays: 0, unchangedDays: 0, conflictingDays: 0, + skippedDays: 0, totalDays: 0, }) toktrackVersionStatusMocks.scheduleToktrackVersionStatusWarmup.mockClear() diff --git a/tests/integration/server-api-imports.test.ts b/tests/integration/server-api-imports.test.ts index 
4607324..fd726d5 100644 --- a/tests/integration/server-api-imports.test.ts +++ b/tests/integration/server-api-imports.test.ts @@ -123,6 +123,7 @@ describe('local server API imports', () => { addedDays: 1, unchangedDays: 1, conflictingDays: 1, + skippedDays: 0, totalDays: 6, }) }) diff --git a/tests/unit/data-runtime-contract.test.ts b/tests/unit/data-runtime-contract.test.ts index 34d3f6a..5457036 100644 --- a/tests/unit/data-runtime-contract.test.ts +++ b/tests/unit/data-runtime-contract.test.ts @@ -38,6 +38,7 @@ const { createDataRuntime } = require('../../server/data-runtime.js') as { addedDays: number unchangedDays: number conflictingDays: number + skippedDays: number totalDays: number } } @@ -219,8 +220,18 @@ describe('data runtime contracts', () => { } const equivalentDay = { ...currentDay, + totalCost: 0.1 + 0.2 - 0.05, modelsUsed: ['Claude', 'GPT-5.4'], - modelBreakdowns: [...currentDay.modelBreakdowns].reverse(), + modelBreakdowns: [ + { + ...currentDay.modelBreakdowns[1], + cost: 0.15 + Number.EPSILON, + }, + { + ...currentDay.modelBreakdowns[0], + cost: 0.1 + Number.EPSILON, + }, + ], } const conflictingDay = { ...currentDay, @@ -254,6 +265,7 @@ describe('data runtime contracts', () => { addedDays: 1, unchangedDays: 1, conflictingDays: 0, + skippedDays: 0, totalDays: 2, }) expect(equivalentMerge.data.daily.map((day) => day.date)).toEqual(['2026-04-01', '2026-04-02']) @@ -266,8 +278,48 @@ describe('data runtime contracts', () => { addedDays: 0, unchangedDays: 0, conflictingDays: 1, + skippedDays: 0, totalDays: 1, }) expect(conflictingMerge.data.daily[0]?.totalCost).toBe(0.25) }) + + it('skips imported usage days without a usable date before writing merged data', () => { + const runtime = createRuntime() + const validDay = { + date: '2026-04-03', + inputTokens: 1, + outputTokens: 2, + cacheCreationTokens: 0, + cacheReadTokens: 0, + thinkingTokens: 0, + totalTokens: 3, + totalCost: 0.1, + requestCount: 1, + modelsUsed: ['GPT-5.4'], + modelBreakdowns: [], + 
} + + const merge = runtime.mergeUsageData(null, { + daily: [{ ...validDay, date: '' }, validDay], + totals: { totalCost: 9 }, + }) + + expect(merge.summary).toMatchObject({ + importedDays: 2, + addedDays: 1, + skippedDays: 1, + totalDays: 1, + }) + expect(merge.data.daily.map((day) => day.date)).toEqual(['2026-04-03']) + expect(merge.data.totals).toMatchObject({ totalCost: 0.1, totalTokens: 3 }) + }) + + it('rejects usage imports that do not contain a daily array before merging', () => { + const runtime = createRuntime() + + expect(() => runtime.mergeUsageData(null, { totals: {} } as never)).toThrow( + 'Imported data must contain a daily array.', + ) + }) }) diff --git a/tests/unit/http-router-mutations.test.ts b/tests/unit/http-router-mutations.test.ts index f513afe..444a9d4 100644 --- a/tests/unit/http-router-mutations.test.ts +++ b/tests/unit/http-router-mutations.test.ts @@ -76,6 +76,7 @@ function createRouter({ addedDays: Array.isArray(importedData?.daily) ? importedData.daily.length : 0, unchangedDays: 0, conflictingDays: 0, + skippedDays: 0, totalDays: Array.isArray(importedData?.daily) ? 
importedData.daily.length : 0, }, })), diff --git a/tests/unit/server-helpers-file-locks.test.ts b/tests/unit/server-helpers-file-locks.test.ts index a882b66..554b869 100644 --- a/tests/unit/server-helpers-file-locks.test.ts +++ b/tests/unit/server-helpers-file-locks.test.ts @@ -32,6 +32,9 @@ const { createDataRuntime } = require('../../server/data-runtime.js') as { withFileMutationLock: (filePath: string, operation: () => Promise) => Promise } } +const { createDataRuntimeFileLocks } = require('../../server/data-runtime/file-locks.js') as { + createDataRuntimeFileLocks: (options: Record) => unknown +} afterEach(() => { resetServerHelperTestState() @@ -180,6 +183,24 @@ function waitForChildExit(child: ReturnType, timeoutMs: number) { } describe('server helper utilities: file mutation locks', () => { + it('fails fast when required file lock runtime options are invalid', () => { + expect(() => + createDataRuntimeFileLocks({ + fsPromises, + path, + processObject: process, + runtimeInstanceId: '', + isWindows: false, + secureDirMode: 0o1000, + secureFileMode: 0o600, + fileMutationLockTimeoutMs: -1, + fileMutationLockStaleMs: 30_000, + settingsFile: '/tmp/settings.json', + dataFile: '/tmp/data.json', + }), + ).toThrow('runtimeInstanceId, secureDirMode, fileMutationLockTimeoutMs') + }) + it('serializes operations for the same file path and cleans up locks afterwards', async () => { const tempDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-lock-same-file-')) const settingsFile = path.join(tempDir, 'settings.json') @@ -231,6 +252,39 @@ describe('server helper utilities: file mutation locks', () => { await fsPromises.rm(tempDir, { recursive: true, force: true }) }) + it('times out in-process waiters behind a hung predecessor and releases their queue slot', async () => { + const runtimeRoot = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-lock-queue-timeout-')) + const runtime = createFileModeRuntime(runtimeRoot, path.join(runtimeRoot, 'legacy-data.json')) + 
let releaseFirst: (() => void) | null = null + + try { + const first = runtime.withFileMutationLock( + runtime.paths.dataFile, + async () => + new Promise((resolve) => { + releaseFirst = resolve + }), + ) + + await vi.waitFor(() => { + expect(existsSync(runtime.getFileMutationLockDir(runtime.paths.dataFile))).toBe(true) + }) + + await expect( + runtime.withFileMutationLock(runtime.paths.dataFile, async () => 'late'), + ).rejects.toThrow('Timed out waiting for previous file mutation lock') + + releaseFirst?.() + await first + await expect( + runtime.withFileMutationLock(runtime.paths.dataFile, async () => 'ok'), + ).resolves.toBe('ok') + } finally { + releaseFirst?.() + await fsPromises.rm(runtimeRoot, { recursive: true, force: true }) + } + }) + it('serializes overlapping multi-file operations in a stable order', async () => { const tempDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-lock-ordered-')) const settingsFile = path.join(tempDir, 'settings.json') @@ -290,6 +344,35 @@ describe('server helper utilities: file mutation locks', () => { await fsPromises.rm(targetDir, { recursive: true, force: true }) }) + it('reaps old lock directories from a different runtime instance even when the pid is alive', async () => { + const runtimeRoot = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-lock-instance-reuse-')) + const runtime = createFileModeRuntime(runtimeRoot, path.join(runtimeRoot, 'legacy-data.json')) + const lockDir = runtime.getFileMutationLockDir(runtime.paths.dataFile) + + try { + await fsPromises.mkdir(lockDir, { recursive: true }) + await fsPromises.writeFile( + path.join(lockDir, 'owner.json'), + JSON.stringify( + { + pid: process.pid, + createdAt: new Date(Date.now() - 1000).toISOString(), + instanceId: 'different-runtime-instance', + }, + null, + 2, + ), + ) + + await expect( + runtime.withFileMutationLock(runtime.paths.dataFile, async () => 'ok'), + ).resolves.toBe('ok') + expect(existsSync(lockDir)).toBe(false) + } finally { + await 
fsPromises.rm(runtimeRoot, { recursive: true, force: true }) + } + }) + it('does not reap stale locks while the recorded owner pid is still running', async () => { const runtime = createShortTimeoutFileLockRuntime() const targetDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-stale-pid-lock-')) @@ -302,7 +385,7 @@ describe('server helper utilities: file mutation locks', () => { JSON.stringify({ pid: 4242, createdAt: new Date(Date.now() - 60_000).toISOString(), - instanceId: 'stale-instance', + instanceId: `test-${process.pid}`, }), ) @@ -425,6 +508,35 @@ dataRuntime.withFileMutationLock(filePath, async () => { await fsPromises.rm(targetDir, { recursive: true, force: true }) }) + it('surfaces async atomic write cleanup failures with the original write error', async () => { + const targetDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-write-json-cleanup-')) + const targetFile = path.join(targetDir, 'settings.json') + const renameError = Object.assign(new Error('rename failed'), { code: 'EXDEV' }) + const cleanupError = Object.assign(new Error('cleanup failed'), { code: 'EACCES' }) + + const renameSpy = vi.spyOn(fsPromises, 'rename').mockRejectedValue(renameError) + const unlinkSpy = vi.spyOn(fsPromises, 'unlink').mockRejectedValue(cleanupError) + + try { + let caughtError: unknown + try { + await writeJsonAtomicAsync(targetFile, { ok: true }) + } catch (error) { + caughtError = error + } + + expect(caughtError).toBeInstanceOf(AggregateError) + expect((caughtError as AggregateError).message).toContain( + 'Failed atomic JSON write and temp-file cleanup', + ) + expect((caughtError as AggregateError).errors).toEqual([renameError, cleanupError]) + } finally { + renameSpy.mockRestore() + unlinkSpy.mockRestore() + await fsPromises.rm(targetDir, { recursive: true, force: true }) + } + }) + it('removes temporary files when atomic sync writes fail after creating the temp file', async () => { const targetDir = await fsPromises.mkdtemp(path.join(tmpdir(), 
'ttdash-write-json-sync-')) const targetFile = path.join(targetDir, 'settings.json') @@ -449,6 +561,42 @@ dataRuntime.withFileMutationLock(filePath, async () => { } }) + it('surfaces sync atomic write cleanup failures with the original write error', async () => { + const targetDir = await fsPromises.mkdtemp( + path.join(tmpdir(), 'ttdash-write-json-sync-cleanup-'), + ) + const targetFile = path.join(targetDir, 'settings.json') + const renameError = Object.assign(new Error('rename failed'), { code: 'EXDEV' }) + const cleanupError = Object.assign(new Error('cleanup failed'), { code: 'EACCES' }) + const runtime = createFileModeRuntime(targetDir, path.join(targetDir, 'legacy-data.json')) + + const renameSpy = vi.spyOn(fs, 'renameSync').mockImplementation(() => { + throw renameError + }) + const unlinkSpy = vi.spyOn(fs, 'unlinkSync').mockImplementation(() => { + throw cleanupError + }) + + try { + let caughtError: unknown + try { + runtime.writeJsonAtomic(targetFile, { ok: true }) + } catch (error) { + caughtError = error + } + + expect(caughtError).toBeInstanceOf(AggregateError) + expect((caughtError as AggregateError).message).toContain( + 'Failed atomic JSON write and temp-file cleanup', + ) + expect((caughtError as AggregateError).errors).toEqual([renameError, cleanupError]) + } finally { + renameSpy.mockRestore() + unlinkSpy.mockRestore() + await fsPromises.rm(targetDir, { recursive: true, force: true }) + } + }) + it('removes temporary files when writeFile rejects after creating the temp file', async () => { const targetDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-write-json-')) const targetFile = path.join(targetDir, 'settings.json') diff --git a/vitest.setup.frontend.ts b/vitest.setup.frontend.ts index b94bb55..3188a22 100644 --- a/vitest.setup.frontend.ts +++ b/vitest.setup.frontend.ts @@ -1,14 +1,34 @@ import '@testing-library/jest-dom/vitest' import { cleanup } from '@testing-library/react' -import { afterEach, beforeAll, vi } from 'vitest' 
+import { afterEach, beforeAll, beforeEach, vi } from 'vitest' import { initI18n } from '@/lib/i18n' +// jsdom can expose different Event/CustomEvent constructors on globalThis and window. +// Radix dispatches globalThis.CustomEvent instances through DOM nodes, so align both. +function syncDomEventConstructors() { + if (typeof window === 'undefined') { + return + } + + Object.defineProperty(globalThis, 'Event', { + configurable: true, + writable: true, + value: window.Event, + }) + Object.defineProperty(globalThis, 'CustomEvent', { + configurable: true, + writable: true, + value: window.CustomEvent, + }) +} + afterEach(() => { cleanup() }) beforeAll(async () => { await initI18n('de') + syncDomEventConstructors() if (typeof window === 'undefined') { return @@ -62,3 +82,8 @@ beforeAll(async () => { }) } }) + +// Some tests intentionally reset globals, so restore the jsdom event constructors per test. +beforeEach(() => { + syncDomEventConstructors() +}) From 104d66366368a458a2b6a586e8acafef535d0b87 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Fri, 1 May 2026 15:24:05 +0200 Subject: [PATCH 32/42] Split auto import runtime services --- docs/architecture.md | 11 +- server/auto-import-runtime.js | 889 ++---------------- server/auto-import-runtime/command-runner.js | 233 +++++ server/auto-import-runtime/import-executor.js | 269 ++++++ server/auto-import-runtime/latest-version.js | 114 +++ server/auto-import-runtime/messages.js | 93 ++ .../auto-import-runtime/toktrack-runners.js | 295 ++++++ server/runtime-formatters.js | 60 ++ server/startup-runtime.js | 32 +- tests/architecture/import-patterns.ts | 14 + ...ver-auto-import-runtime-boundaries.test.ts | 57 ++ .../server-data-runtime-boundaries.test.ts | 20 +- ...erver-helpers-auto-import-executor.test.ts | 299 ++++++ tests/unit/server-helpers-runner-core.test.ts | 23 +- tests/unit/server-helpers.shared.ts | 32 +- tests/unit/startup-runtime.test.ts | 31 +- 16 files changed, 1615 insertions(+), 857 deletions(-) create mode 
100644 server/auto-import-runtime/command-runner.js create mode 100644 server/auto-import-runtime/import-executor.js create mode 100644 server/auto-import-runtime/latest-version.js create mode 100644 server/auto-import-runtime/messages.js create mode 100644 server/auto-import-runtime/toktrack-runners.js create mode 100644 server/runtime-formatters.js create mode 100644 tests/architecture/import-patterns.ts create mode 100644 tests/architecture/server-auto-import-runtime-boundaries.test.ts create mode 100644 tests/unit/server-helpers-auto-import-executor.test.ts diff --git a/docs/architecture.md b/docs/architecture.md index ed40c49..2909198 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -65,8 +65,15 @@ The server runtime is intentionally split so `server.js` stays an executable shi - `server/background-runtime.js` - owns background instance registry, start/stop flows, and registry locking - `server/auto-import-runtime.js` - - owns toktrack runner resolution, subprocess execution, version lookup, and auto-import execution + - is the facade for toktrack runner resolution, subprocess execution, version lookup, singleton leasing, and auto-import execution - uses the injected runtime-state services for singleton import leasing and latest-version cache isolation + - composes focused services from `server/auto-import-runtime/**`: + - `messages.js` exposes structured progress/error events and server-side fallback message formatting + - `command-runner.js` exposes cross-platform command spawning, timeout handling, process termination, and command diagnostics + - `toktrack-runners.js` exposes local/bunx/npx runner construction, probing, timeout selection, version parsing, and runner-resolution errors + - `latest-version.js` exposes the cached npm registry lookup for the latest toktrack version + - `import-executor.js` exposes singleton lease acquisition, toktrack JSON import orchestration, persistence updates, and startup auto-load logging + - wiring model: 
`server/auto-import-runtime.js` imports these five submodules, injects process/filesystem/cache/runtime configuration, and returns one public auto-import facade to `server/app-runtime.js` - `server/http-router.js` - owns API routing, SSE wiring, and static asset dispatch with injected runtime dependencies - must acquire auto-import work through `server/auto-import-runtime.js` instead of keeping route-local import flags @@ -87,7 +94,7 @@ The server runtime is intentionally split so `server.js` stays an executable shi - Local auth session state - `server/startup-runtime.js` writes the current local session metadata to a restrictive `session-auth.json` file in the user config dir through injected data-runtime IO - `server/background-runtime.js` stores per-instance auth headers and bootstrap URLs in the restrictive background registry so `ttdash stop` and no-open background starts stay usable -- `server/http-utils.js`, `server/runtime.js`, `server/process-utils.js`, `server/report/**` +- `server/http-utils.js`, `server/runtime.js`, `server/runtime-formatters.js`, `server/process-utils.js`, `server/report/**` - shared support modules used by the composed runtimes ## Shared Settings Contract diff --git a/server/auto-import-runtime.js b/server/auto-import-runtime.js index b7af8b4..2effccc 100644 --- a/server/auto-import-runtime.js +++ b/server/auto-import-runtime.js @@ -1,4 +1,9 @@ const { createExclusiveRuntimeLease, createExpiringAsyncCache } = require('./runtime-state'); +const { createAutoImportCommandRunner } = require('./auto-import-runtime/command-runner'); +const { createAutoImportExecutor } = require('./auto-import-runtime/import-executor'); +const { createLatestToktrackVersionLookup } = require('./auto-import-runtime/latest-version'); +const { createAutoImportMessages } = require('./auto-import-runtime/messages'); +const { createToktrackRunnerResolver } = require('./auto-import-runtime/toktrack-runners'); function createAutoImportRuntime({ fs, @@ -24,830 +29,76 
@@ function createAutoImportRuntime({ toktrackLatestLookupTimeoutMs, toktrackLatestCacheSuccessTtlMs, toktrackLatestCacheFailureTtlMs, + probeLog, }) { - function createAutoImportMessageEvent(key, vars = {}) { - return { - key, - vars, - }; - } - - function createAutoImportError(message, key, vars = {}) { - const error = new Error(message); - error.messageKey = key; - error.messageVars = vars; - return error; - } - - function summarizeCommandError(error, fallbackMessage = 'Unknown error') { - if (error instanceof Error && error.message.trim()) { - return error.message.trim(); - } - - return fallbackMessage; - } - - function getTimeoutSeconds(timeoutMs) { - return Math.max(1, Math.ceil(Number(timeoutMs) / 1000)); - } - - function toAutoImportErrorEvent(error) { - if (error && typeof error.messageKey === 'string') { - return createAutoImportMessageEvent(error.messageKey, error.messageVars || {}); - } - - return createAutoImportMessageEvent('errorPrefix', { - message: error && error.message ? error.message : 'Unknown error', - }); - } - - function formatAutoImportMessageEvent(event) { - switch (event?.key) { - case 'startingLocalImport': - return 'Starting toktrack import...'; - case 'warmingUpPackageRunner': - return `Preparing ${event.vars?.runner || 'package runner'} (the first run may take longer while toktrack is downloaded)...`; - case 'loadingUsageData': - return `Loading usage data via ${event.vars?.command || 'unknown command'}...`; - case 'processingUsageData': - return `Processing usage data... (${event.vars?.seconds || 0}s)`; - case 'autoImportRunning': - return 'An auto-import is already running. 
Please wait.'; - case 'noRunnerFound': - return 'No local toktrack, Bun, or npm exec installation found.'; - case 'localToktrackVersionMismatch': - return `Local toktrack v${event.vars?.detectedVersion || 'unknown'} does not match the required v${event.vars?.expectedVersion || toktrackVersion}.`; - case 'localToktrackFailed': - return `Local toktrack could not be started: ${event.vars?.message || 'Unknown error'}`; - case 'packageRunnerFailed': - return `No compatible bunx or npm exec runner succeeded: ${event.vars?.message || 'Unknown error'}`; - case 'packageRunnerWarmupTimedOut': - return `${event.vars?.runner || 'The package runner'} took longer than ${event.vars?.seconds || 0}s to prepare toktrack. The first run may need to download the package first. Please try again or verify network access.`; - case 'toktrackVersionCheckFailed': - return `Toktrack was found, but the version check failed: ${event.vars?.message || 'Unknown error'}`; - case 'toktrackExecutionFailed': - return `Toktrack failed while loading usage data: ${event.vars?.message || 'Unknown error'}`; - case 'toktrackExecutionTimedOut': - return `Toktrack did not finish loading usage data within ${event.vars?.seconds || 0}s via ${event.vars?.runner || 'the selected runner'}. 
Please try again.`; - case 'toktrackInvalidJson': - return `Toktrack returned invalid JSON output: ${event.vars?.message || 'Unknown error'}`; - case 'toktrackInvalidData': - return `Toktrack returned data that TTDash could not process: ${event.vars?.message || 'Unknown error'}`; - case 'errorPrefix': - return `Error: ${event.vars?.message || 'Unknown error'}`; - default: - return 'Auto-import update'; - } - } - - function getExecutableName(baseName, forceWindows = isWindows) { - if (!forceWindows) { - return baseName; - } - - switch (baseName) { - case 'npm': - return 'npm.cmd'; - case 'bun': - case 'bunx': - return 'bun.exe'; - case 'npx': - return 'npx.cmd'; - default: - return baseName; - } - } - - function spawnCommand(command, args, options = {}) { - return spawnCrossPlatform(command, args, { - ...options, - windowsHide: options.windowsHide ?? true, - }); - } - - function commandExists(command, args = ['--version']) { - return new Promise((resolve) => { - const child = spawnCommand(command, args, { stdio: 'ignore' }); - child.on('error', () => resolve(false)); - child.on('close', (code) => resolve(code === 0)); - }); - } - - function parseToktrackVersionOutput(output) { - return String(output) - .trim() - .replace(/^toktrack\s+/, ''); - } - - function getLocalToktrackDisplayCommand(forceWindows = isWindows) { - if (processObject.env.TTDASH_TOKTRACK_LOCAL_BIN) { - return `${toktrackLocalBin} daily --json`; - } - - return forceWindows - ? 'node_modules\\.bin\\toktrack.cmd daily --json' - : 'node_modules/.bin/toktrack daily --json'; - } - - function createLocalToktrackRunner() { - return { - command: toktrackLocalBin, - prefixArgs: [], - env: processObject.env, - method: 'local', - label: 'local toktrack', - displayCommand: getLocalToktrackDisplayCommand(), - }; - } - - function createBunxToktrackRunner() { - return { - command: getExecutableName('bunx'), - prefixArgs: isWindows ? 
['x', toktrackPackageSpec] : [toktrackPackageSpec], - env: processObject.env, - method: 'bunx', - label: 'bunx', - displayCommand: `bunx ${toktrackPackageSpec} daily --json`, - }; - } - - function createNpxToktrackRunner() { - return { - command: getExecutableName('npx'), - prefixArgs: ['--yes', toktrackPackageSpec], - env: { - ...processObject.env, - npm_config_cache: npxCacheDir, - }, - method: 'npm', - label: 'npm exec', - displayCommand: `npx --yes ${toktrackPackageSpec} daily --json`, - }; - } - - function isPackageToktrackRunner(runner) { - return runner?.method === 'bunx' || runner?.method === 'npm'; - } - - function getToktrackRunnerTimeouts(runner) { - if (isPackageToktrackRunner(runner)) { - return { - probeMs: toktrackPackageRunnerProbeTimeoutMs, - versionCheckMs: toktrackPackageRunnerVersionCheckTimeoutMs, - importMs: toktrackPackageRunnerImportTimeoutMs, - }; - } - - return { - probeMs: toktrackLocalRunnerProbeTimeoutMs, - versionCheckMs: toktrackLocalRunnerVersionCheckTimeoutMs, - importMs: toktrackLocalRunnerImportTimeoutMs, - }; - } - - function formatCommandForDisplay(command, args = []) { - return [command, ...args].join(' ').trim(); - } - - function createCommandError( - message, - { command, args = [], stdout = '', stderr = '', exitCode = null, timedOut = false } = {}, - ) { - const error = new Error(message); - error.command = command; - error.args = args; - error.stdout = stdout; - error.stderr = stderr; - error.exitCode = exitCode; - error.timedOut = timedOut; - return error; - } - - function terminateChildProcess(child) { - if (!child || child.exitCode !== null) { - return; - } - - child.kill('SIGTERM'); - - const forceKillTimeout = setTimeout(() => { - if (child.exitCode === null) { - child.kill('SIGKILL'); - } - }, processTerminationGraceMs); - - child.once('close', () => { - clearTimeout(forceKillTimeout); - }); - } - - function runCommand( - command, - args, - { - env = processObject.env, - streamStderr = false, - onStderr, - 
signalOnClose, - timeoutMs = null, - } = {}, - ) { - return runCommandWithSpawn(command, args, { - env, - streamStderr, - onStderr, - signalOnClose, - timeoutMs, - spawnImpl: spawnCommand, - }); - } - - function runCommandWithSpawn( - command, - args, - { - env = processObject.env, - streamStderr = false, - onStderr, - signalOnClose, - timeoutMs = null, - spawnImpl = spawnCommand, - } = {}, - ) { - return new Promise((resolve, reject) => { - const child = spawnImpl(command, args, { - stdio: ['ignore', 'pipe', 'pipe'], - env, - }); - const commandLabel = formatCommandForDisplay(command, args); - - let stdout = ''; - let stderr = ''; - let finished = false; - let timeoutId = null; - let timeoutError = null; - - const settle = (handler, value) => { - if (finished) { - return; - } - finished = true; - if (timeoutId) { - clearTimeout(timeoutId); - } - handler(value); - }; - - if (signalOnClose) { - signalOnClose(() => terminateChildProcess(child)); - } - - if (typeof timeoutMs === 'number' && Number.isFinite(timeoutMs) && timeoutMs > 0) { - timeoutId = setTimeout(() => { - timeoutError = createCommandError( - `Command timed out after ${timeoutMs}ms: ${commandLabel}`, - { - command, - args, - stdout, - stderr, - timedOut: true, - }, - ); - terminateChildProcess(child); - }, timeoutMs); - } - - child.stdout.on('data', (chunk) => { - stdout += chunk.toString(); - }); - - child.stderr.on('data', (chunk) => { - const line = chunk.toString(); - stderr += line; - if (streamStderr && onStderr && line.trim()) { - onStderr(line.trimEnd()); - } - }); - - child.on('error', (error) => - settle( - reject, - createCommandError(error.message || `Could not start ${commandLabel}.`, { - command, - args, - stdout, - stderr, - }), - ), - ); - child.on('close', (code) => { - if (finished) { - return; - } - if (timeoutError) { - settle(reject, timeoutError); - return; - } - if (code === 0) { - settle(resolve, stdout.trimEnd()); - return; - } - settle( - reject, - createCommandError( - 
stderr.trim() || stdout.trim() || `Command exited with code ${code}: ${commandLabel}`, - { - command, - args, - stdout, - stderr, - exitCode: code, - }, - ), - ); - }); - }); - } - - function runToktrack( - runner, - args, - { streamStderr = false, onStderr, signalOnClose, timeoutMs = null } = {}, - ) { - return runCommand(runner.command, [...runner.prefixArgs, ...args], { - env: runner.env, - streamStderr, - onStderr, - signalOnClose, - timeoutMs, - }); - } - - async function probeToktrackRunner( - runner, - timeoutMs = getToktrackRunnerTimeouts(runner).probeMs, - ) { - try { - await runToktrack(runner, ['--version'], { timeoutMs }); - return { - ok: true, - errorMessage: null, - timedOut: false, - }; - } catch (error) { - const message = summarizeCommandError(error, `Could not start ${runner.label}.`); - console.warn(`Failed to probe ${runner.label}: ${message}`); - return { - ok: false, - errorMessage: message, - timedOut: Boolean(error?.timedOut), - }; - } - } - - async function resolveToktrackRunnerWithDiagnostics() { - const resolution = { - runner: null, - localVersionMismatch: null, - localFailure: null, - runnerFailures: [], - }; - - if (fs.existsSync(toktrackLocalBin)) { - const localRunner = createLocalToktrackRunner(); - - try { - const localVersion = parseToktrackVersionOutput( - await runToktrack(localRunner, ['--version'], { - timeoutMs: getToktrackRunnerTimeouts(localRunner).probeMs, - }), - ); - if (localVersion === toktrackVersion) { - resolution.runner = localRunner; - return resolution; - } - resolution.localVersionMismatch = { - detectedVersion: localVersion || 'unknown', - expectedVersion: toktrackVersion, - }; - } catch (error) { - resolution.localFailure = summarizeCommandError( - error, - 'The local toktrack binary could not be started.', - ); - } - } - - const bunxRunner = createBunxToktrackRunner(); - const bunxProbe = await probeToktrackRunner(bunxRunner); - if (bunxProbe.ok) { - resolution.runner = bunxRunner; - return resolution; - } - 
if (bunxProbe.errorMessage) { - resolution.runnerFailures.push({ - label: bunxRunner.label, - message: bunxProbe.errorMessage, - timedOut: bunxProbe.timedOut, - }); - } - - const npxRunner = createNpxToktrackRunner(); - const npxProbe = await probeToktrackRunner(npxRunner); - if (npxProbe.ok) { - resolution.runner = npxRunner; - return resolution; - } - if (npxProbe.errorMessage) { - resolution.runnerFailures.push({ - label: npxRunner.label, - message: npxProbe.errorMessage, - timedOut: npxProbe.timedOut, - }); - } - - return resolution; - } - - async function resolveToktrackRunner() { - const resolution = await resolveToktrackRunnerWithDiagnostics(); - return resolution.runner; - } - - function toAutoImportRunnerResolutionError(resolution) { - if (resolution.localVersionMismatch) { - return createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent( - 'localToktrackVersionMismatch', - resolution.localVersionMismatch, - ), - ), - 'localToktrackVersionMismatch', - resolution.localVersionMismatch, - ); - } - - if (resolution.localFailure) { - return createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('localToktrackFailed', { - message: resolution.localFailure, - }), - ), - 'localToktrackFailed', - { - message: resolution.localFailure, - }, - ); - } - - if (resolution.runnerFailures.length > 0) { - const timedOutRunnerFailures = resolution.runnerFailures.filter( - (failure) => failure.timedOut, - ); - if ( - timedOutRunnerFailures.length > 0 && - timedOutRunnerFailures.length === resolution.runnerFailures.length - ) { - const runners = timedOutRunnerFailures.map((failure) => failure.label).join(' / '); - const seconds = getTimeoutSeconds(toktrackPackageRunnerProbeTimeoutMs); - return createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('packageRunnerWarmupTimedOut', { - runner: runners, - seconds, - }), - ), - 'packageRunnerWarmupTimedOut', - { - runner: runners, - seconds, - 
}, - ); - } - - return createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('packageRunnerFailed', { - message: resolution.runnerFailures - .map((failure) => `${failure.label}: ${failure.message}`) - .join(' | '), - }), - ), - 'packageRunnerFailed', - { - message: resolution.runnerFailures - .map((failure) => `${failure.label}: ${failure.message}`) - .join(' | '), - }, - ); - } - - return createAutoImportError( - 'No local toktrack, Bun, or npm exec installation found.', - 'noRunnerFound', - ); - } - - function createAutoImportAlreadyRunningError() { - return createAutoImportError( - 'An auto-import is already running. Please wait.', - 'autoImportRunning', - ); - } - - const autoImportLease = createExclusiveRuntimeLease({ - createAlreadyRunningError: createAutoImportAlreadyRunningError, + const messages = createAutoImportMessages({ + toktrackVersion, }); - - const latestToktrackVersionStatusCache = createExpiringAsyncCache({ - load: async (timeoutMs = toktrackLatestLookupTimeoutMs) => { - try { - const latestVersion = String( - await runCommand( - getExecutableName('npm'), - ['view', `${toktrackPackageName}@latest`, 'version'], - { - env: { - ...processObject.env, - npm_config_cache: npxCacheDir, - }, - timeoutMs, - }, - ), - ).trim(); - - return { - configuredVersion: toktrackVersion, - latestVersion, - isLatest: latestVersion === toktrackVersion, - lookupStatus: 'ok', - }; - } catch (error) { - return { - configuredVersion: toktrackVersion, - latestVersion: null, - isLatest: null, - lookupStatus: 'failed', - message: - error instanceof Error && error.message.trim() - ? error.message.trim() - : 'Could not determine the latest toktrack version.', - }; - } - }, - getTtlMs: (value) => - value.lookupStatus === 'ok' - ? 
toktrackLatestCacheSuccessTtlMs - : toktrackLatestCacheFailureTtlMs, + const commandRunner = createAutoImportCommandRunner({ + processObject, + spawnCrossPlatform, + isWindows, + processTerminationGraceMs, + }); + const runnerResolver = createToktrackRunnerResolver({ + fs, + processObject, + commandRunner, + messages, + toktrackPackageSpec, + toktrackVersion, + toktrackLocalBin, + npxCacheDir, + isWindows, + toktrackLocalRunnerProbeTimeoutMs, + toktrackLocalRunnerVersionCheckTimeoutMs, + toktrackLocalRunnerImportTimeoutMs, + toktrackPackageRunnerProbeTimeoutMs, + toktrackPackageRunnerVersionCheckTimeoutMs, + toktrackPackageRunnerImportTimeoutMs, + probeLog, + }); + const latestVersionLookup = createLatestToktrackVersionLookup({ + createExpiringAsyncCache, + commandRunner, + processObject, + toktrackPackageName, + toktrackVersion, + npxCacheDir, + toktrackLatestLookupTimeoutMs, + toktrackLatestCacheSuccessTtlMs, + toktrackLatestCacheFailureTtlMs, + }); + const importExecutor = createAutoImportExecutor({ + createExclusiveRuntimeLease, + messages, + runnerResolver, + normalizeIncomingData, + withSettingsAndDataMutationLock, + writeData, + updateDataLoadState, }); - - async function lookupLatestToktrackVersion(timeoutMs = toktrackLatestLookupTimeoutMs) { - return latestToktrackVersionStatusCache.lookup(timeoutMs); - } - - function acquireAutoImportLease() { - return autoImportLease.acquire(); - } - - async function performAutoImport({ - source = 'auto-import', - onCheck = () => {}, - onProgress = () => {}, - onOutput = () => {}, - signalOnClose, - lease = null, - } = {}) { - const activeLease = lease || acquireAutoImportLease(); - let progressSeconds = 0; - const progressInterval = setInterval(() => { - progressSeconds += 5; - onProgress(createAutoImportMessageEvent('processingUsageData', { seconds: progressSeconds })); - }, 5000); - - try { - onCheck({ tool: 'toktrack', status: 'checking' }); - onProgress(createAutoImportMessageEvent('startingLocalImport')); - - const 
resolution = await resolveToktrackRunnerWithDiagnostics(); - const runner = resolution.runner; - if (!runner) { - const resolutionError = toAutoImportRunnerResolutionError(resolution); - if (resolutionError.messageKey === 'noRunnerFound') { - onCheck({ tool: 'toktrack', status: 'not_found' }); - } - throw resolutionError; - } - - if (isPackageToktrackRunner(runner)) { - onProgress( - createAutoImportMessageEvent('warmingUpPackageRunner', { - runner: runner.label, - }), - ); - } - - let versionResult; - try { - versionResult = await runToktrack(runner, ['--version'], { - timeoutMs: getToktrackRunnerTimeouts(runner).versionCheckMs, - }); - } catch (error) { - if (isPackageToktrackRunner(runner) && error?.timedOut) { - throw createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('packageRunnerWarmupTimedOut', { - runner: runner.label, - seconds: getTimeoutSeconds(getToktrackRunnerTimeouts(runner).versionCheckMs), - }), - ), - 'packageRunnerWarmupTimedOut', - { - runner: runner.label, - seconds: getTimeoutSeconds(getToktrackRunnerTimeouts(runner).versionCheckMs), - }, - ); - } - - throw createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('toktrackVersionCheckFailed', { - message: summarizeCommandError(error), - }), - ), - 'toktrackVersionCheckFailed', - { - message: summarizeCommandError(error), - }, - ); - } - - onCheck({ - tool: 'toktrack', - status: 'found', - method: runner.label, - version: parseToktrackVersionOutput(versionResult), - }); - onProgress( - createAutoImportMessageEvent('loadingUsageData', { - command: runner.displayCommand, - }), - ); - - let rawJson; - try { - rawJson = await runToktrack(runner, ['daily', '--json'], { - streamStderr: true, - onStderr: (line) => { - onOutput(line); - }, - signalOnClose, - timeoutMs: getToktrackRunnerTimeouts(runner).importMs, - }); - } catch (error) { - if (error?.timedOut) { - throw createAutoImportError( - formatAutoImportMessageEvent( - 
createAutoImportMessageEvent('toktrackExecutionTimedOut', { - runner: runner.label, - seconds: getTimeoutSeconds(getToktrackRunnerTimeouts(runner).importMs), - }), - ), - 'toktrackExecutionTimedOut', - { - runner: runner.label, - seconds: getTimeoutSeconds(getToktrackRunnerTimeouts(runner).importMs), - }, - ); - } - - throw createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('toktrackExecutionFailed', { - message: summarizeCommandError(error), - }), - ), - 'toktrackExecutionFailed', - { - message: summarizeCommandError(error), - }, - ); - } - - let parsedJson; - try { - parsedJson = JSON.parse(rawJson); - } catch (error) { - throw createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('toktrackInvalidJson', { - message: summarizeCommandError(error), - }), - ), - 'toktrackInvalidJson', - { - message: summarizeCommandError(error), - }, - ); - } - - let normalized; - try { - normalized = normalizeIncomingData(parsedJson); - } catch (error) { - throw createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('toktrackInvalidData', { - message: summarizeCommandError(error), - }), - ), - 'toktrackInvalidData', - { - message: summarizeCommandError(error), - }, - ); - } - - await withSettingsAndDataMutationLock(async () => { - await writeData(normalized); - await updateDataLoadState({ - lastLoadedAt: new Date().toISOString(), - lastLoadSource: source, - }); - }); - - return { - days: normalized.daily.length, - totalCost: normalized.totals.totalCost, - }; - } finally { - clearInterval(progressInterval); - activeLease.release(); - } - } - - async function runStartupAutoLoad({ - source = 'cli-auto-load', - log = console.log, - errorLog = console.error, - } = {}) { - log('Auto-load enabled, starting import...'); - - try { - const result = await performAutoImport({ - source, - onCheck: (event) => { - if (event.status === 'found') { - log(`toktrack found (${event.method}, 
v${event.version})`); - } - }, - onProgress: (event) => { - log(formatAutoImportMessageEvent(event)); - }, - onOutput: (line) => { - log(line); - }, - }); - - log(`Auto-load complete: imported ${result.days} days, ${result.totalCost}.`); - return result; - } catch (error) { - errorLog(`Auto-load failed: ${error.message}`); - errorLog('Dashboard will start without newly imported data.'); - return null; - } - } - - function getToktrackLatestLookupTimeoutMs() { - return toktrackLatestLookupTimeoutMs; - } - - function resetLatestToktrackVersionCache() { - latestToktrackVersionStatusCache.reset(); - } return { - acquireAutoImportLease, - commandExists, - createAutoImportMessageEvent, - formatAutoImportMessageEvent, - getExecutableName, - getLocalToktrackDisplayCommand, - getToktrackLatestLookupTimeoutMs, - getToktrackRunnerTimeouts, - isAutoImportRunning: autoImportLease.isActive, - lookupLatestToktrackVersion, - parseToktrackVersionOutput, - performAutoImport, - resetLatestToktrackVersionCache, - resolveToktrackRunner, - runCommandWithSpawn, - runStartupAutoLoad, - runToktrack, - toAutoImportErrorEvent, - toAutoImportRunnerResolutionError, + acquireAutoImportLease: importExecutor.acquireAutoImportLease, + commandExists: commandRunner.commandExists, + createAutoImportMessageEvent: messages.createAutoImportMessageEvent, + formatAutoImportMessageEvent: messages.formatAutoImportMessageEvent, + getExecutableName: commandRunner.getExecutableName, + getLocalToktrackDisplayCommand: runnerResolver.getLocalToktrackDisplayCommand, + getToktrackLatestLookupTimeoutMs: latestVersionLookup.getToktrackLatestLookupTimeoutMs, + getToktrackRunnerTimeouts: runnerResolver.getToktrackRunnerTimeouts, + isAutoImportRunning: importExecutor.isAutoImportRunning, + lookupLatestToktrackVersion: latestVersionLookup.lookupLatestToktrackVersion, + parseToktrackVersionOutput: runnerResolver.parseToktrackVersionOutput, + performAutoImport: importExecutor.performAutoImport, + 
resetLatestToktrackVersionCache: latestVersionLookup.resetLatestToktrackVersionCache, + resolveToktrackRunner: runnerResolver.resolveToktrackRunner, + runCommandWithSpawn: commandRunner.runCommandWithSpawn, + runStartupAutoLoad: importExecutor.runStartupAutoLoad, + runToktrack: runnerResolver.runToktrack, + toAutoImportErrorEvent: messages.toAutoImportErrorEvent, + toAutoImportRunnerResolutionError: runnerResolver.toAutoImportRunnerResolutionError, }; } diff --git a/server/auto-import-runtime/command-runner.js b/server/auto-import-runtime/command-runner.js new file mode 100644 index 0000000..d8e476b --- /dev/null +++ b/server/auto-import-runtime/command-runner.js @@ -0,0 +1,233 @@ +function createAutoImportCommandRunner({ + processObject = process, + spawnCrossPlatform, + isWindows, + processTerminationGraceMs, +}) { + function getExecutableName(baseName, forceWindows = isWindows) { + if (!forceWindows) { + return baseName; + } + + switch (baseName) { + case 'npm': + return 'npm.cmd'; + case 'bun': + case 'bunx': + return 'bun.exe'; + case 'npx': + return 'npx.cmd'; + default: + return baseName; + } + } + + function spawnCommand(command, args, options = {}) { + return spawnCrossPlatform(command, args, { + ...options, + windowsHide: options.windowsHide ?? 
true, + }); + } + + function commandExists(command, args = ['--version']) { + return new Promise((resolve) => { + const child = spawnCommand(command, args, { stdio: 'ignore' }); + child.on('error', () => resolve(false)); + child.on('close', (code) => resolve(code === 0)); + }); + } + + function formatCommandForDisplay(command, args = []) { + return [command, ...args].join(' ').trim(); + } + + function createCommandError( + message, + { command, args = [], stdout = '', stderr = '', exitCode = null, timedOut = false } = {}, + ) { + const error = new Error(message); + error.command = command; + error.args = args; + error.stdout = stdout; + error.stderr = stderr; + error.exitCode = exitCode; + error.timedOut = timedOut; + return error; + } + + function terminateChildProcess(child) { + if (!child || child.exitCode !== null) { + return; + } + + child.kill('SIGTERM'); + + const forceKillTimeout = setTimeout(() => { + if (child.exitCode === null) { + child.kill('SIGKILL'); + } + }, processTerminationGraceMs); + forceKillTimeout.unref?.(); + + child.once('close', () => { + clearTimeout(forceKillTimeout); + }); + } + + function runCommand( + command, + args, + { + env = processObject.env, + streamStderr = false, + onStderr, + signalOnClose, + timeoutMs = null, + } = {}, + ) { + return runCommandWithSpawn(command, args, { + env, + streamStderr, + onStderr, + signalOnClose, + timeoutMs, + spawnImpl: spawnCommand, + }); + } + + function runCommandWithSpawn( + command, + args, + { + env = processObject.env, + streamStderr = false, + onStderr, + signalOnClose, + timeoutMs = null, + spawnImpl = spawnCommand, + } = {}, + ) { + return new Promise((resolve, reject) => { + const commandLabel = formatCommandForDisplay(command, args); + let child; + + try { + child = spawnImpl(command, args, { + stdio: ['ignore', 'pipe', 'pipe'], + env, + }); + } catch (error) { + reject( + createCommandError(error?.message || `Could not start ${commandLabel}.`, { + command, + args, + }), + ); + return; 
+ } + + let stdout = ''; + let stderr = ''; + let finished = false; + let timeoutId = null; + let timeoutError = null; + + const settle = (handler, value) => { + if (finished) { + return; + } + finished = true; + if (timeoutId) { + clearTimeout(timeoutId); + } + handler(value); + }; + + if (signalOnClose) { + signalOnClose(() => terminateChildProcess(child)); + } + + if (typeof timeoutMs === 'number' && Number.isFinite(timeoutMs) && timeoutMs > 0) { + timeoutId = setTimeout(() => { + timeoutError = createCommandError( + `Command timed out after ${timeoutMs}ms: ${commandLabel}`, + { + command, + args, + stdout, + stderr, + timedOut: true, + }, + ); + terminateChildProcess(child); + }, timeoutMs); + timeoutId.unref?.(); + } + + child.stdout.on('data', (chunk) => { + stdout += chunk.toString(); + }); + + child.stderr.on('data', (chunk) => { + const line = chunk.toString(); + stderr += line; + if (streamStderr && onStderr && line.trim()) { + onStderr(line.trimEnd()); + } + }); + + child.on('error', (error) => + settle( + reject, + createCommandError(error.message || `Could not start ${commandLabel}.`, { + command, + args, + stdout, + stderr, + }), + ), + ); + child.on('close', (code) => { + if (finished) { + return; + } + if (timeoutError) { + settle(reject, timeoutError); + return; + } + if (code === 0) { + settle(resolve, stdout.trimEnd()); + return; + } + settle( + reject, + createCommandError( + stderr.trim() || stdout.trim() || `Command exited with code ${code}: ${commandLabel}`, + { + command, + args, + stdout, + stderr, + exitCode: code, + }, + ), + ); + }); + }); + } + + return { + commandExists, + createCommandError, + formatCommandForDisplay, + getExecutableName, + runCommand, + runCommandWithSpawn, + spawnCommand, + terminateChildProcess, + }; +} + +module.exports = { + createAutoImportCommandRunner, +}; diff --git a/server/auto-import-runtime/import-executor.js b/server/auto-import-runtime/import-executor.js new file mode 100644 index 0000000..ceea5e4 --- 
/dev/null +++ b/server/auto-import-runtime/import-executor.js @@ -0,0 +1,269 @@ +const { formatCurrency, formatDayCount, formatErrorMessage } = require('../runtime-formatters'); + +function createAutoImportExecutor({ + createExclusiveRuntimeLease, + messages, + runnerResolver, + normalizeIncomingData, + withSettingsAndDataMutationLock, + writeData, + updateDataLoadState, +}) { + const { + createAutoImportError, + createAutoImportMessageEvent, + formatAutoImportMessageEvent, + getTimeoutSeconds, + summarizeCommandError, + } = messages; + const { + getToktrackRunnerTimeouts, + isPackageToktrackRunner, + parseToktrackVersionOutput, + resolveToktrackRunnerWithDiagnostics, + runToktrack, + toAutoImportRunnerResolutionError, + } = runnerResolver; + + function createAutoImportAlreadyRunningError() { + return createAutoImportError( + 'An auto-import is already running. Please wait.', + 'autoImportRunning', + ); + } + + const autoImportLease = createExclusiveRuntimeLease({ + createAlreadyRunningError: createAutoImportAlreadyRunningError, + }); + + function acquireAutoImportLease() { + return autoImportLease.acquire(); + } + + async function performAutoImport({ + source = 'auto-import', + onCheck = () => {}, + onProgress = () => {}, + onOutput = () => {}, + signalOnClose, + lease = null, + } = {}) { + const ownsLease = !lease; + const activeLease = lease || acquireAutoImportLease(); + let progressSeconds = 0; + const progressInterval = setInterval(() => { + progressSeconds += 5; + onProgress(createAutoImportMessageEvent('processingUsageData', { seconds: progressSeconds })); + }, 5000); + progressInterval.unref?.(); + + try { + onCheck({ tool: 'toktrack', status: 'checking' }); + onProgress(createAutoImportMessageEvent('startingLocalImport')); + + const resolution = await resolveToktrackRunnerWithDiagnostics(); + const runner = resolution.runner; + if (!runner) { + const resolutionError = toAutoImportRunnerResolutionError(resolution); + if (resolutionError.messageKey === 
'noRunnerFound') { + onCheck({ tool: 'toktrack', status: 'not_found' }); + } + throw resolutionError; + } + + if (isPackageToktrackRunner(runner)) { + onProgress( + createAutoImportMessageEvent('warmingUpPackageRunner', { + runner: runner.label, + }), + ); + } + + let versionResult; + try { + versionResult = await runToktrack(runner, ['--version'], { + timeoutMs: getToktrackRunnerTimeouts(runner).versionCheckMs, + }); + } catch (error) { + if (isPackageToktrackRunner(runner) && error?.timedOut) { + throw createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('packageRunnerWarmupTimedOut', { + runner: runner.label, + seconds: getTimeoutSeconds(getToktrackRunnerTimeouts(runner).versionCheckMs), + }), + ), + 'packageRunnerWarmupTimedOut', + { + runner: runner.label, + seconds: getTimeoutSeconds(getToktrackRunnerTimeouts(runner).versionCheckMs), + }, + ); + } + + throw createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('toktrackVersionCheckFailed', { + message: summarizeCommandError(error), + }), + ), + 'toktrackVersionCheckFailed', + { + message: summarizeCommandError(error), + }, + ); + } + + onCheck({ + tool: 'toktrack', + status: 'found', + method: runner.label, + version: parseToktrackVersionOutput(versionResult), + }); + onProgress( + createAutoImportMessageEvent('loadingUsageData', { + command: runner.displayCommand, + }), + ); + + let rawJson; + try { + rawJson = await runToktrack(runner, ['daily', '--json'], { + streamStderr: true, + onStderr: (line) => { + onOutput(line); + }, + signalOnClose, + timeoutMs: getToktrackRunnerTimeouts(runner).importMs, + }); + } catch (error) { + if (error?.timedOut) { + throw createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('toktrackExecutionTimedOut', { + runner: runner.label, + seconds: getTimeoutSeconds(getToktrackRunnerTimeouts(runner).importMs), + }), + ), + 'toktrackExecutionTimedOut', + { + runner: runner.label, + 
seconds: getTimeoutSeconds(getToktrackRunnerTimeouts(runner).importMs), + }, + ); + } + + throw createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('toktrackExecutionFailed', { + message: summarizeCommandError(error), + }), + ), + 'toktrackExecutionFailed', + { + message: summarizeCommandError(error), + }, + ); + } + + let parsedJson; + try { + parsedJson = JSON.parse(rawJson); + } catch (error) { + throw createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('toktrackInvalidJson', { + message: summarizeCommandError(error), + }), + ), + 'toktrackInvalidJson', + { + message: summarizeCommandError(error), + }, + ); + } + + let normalized; + try { + normalized = normalizeIncomingData(parsedJson); + } catch (error) { + throw createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('toktrackInvalidData', { + message: summarizeCommandError(error), + }), + ), + 'toktrackInvalidData', + { + message: summarizeCommandError(error), + }, + ); + } + + await withSettingsAndDataMutationLock(async () => { + await writeData(normalized); + await updateDataLoadState({ + lastLoadedAt: new Date().toISOString(), + lastLoadSource: source, + }); + }); + + return { + days: normalized.daily.length, + totalCost: normalized.totals.totalCost, + }; + } finally { + clearInterval(progressInterval); + if (ownsLease) { + activeLease.release(); + } + } + } + + async function runStartupAutoLoad({ + source = 'cli-auto-load', + log = console.log, + errorLog = console.error, + } = {}) { + log('Auto-load enabled, starting import...'); + + try { + const result = await performAutoImport({ + source, + onCheck: (event) => { + if (event.status === 'found') { + log(`toktrack found (${event.method}, v${event.version})`); + } + }, + onProgress: (event) => { + log(formatAutoImportMessageEvent(event)); + }, + onOutput: (line) => { + log(line); + }, + }); + + log( + `Auto-load complete: imported 
${formatDayCount(result.days)}, ${formatCurrency( + result.totalCost, + )}.`, + ); + return result; + } catch (error) { + errorLog(`Auto-load failed: ${formatErrorMessage(error)}`); + errorLog('Dashboard will start without newly imported data.'); + return null; + } + } + + return { + acquireAutoImportLease, + isAutoImportRunning: autoImportLease.isActive, + performAutoImport, + runStartupAutoLoad, + }; +} + +module.exports = { + createAutoImportExecutor, +}; diff --git a/server/auto-import-runtime/latest-version.js b/server/auto-import-runtime/latest-version.js new file mode 100644 index 0000000..6e52cbe --- /dev/null +++ b/server/auto-import-runtime/latest-version.js @@ -0,0 +1,114 @@ +const exactSemverPattern = + /^(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)(?:-[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?(?:\+[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?$/; + +function validateLatestVersionOutput(output) { + const latestVersion = String(output).trim(); + + if (!latestVersion) { + return { + latestVersion: null, + message: 'npm returned no toktrack version.', + ok: false, + }; + } + + if (!exactSemverPattern.test(latestVersion)) { + return { + latestVersion: null, + message: `npm returned an invalid toktrack version: ${latestVersion}`, + ok: false, + }; + } + + return { + latestVersion, + message: null, + ok: true, + }; +} + +function createLatestToktrackVersionLookup({ + createExpiringAsyncCache, + commandRunner, + processObject = process, + toktrackPackageName, + toktrackVersion, + npxCacheDir, + toktrackLatestLookupTimeoutMs, + toktrackLatestCacheSuccessTtlMs, + toktrackLatestCacheFailureTtlMs, +}) { + const latestToktrackVersionStatusCache = createExpiringAsyncCache({ + load: async () => { + try { + const latestVersionResult = validateLatestVersionOutput( + await commandRunner.runCommand( + commandRunner.getExecutableName('npm'), + ['view', `${toktrackPackageName}@latest`, 'version'], + { + env: { + ...processObject.env, + npm_config_cache: npxCacheDir, + }, + timeoutMs: 
toktrackLatestLookupTimeoutMs, + }, + ), + ); + + if (!latestVersionResult.ok) { + return { + configuredVersion: toktrackVersion, + latestVersion: null, + isLatest: null, + lookupStatus: 'malformed-output', + message: latestVersionResult.message, + }; + } + + return { + configuredVersion: toktrackVersion, + latestVersion: latestVersionResult.latestVersion, + isLatest: latestVersionResult.latestVersion === toktrackVersion, + lookupStatus: 'ok', + }; + } catch (error) { + return { + configuredVersion: toktrackVersion, + latestVersion: null, + isLatest: null, + lookupStatus: 'failed', + message: + error instanceof Error && error.message.trim() + ? error.message.trim() + : 'Could not determine the latest toktrack version.', + }; + } + }, + getTtlMs: (value) => + value.lookupStatus === 'ok' + ? toktrackLatestCacheSuccessTtlMs + : toktrackLatestCacheFailureTtlMs, + }); + + async function lookupLatestToktrackVersion() { + return latestToktrackVersionStatusCache.lookup(); + } + + function getToktrackLatestLookupTimeoutMs() { + return toktrackLatestLookupTimeoutMs; + } + + function resetLatestToktrackVersionCache() { + latestToktrackVersionStatusCache.reset(); + } + + return { + getToktrackLatestLookupTimeoutMs, + lookupLatestToktrackVersion, + resetLatestToktrackVersionCache, + }; +} + +module.exports = { + createLatestToktrackVersionLookup, +}; diff --git a/server/auto-import-runtime/messages.js b/server/auto-import-runtime/messages.js new file mode 100644 index 0000000..ba920fe --- /dev/null +++ b/server/auto-import-runtime/messages.js @@ -0,0 +1,93 @@ +function createAutoImportMessageEvent(key, vars = {}) { + return { + key, + vars, + }; +} + +function createAutoImportError(message, key, vars = {}) { + const error = new Error(message); + error.messageKey = key; + error.messageVars = vars; + return error; +} + +function summarizeCommandError(error, fallbackMessage = 'Unknown error') { + if (error instanceof Error && error.message.trim()) { + return error.message.trim(); 
+ } + + return fallbackMessage; +} + +function getTimeoutSeconds(timeoutMs) { + return Math.max(1, Math.ceil(Number(timeoutMs) / 1000)); +} + +function createAutoImportMessages({ toktrackVersion }) { + function toAutoImportErrorEvent(error) { + if (error && typeof error.messageKey === 'string') { + return createAutoImportMessageEvent(error.messageKey, error.messageVars || {}); + } + + return createAutoImportMessageEvent('errorPrefix', { + message: error && error.message ? error.message : 'Unknown error', + }); + } + + function formatAutoImportMessageEvent(event) { + switch (event?.key) { + case 'startingLocalImport': + return 'Starting toktrack import...'; + case 'warmingUpPackageRunner': + return `Preparing ${event.vars?.runner || 'package runner'} (the first run may take longer while toktrack is downloaded)...`; + case 'loadingUsageData': + return `Loading usage data via ${event.vars?.command || 'unknown command'}...`; + case 'processingUsageData': + return `Processing usage data... (${event.vars?.seconds || 0}s)`; + case 'autoImportRunning': + return 'An auto-import is already running. Please wait.'; + case 'noRunnerFound': + return 'No local toktrack, Bun, or npm exec installation found.'; + case 'localToktrackVersionMismatch': + return `Local toktrack v${event.vars?.detectedVersion || 'unknown'} does not match the required v${event.vars?.expectedVersion || toktrackVersion}.`; + case 'localToktrackFailed': + return `Local toktrack could not be started: ${event.vars?.message || 'Unknown error'}`; + case 'packageRunnerFailed': + return `No compatible bunx or npm exec runner succeeded: ${event.vars?.message || 'Unknown error'}`; + case 'packageRunnerWarmupTimedOut': + return `${event.vars?.runner || 'The package runner'} took longer than ${event.vars?.seconds || 0}s to prepare toktrack. The first run may need to download the package first. 
Please try again or verify network access.`; + case 'toktrackVersionCheckFailed': + return `Toktrack was found, but the version check failed: ${event.vars?.message || 'Unknown error'}`; + case 'toktrackExecutionFailed': + return `Toktrack failed while loading usage data: ${event.vars?.message || 'Unknown error'}`; + case 'toktrackExecutionTimedOut': + return `Toktrack did not finish loading usage data within ${event.vars?.seconds || 0}s via ${event.vars?.runner || 'the selected runner'}. Please try again.`; + case 'toktrackInvalidJson': + return `Toktrack returned invalid JSON output: ${event.vars?.message || 'Unknown error'}`; + case 'toktrackInvalidData': + return `Toktrack returned data that TTDash could not process: ${event.vars?.message || 'Unknown error'}`; + case 'errorPrefix': + return `Error: ${event.vars?.message || 'Unknown error'}`; + default: + return 'Auto-import update'; + } + } + + return { + createAutoImportError, + createAutoImportMessageEvent, + formatAutoImportMessageEvent, + getTimeoutSeconds, + summarizeCommandError, + toAutoImportErrorEvent, + }; +} + +module.exports = { + createAutoImportError, + createAutoImportMessageEvent, + createAutoImportMessages, + getTimeoutSeconds, + summarizeCommandError, +}; diff --git a/server/auto-import-runtime/toktrack-runners.js b/server/auto-import-runtime/toktrack-runners.js new file mode 100644 index 0000000..e1fcf82 --- /dev/null +++ b/server/auto-import-runtime/toktrack-runners.js @@ -0,0 +1,295 @@ +function createToktrackRunnerResolver({ + fs, + processObject = process, + commandRunner, + messages, + toktrackPackageSpec, + toktrackVersion, + toktrackLocalBin, + npxCacheDir, + isWindows, + toktrackLocalRunnerProbeTimeoutMs, + toktrackLocalRunnerVersionCheckTimeoutMs, + toktrackLocalRunnerImportTimeoutMs, + toktrackPackageRunnerProbeTimeoutMs, + toktrackPackageRunnerVersionCheckTimeoutMs, + toktrackPackageRunnerImportTimeoutMs, + probeLog = () => {}, +}) { + const { + createAutoImportError, + 
createAutoImportMessageEvent, + formatAutoImportMessageEvent, + getTimeoutSeconds, + summarizeCommandError, + } = messages; + const { getExecutableName, runCommand } = commandRunner; + + function parseToktrackVersionOutput(output) { + return String(output) + .trim() + .replace(/^toktrack\s+/, ''); + } + + function getLocalToktrackDisplayCommand(forceWindows = isWindows) { + if (processObject.env.TTDASH_TOKTRACK_LOCAL_BIN) { + return `${toktrackLocalBin} daily --json`; + } + + return forceWindows + ? 'node_modules\\.bin\\toktrack.cmd daily --json' + : 'node_modules/.bin/toktrack daily --json'; + } + + function createLocalToktrackRunner() { + return { + command: toktrackLocalBin, + prefixArgs: [], + env: processObject.env, + method: 'local', + label: 'local toktrack', + displayCommand: getLocalToktrackDisplayCommand(), + }; + } + + function createBunxToktrackRunner() { + return { + command: getExecutableName('bunx'), + prefixArgs: isWindows ? ['x', toktrackPackageSpec] : [toktrackPackageSpec], + env: processObject.env, + method: 'bunx', + label: 'bunx', + displayCommand: `bunx ${toktrackPackageSpec} daily --json`, + }; + } + + function createNpxToktrackRunner() { + return { + command: getExecutableName('npx'), + prefixArgs: ['--yes', toktrackPackageSpec], + env: { + ...processObject.env, + npm_config_cache: npxCacheDir, + }, + method: 'npm', + label: 'npm exec', + displayCommand: `npx --yes ${toktrackPackageSpec} daily --json`, + }; + } + + function isPackageToktrackRunner(runner) { + return runner?.method === 'bunx' || runner?.method === 'npm'; + } + + function getToktrackRunnerTimeouts(runner) { + if (isPackageToktrackRunner(runner)) { + return { + probeMs: toktrackPackageRunnerProbeTimeoutMs, + versionCheckMs: toktrackPackageRunnerVersionCheckTimeoutMs, + importMs: toktrackPackageRunnerImportTimeoutMs, + }; + } + + return { + probeMs: toktrackLocalRunnerProbeTimeoutMs, + versionCheckMs: toktrackLocalRunnerVersionCheckTimeoutMs, + importMs: 
toktrackLocalRunnerImportTimeoutMs, + }; + } + + function runToktrack( + runner, + args, + { streamStderr = false, onStderr, signalOnClose, timeoutMs = null } = {}, + ) { + return runCommand(runner.command, [...runner.prefixArgs, ...args], { + env: runner.env, + streamStderr, + onStderr, + signalOnClose, + timeoutMs, + }); + } + + async function probeToktrackRunner( + runner, + timeoutMs = getToktrackRunnerTimeouts(runner).probeMs, + ) { + try { + await runToktrack(runner, ['--version'], { timeoutMs }); + return { + ok: true, + errorMessage: null, + timedOut: false, + }; + } catch (error) { + const message = summarizeCommandError(error, `Could not start ${runner.label}.`); + probeLog(`Failed to probe ${runner.label}: ${message}`); + return { + ok: false, + errorMessage: message, + timedOut: Boolean(error?.timedOut), + }; + } + } + + async function resolveToktrackRunnerWithDiagnostics() { + const resolution = { + runner: null, + localVersionMismatch: null, + localFailure: null, + runnerFailures: [], + }; + + if (fs.existsSync(toktrackLocalBin)) { + const localRunner = createLocalToktrackRunner(); + + try { + const localVersion = parseToktrackVersionOutput( + await runToktrack(localRunner, ['--version'], { + timeoutMs: getToktrackRunnerTimeouts(localRunner).probeMs, + }), + ); + if (localVersion === toktrackVersion) { + resolution.runner = localRunner; + return resolution; + } + resolution.localVersionMismatch = { + detectedVersion: localVersion || 'unknown', + expectedVersion: toktrackVersion, + }; + } catch (error) { + resolution.localFailure = summarizeCommandError( + error, + 'The local toktrack binary could not be started.', + ); + } + } + + const bunxRunner = createBunxToktrackRunner(); + const bunxProbe = await probeToktrackRunner(bunxRunner); + if (bunxProbe.ok) { + resolution.runner = bunxRunner; + return resolution; + } + if (bunxProbe.errorMessage) { + resolution.runnerFailures.push({ + label: bunxRunner.label, + message: bunxProbe.errorMessage, + 
timedOut: bunxProbe.timedOut, + }); + } + + const npxRunner = createNpxToktrackRunner(); + const npxProbe = await probeToktrackRunner(npxRunner); + if (npxProbe.ok) { + resolution.runner = npxRunner; + return resolution; + } + if (npxProbe.errorMessage) { + resolution.runnerFailures.push({ + label: npxRunner.label, + message: npxProbe.errorMessage, + timedOut: npxProbe.timedOut, + }); + } + + return resolution; + } + + async function resolveToktrackRunner() { + const resolution = await resolveToktrackRunnerWithDiagnostics(); + return resolution.runner; + } + + function toAutoImportRunnerResolutionError(resolution) { + if (resolution.localVersionMismatch) { + return createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent( + 'localToktrackVersionMismatch', + resolution.localVersionMismatch, + ), + ), + 'localToktrackVersionMismatch', + resolution.localVersionMismatch, + ); + } + + if (resolution.localFailure) { + return createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('localToktrackFailed', { + message: resolution.localFailure, + }), + ), + 'localToktrackFailed', + { + message: resolution.localFailure, + }, + ); + } + + if (resolution.runnerFailures.length > 0) { + const timedOutRunnerFailures = resolution.runnerFailures.filter( + (failure) => failure.timedOut, + ); + if ( + timedOutRunnerFailures.length > 0 && + timedOutRunnerFailures.length === resolution.runnerFailures.length + ) { + const runners = timedOutRunnerFailures.map((failure) => failure.label).join(' / '); + const seconds = getTimeoutSeconds(toktrackPackageRunnerProbeTimeoutMs); + return createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('packageRunnerWarmupTimedOut', { + runner: runners, + seconds, + }), + ), + 'packageRunnerWarmupTimedOut', + { + runner: runners, + seconds, + }, + ); + } + + const runnerFailuresMessage = resolution.runnerFailures + .map((failure) => `${failure.label}: 
${failure.message}`) + .join(' | '); + + return createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('packageRunnerFailed', { + message: runnerFailuresMessage, + }), + ), + 'packageRunnerFailed', + { + message: runnerFailuresMessage, + }, + ); + } + + return createAutoImportError( + 'No local toktrack, Bun, or npm exec installation found.', + 'noRunnerFound', + ); + } + + return { + getLocalToktrackDisplayCommand, + getToktrackRunnerTimeouts, + isPackageToktrackRunner, + parseToktrackVersionOutput, + resolveToktrackRunner, + resolveToktrackRunnerWithDiagnostics, + runToktrack, + toAutoImportRunnerResolutionError, + }; +} + +module.exports = { + createToktrackRunnerResolver, +}; diff --git a/server/runtime-formatters.js b/server/runtime-formatters.js new file mode 100644 index 0000000..f200a8a --- /dev/null +++ b/server/runtime-formatters.js @@ -0,0 +1,60 @@ +function formatCurrency(value, locale = 'de-CH') { + const numericValue = Number(value) || 0; + const fractionDigits = Math.abs(numericValue) >= 100 ? 0 : 2; + + // Toktrack reports usage cost in USD; server logs keep the app's default Swiss locale. + return new Intl.NumberFormat(locale, { + style: 'currency', + currency: 'USD', + minimumFractionDigits: fractionDigits, + maximumFractionDigits: fractionDigits, + }).format(numericValue); +} + +/** Formats rounded whole-number counts for server startup/status logs. */ +function formatInteger(value, locale = 'de-CH') { + return new Intl.NumberFormat(locale).format(Math.round(Number(value) || 0)); +} + +// Server CLI/status copy is English; locale only controls number formatting. +function formatDayCount(value, locale = 'de-CH') { + const roundedValue = Math.round(Number(value) || 0); + const dayLabel = roundedValue === 1 ? 
'day' : 'days'; + + return `${formatInteger(roundedValue, locale)} ${dayLabel}`; +} + +function formatErrorMessage(error, fallbackMessage = 'Unknown error') { + if (error instanceof Error && error.message.trim()) { + return error.message.trim(); + } + + if (typeof error === 'string' && error.trim()) { + return error.trim(); + } + + if (error === null || error === undefined) { + return fallbackMessage; + } + + if (Object.prototype.toString.call(error) === '[object Object]') { + try { + const serializedError = JSON.stringify(error); + if (serializedError && serializedError.trim()) { + return serializedError; + } + } catch { + return fallbackMessage; + } + } + + const message = String(error).trim(); + return message || fallbackMessage; +} + +module.exports = { + formatCurrency, + formatDayCount, + formatErrorMessage, + formatInteger, +}; diff --git a/server/startup-runtime.js b/server/startup-runtime.js index fe2e567..e15e2a1 100644 --- a/server/startup-runtime.js +++ b/server/startup-runtime.js @@ -1,15 +1,9 @@ -function formatCurrency(value, locale = 'de-CH') { - return new Intl.NumberFormat(locale, { - style: 'currency', - currency: 'USD', - minimumFractionDigits: value >= 100 ? 0 : 2, - maximumFractionDigits: value >= 100 ? 0 : 2, - }).format(value || 0); -} - -function formatInteger(value, locale = 'de-CH') { - return new Intl.NumberFormat(locale).format(value || 0); -} +const { + formatCurrency, + formatDayCount, + formatErrorMessage, + formatInteger, +} = require('./runtime-formatters'); function createStartupRuntime({ fs, @@ -83,9 +77,7 @@ function createStartupRuntime({ const totalCost = formatCurrency(normalized.totals?.totalCost || 0); const totalTokens = formatInteger(normalized.totals?.totalTokens || 0); const days = normalized.daily?.length || 0; - const dailyCount = formatInteger(days); - const dayLabel = days === 1 ? 
'day' : 'days'; - return `${dailyCount} ${dayLabel}, ${totalCost}, ${totalTokens} tokens`; + return `${formatDayCount(days)}, ${totalCost}, ${totalTokens} tokens`; } catch { return 'present, but unreadable'; } @@ -208,9 +200,13 @@ function createStartupRuntime({ }); markStartupAutoLoadCompleted(); - log(`Auto-load complete: imported ${result.days} days, ${formatCurrency(result.totalCost)}.`); + log( + `Auto-load complete: imported ${formatDayCount(result.days)}, ${formatCurrency( + result.totalCost, + )}.`, + ); } catch (error) { - errorLog(`Auto-load failed: ${error.message}`); + errorLog(`Auto-load failed: ${formatErrorMessage(error)}`); errorLog('Dashboard will start without newly imported data.'); } } @@ -228,5 +224,7 @@ function createStartupRuntime({ module.exports = { createStartupRuntime, formatCurrency, + formatDayCount, + formatErrorMessage, formatInteger, }; diff --git a/tests/architecture/import-patterns.ts b/tests/architecture/import-patterns.ts new file mode 100644 index 0000000..fc17256 --- /dev/null +++ b/tests/architecture/import-patterns.ts @@ -0,0 +1,14 @@ +export function escapeRegex(value: string) { + return value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&') +} + +export function createForbiddenImportPattern(importPath: string) { + const escapedPath = escapeRegex(importPath) + const importBoundary = importPath.endsWith('/') + ? 
escapedPath + : String.raw`${escapedPath}(?=(?:['"./]|$))` + + return new RegExp( + String.raw`(?:require\s*\(\s*['"]${importBoundary}|from\s*['"]${importBoundary}|import\s*\(\s*['"]${importBoundary})`, + ) +} diff --git a/tests/architecture/server-auto-import-runtime-boundaries.test.ts b/tests/architecture/server-auto-import-runtime-boundaries.test.ts new file mode 100644 index 0000000..cd0320e --- /dev/null +++ b/tests/architecture/server-auto-import-runtime-boundaries.test.ts @@ -0,0 +1,57 @@ +import { readFileSync } from 'node:fs' +import path from 'node:path' +import { describe, expect, it } from 'vitest' +import { createForbiddenImportPattern } from './import-patterns' + +function readRepoFile(relativePath: string) { + return readFileSync(path.resolve(process.cwd(), relativePath), 'utf8') +} + +const autoImportRuntimeServiceFiles = [ + 'server/auto-import-runtime/messages.js', + 'server/auto-import-runtime/command-runner.js', + 'server/auto-import-runtime/toktrack-runners.js', + 'server/auto-import-runtime/latest-version.js', + 'server/auto-import-runtime/import-executor.js', +] + +const forbiddenRuntimeImports = [ + '../background-runtime', + '../data-runtime', + '../http-router', + '../routes/', + '../server-lifecycle', + '../startup-runtime', +] + +describe('server auto-import runtime boundary contract', () => { + it('matches forbidden runtime imports with extensions and subpaths', () => { + expect("require('../background-runtime.js')").toMatch( + createForbiddenImportPattern('../background-runtime'), + ) + expect("from '../routes/auto-import-routes.js'").toMatch( + createForbiddenImportPattern('../routes/'), + ) + }) + + it('keeps auto-import-runtime.js as the auto-import composition facade', () => { + const source = readRepoFile('server/auto-import-runtime.js') + + expect(source).toContain("require('./auto-import-runtime/messages')") + expect(source).toContain("require('./auto-import-runtime/command-runner')") + 
expect(source).toContain("require('./auto-import-runtime/toktrack-runners')") + expect(source).toContain("require('./auto-import-runtime/latest-version')") + expect(source).toContain("require('./auto-import-runtime/import-executor')") + expect(source).toContain('function createAutoImportRuntime') + }) + + it('keeps auto-import runtime services independent from HTTP and sibling runtimes', () => { + for (const serviceFile of autoImportRuntimeServiceFiles) { + const source = readRepoFile(serviceFile) + + for (const importPath of forbiddenRuntimeImports) { + expect(source).not.toMatch(createForbiddenImportPattern(importPath)) + } + } + }) +}) diff --git a/tests/architecture/server-data-runtime-boundaries.test.ts b/tests/architecture/server-data-runtime-boundaries.test.ts index 28cb0bc..0b1ba70 100644 --- a/tests/architecture/server-data-runtime-boundaries.test.ts +++ b/tests/architecture/server-data-runtime-boundaries.test.ts @@ -1,6 +1,7 @@ import { readFileSync } from 'node:fs' import path from 'node:path' import { describe, expect, it } from 'vitest' +import { createForbiddenImportPattern } from './import-patterns' function readRepoFile(relativePath: string) { return readFileSync(path.resolve(process.cwd(), relativePath), 'utf8') @@ -20,19 +21,14 @@ const forbiddenRuntimeImports = [ '../background-runtime', ] -function escapeRegex(value: string) { - return value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&') -} - -function createForbiddenImportPattern(importPath: string) { - const escapedPath = escapeRegex(importPath) - - return new RegExp( - String.raw`(?:require\s*\(\s*['"]${escapedPath}(?=(?:['"/]|$))|from\s*['"]${escapedPath}(?=(?:['"/]|$)))`, - ) -} - describe('server data runtime boundary contract', () => { + it('matches forbidden runtime imports with extensions and subpaths', () => { + expect("require('../auto-import-runtime.js')").toMatch( + createForbiddenImportPattern('../auto-import-runtime'), + ) + expect("from 
'../routes/usage-routes.js'").toMatch(createForbiddenImportPattern('../routes/')) + }) + it('keeps data-runtime.js as the persistence composition facade', () => { const source = readRepoFile('server/data-runtime.js') diff --git a/tests/unit/server-helpers-auto-import-executor.test.ts b/tests/unit/server-helpers-auto-import-executor.test.ts new file mode 100644 index 0000000..7051d2d --- /dev/null +++ b/tests/unit/server-helpers-auto-import-executor.test.ts @@ -0,0 +1,299 @@ +import { afterEach, describe, expect, it, vi } from 'vitest' +import { + EventEmitter, + TOKTRACK_VERSION, + createAutoImportRuntime, + resetServerHelperTestState, +} from './server-helpers.shared' + +type FakeSpawnOutcome = { + code?: number + stderr?: string + stdout?: string +} + +class FakeChildProcess extends EventEmitter { + stdout = new EventEmitter() + stderr = new EventEmitter() + exitCode: number | null = null + + kill(signal: string) { + if (this.exitCode !== null) { + return + } + + this.exitCode = signal === 'SIGKILL' ? 137 : 143 + queueMicrotask(() => this.emit('close', this.exitCode)) + } +} + +function createSpawnSequence(outcomes: FakeSpawnOutcome[]) { + const pendingOutcomes = [...outcomes] + + return vi.fn(() => { + const child = new FakeChildProcess() + const outcome = pendingOutcomes.shift() ?? { code: 0, stdout: '' } + + queueMicrotask(() => { + if (outcome.stdout) child.stdout.emit('data', Buffer.from(outcome.stdout)) + if (outcome.stderr) child.stderr.emit('data', Buffer.from(outcome.stderr)) + child.exitCode = outcome.code ?? 
0 + child.emit('close', child.exitCode) + }) + + return child + }) +} + +function createRuntimeWithSpawn( + spawnImpl: ReturnType, + options: { + normalizeIncomingData?: (input: unknown) => unknown + updateDataLoadState?: ReturnType + withSettingsAndDataMutationLock?: ReturnType + writeData?: ReturnType + } = {}, +) { + return createAutoImportRuntime({ + fs: { + existsSync: () => false, + }, + processObject: { env: {} }, + spawnCrossPlatform: spawnImpl, + normalizeIncomingData: options.normalizeIncomingData ?? ((input: unknown) => input), + withSettingsAndDataMutationLock: + options.withSettingsAndDataMutationLock ?? + vi.fn(async (operation: () => Promise) => operation()), + writeData: options.writeData ?? vi.fn(), + updateDataLoadState: options.updateDataLoadState ?? vi.fn(async () => undefined), + toktrackPackageName: 'toktrack', + toktrackPackageSpec: `toktrack@${TOKTRACK_VERSION}`, + toktrackVersion: TOKTRACK_VERSION, + toktrackLocalBin: '/missing/toktrack', + npxCacheDir: '/tmp/ttdash-test-npx-cache', + isWindows: false, + processTerminationGraceMs: 1000, + toktrackLocalRunnerProbeTimeoutMs: 7000, + toktrackLocalRunnerVersionCheckTimeoutMs: 7000, + toktrackLocalRunnerImportTimeoutMs: 60000, + toktrackPackageRunnerProbeTimeoutMs: 45000, + toktrackPackageRunnerVersionCheckTimeoutMs: 45000, + toktrackPackageRunnerImportTimeoutMs: 60000, + toktrackLatestLookupTimeoutMs: 15000, + toktrackLatestCacheSuccessTtlMs: 5 * 60 * 1000, + toktrackLatestCacheFailureTtlMs: 60 * 1000, + probeLog: () => undefined, + }) +} + +afterEach(() => { + resetServerHelperTestState() +}) + +describe('server helper utilities: auto-import executor behavior', () => { + it('imports toktrack JSON through the mutation lock and reports structured progress', async () => { + const rawPayload = { daily: [{ date: '2026-05-01' }] } + const normalizedPayload = { + daily: [{ date: '2026-05-01' }], + totals: { totalCost: 12.5 }, + } + const spawnImpl = createSpawnSequence([ + { stdout: `toktrack 
${TOKTRACK_VERSION}\n` }, + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: JSON.stringify(rawPayload), stderr: 'runner progress\n' }, + ]) + const normalizeIncomingData = vi.fn(() => normalizedPayload) + const writeData = vi.fn() + const updateDataLoadState = vi.fn(async () => undefined) + const lockEvents: string[] = [] + const withSettingsAndDataMutationLock = vi.fn(async (operation: () => Promise) => { + lockEvents.push('lock') + const result = await operation() + lockEvents.push('unlock') + return result + }) + const runtime = createRuntimeWithSpawn(spawnImpl, { + normalizeIncomingData, + updateDataLoadState, + withSettingsAndDataMutationLock, + writeData, + }) + const checks: Array> = [] + const progress: Array<{ key: string; vars?: Record }> = [] + const output: string[] = [] + + await expect( + runtime.performAutoImport({ + source: 'unit-test', + onCheck: (event) => checks.push(event), + onProgress: (event) => progress.push(event), + onOutput: (line) => output.push(line), + }), + ).resolves.toEqual({ + days: 1, + totalCost: 12.5, + }) + + expect(normalizeIncomingData).toHaveBeenCalledWith(rawPayload) + expect(withSettingsAndDataMutationLock).toHaveBeenCalledTimes(1) + expect(lockEvents).toEqual(['lock', 'unlock']) + expect(writeData).toHaveBeenCalledWith(normalizedPayload) + expect(updateDataLoadState).toHaveBeenCalledWith( + expect.objectContaining({ + lastLoadSource: 'unit-test', + }), + ) + expect(checks).toEqual([ + { tool: 'toktrack', status: 'checking' }, + { + tool: 'toktrack', + status: 'found', + method: 'bunx', + version: TOKTRACK_VERSION, + }, + ]) + expect(progress.map((event) => event.key)).toEqual([ + 'startingLocalImport', + 'warmingUpPackageRunner', + 'loadingUsageData', + ]) + expect(output).toEqual(['runner progress']) + expect(runtime.isAutoImportRunning()).toBe(false) + }) + + it('surfaces normalizer failures as structured invalid-data errors without writing data', async () => { + const spawnImpl = createSpawnSequence([ + { 
stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: JSON.stringify({ daily: [] }) }, + ]) + const writeData = vi.fn() + const updateDataLoadState = vi.fn(async () => undefined) + const runtime = createRuntimeWithSpawn(spawnImpl, { + normalizeIncomingData: () => { + throw new Error('Unsupported usage payload') + }, + updateDataLoadState, + writeData, + }) + + await expect(runtime.performAutoImport()).rejects.toMatchObject({ + messageKey: 'toktrackInvalidData', + messageVars: { + message: 'Unsupported usage payload', + }, + }) + + expect(writeData).not.toHaveBeenCalled() + expect(updateDataLoadState).not.toHaveBeenCalled() + expect(runtime.isAutoImportRunning()).toBe(false) + }) + + it('rejects a second import while the singleton lease is active without spawning toktrack', async () => { + const spawnImpl = createSpawnSequence([]) + const runtime = createRuntimeWithSpawn(spawnImpl) + const lease = runtime.acquireAutoImportLease() + + try { + await expect(runtime.performAutoImport()).rejects.toMatchObject({ + messageKey: 'autoImportRunning', + }) + expect(spawnImpl).not.toHaveBeenCalled() + expect(runtime.isAutoImportRunning()).toBe(true) + } finally { + lease.release() + } + + expect(runtime.isAutoImportRunning()).toBe(false) + }) + + it('leaves caller-owned leases active until the caller releases them', async () => { + const spawnImpl = createSpawnSequence([ + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: JSON.stringify({ daily: [{ date: '2026-05-01' }] }) }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl, { + normalizeIncomingData: () => ({ + daily: [{ date: '2026-05-01' }], + totals: { totalCost: 1.23 }, + }), + }) + const lease = runtime.acquireAutoImportLease() + + try { + await expect( + runtime.performAutoImport({ + lease, + }), + ).resolves.toEqual({ + days: 1, + totalCost: 1.23, + }) + expect(runtime.isAutoImportRunning()).toBe(true) + } 
finally { + lease.release() + } + + expect(runtime.isAutoImportRunning()).toBe(false) + }) + + it('formats startup auto-load totals with the shared server currency formatter', async () => { + const spawnImpl = createSpawnSequence([ + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: JSON.stringify({ daily: [{ date: '2026-05-01' }] }) }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl, { + normalizeIncomingData: () => ({ + daily: [{ date: '2026-05-01' }], + totals: { totalCost: 1.23 }, + }), + }) + const logs: string[] = [] + const errors: string[] = [] + + await expect( + runtime.runStartupAutoLoad({ + log: (message) => logs.push(message), + errorLog: (message) => errors.push(message), + }), + ).resolves.toEqual({ + days: 1, + totalCost: 1.23, + }) + + expect(logs).toContain('Auto-load complete: imported 1 day, $ 1.23.') + expect(errors).toEqual([]) + }) + + it('logs non-error startup auto-load failures defensively', async () => { + const spawnImpl = createSpawnSequence([ + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: JSON.stringify({ daily: [{ date: '2026-05-01' }] }) }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl, { + normalizeIncomingData: () => ({ + daily: [{ date: '2026-05-01' }], + totals: { totalCost: 1.23 }, + }), + withSettingsAndDataMutationLock: vi.fn(async () => { + throw 'lock failed' + }), + }) + const errors: string[] = [] + + await expect( + runtime.runStartupAutoLoad({ + log: () => undefined, + errorLog: (message) => errors.push(message), + }), + ).resolves.toBeNull() + + expect(errors).toEqual([ + 'Auto-load failed: lock failed', + 'Dashboard will start without newly imported data.', + ]) + }) +}) diff --git a/tests/unit/server-helpers-runner-core.test.ts b/tests/unit/server-helpers-runner-core.test.ts index d8de98d..cd13d4e 100644 --- a/tests/unit/server-helpers-runner-core.test.ts +++ 
b/tests/unit/server-helpers-runner-core.test.ts @@ -57,7 +57,7 @@ function createSpawnSequence(outcomes: FakeSpawnOutcome[]) { function createRuntimeWithSpawn( spawnImpl: ReturnType, - options: { localBinExists?: boolean } = {}, + options: { latestLookupTimeoutMs?: number; localBinExists?: boolean } = {}, ) { return createAutoImportRuntime({ fs: { @@ -83,9 +83,10 @@ function createRuntimeWithSpawn( toktrackPackageRunnerProbeTimeoutMs: 45000, toktrackPackageRunnerVersionCheckTimeoutMs: 45000, toktrackPackageRunnerImportTimeoutMs: 60000, - toktrackLatestLookupTimeoutMs: 15000, + toktrackLatestLookupTimeoutMs: options.latestLookupTimeoutMs ?? 15000, toktrackLatestCacheSuccessTtlMs: 5 * 60 * 1000, toktrackLatestCacheFailureTtlMs: 60 * 1000, + probeLog: (message: string) => console.warn(message), }) } @@ -183,10 +184,12 @@ describe('server helper utilities: toktrack runner core behavior', () => { it('returns a structured warning when the latest toktrack version lookup times out', async () => { vi.useFakeTimers() - const runtime = createRuntimeWithSpawn(createSpawnSequence([{ hang: true }])) + const runtime = createRuntimeWithSpawn(createSpawnSequence([{ hang: true }]), { + latestLookupTimeoutMs: 50, + }) try { - const statusPromise = runtime.lookupLatestToktrackVersion(50) + const statusPromise = runtime.lookupLatestToktrackVersion() await vi.advanceTimersByTimeAsync(50) await expect(statusPromise).resolves.toMatchObject({ @@ -201,6 +204,18 @@ describe('server helper utilities: toktrack runner core behavior', () => { } }) + it('returns a structured warning when the latest toktrack version lookup is malformed', async () => { + const runtime = createRuntimeWithSpawn(createSpawnSequence([{ stdout: 'not-a-version\n' }])) + + await expect(runtime.lookupLatestToktrackVersion()).resolves.toMatchObject({ + configuredVersion: TOKTRACK_VERSION, + latestVersion: null, + isLatest: null, + lookupStatus: 'malformed-output', + message: 'npm returned an invalid toktrack version: 
not-a-version', + }) + }) + it('reuses a cached successful latest-version lookup until the TTL expires', async () => { const spawnImpl = createSpawnSequence([ { stdout: `${TOKTRACK_VERSION}\n` }, diff --git a/tests/unit/server-helpers.shared.ts b/tests/unit/server-helpers.shared.ts index 4d92d0d..80a7d86 100644 --- a/tests/unit/server-helpers.shared.ts +++ b/tests/unit/server-helpers.shared.ts @@ -56,7 +56,16 @@ const { listenOnAvailablePort } = require('../../server/runtime.js') as { } const { createAutoImportRuntime } = require('../../server/auto-import-runtime.js') as { createAutoImportRuntime: (options: Record) => { + acquireAutoImportLease: () => { release: () => void } commandExists: (command: string, args?: string[]) => Promise + createAutoImportMessageEvent: ( + key: string, + vars?: Record, + ) => { key: string; vars: Record } + formatAutoImportMessageEvent: (event: { + key: string + vars?: Record + }) => string getExecutableName: (baseName: string, isWindows?: boolean) => string getLocalToktrackDisplayCommand: (isWindows?: boolean) => string getToktrackLatestLookupTimeoutMs: () => number @@ -65,15 +74,29 @@ const { createAutoImportRuntime } = require('../../server/auto-import-runtime.js versionCheckMs: number importMs: number } - lookupLatestToktrackVersion: (timeoutMs?: number) => Promise<{ + lookupLatestToktrackVersion: () => Promise<{ configuredVersion: string latestVersion: string | null isLatest: boolean | null - lookupStatus: 'ok' | 'failed' + lookupStatus: 'ok' | 'failed' | 'malformed-output' message?: string }> + isAutoImportRunning: () => boolean + performAutoImport: (options?: { + source?: string + onCheck?: (event: Record) => void + onProgress?: (event: { key: string; vars?: Record }) => void + onOutput?: (line: string) => void + signalOnClose?: (close: () => void) => void + lease?: { release: () => void } | null + }) => Promise<{ days: number; totalCost: number }> resetLatestToktrackVersionCache: () => void parseToktrackVersionOutput: (output: 
string) => string + runStartupAutoLoad: (options?: { + source?: string + log?: (message: string) => void + errorLog?: (message: string) => void + }) => Promise<{ days: number; totalCost: number } | null> resolveToktrackRunner: () => Promise<{ command: string prefixArgs: string[] @@ -127,6 +150,10 @@ const { createAutoImportRuntime } = require('../../server/auto-import-runtime.js } }, ) => Promise + toAutoImportErrorEvent: (error: Error) => { + key: string + vars?: Record + } } } @@ -178,6 +205,7 @@ const autoImportRuntime = createAutoImportRuntime({ toktrackLatestLookupTimeoutMs: 15000, toktrackLatestCacheSuccessTtlMs: 5 * 60 * 1000, toktrackLatestCacheFailureTtlMs: 60 * 1000, + probeLog: (message: string) => console.warn(message), }) const { commandExists, diff --git a/tests/unit/startup-runtime.test.ts b/tests/unit/startup-runtime.test.ts index 94c1d40..879bce5 100644 --- a/tests/unit/startup-runtime.test.ts +++ b/tests/unit/startup-runtime.test.ts @@ -3,7 +3,7 @@ import { describe, expect, it, vi } from 'vitest' import { TOKTRACK_VERSION } from '../../shared/toktrack-version.js' const require = createRequire(import.meta.url) -const { createStartupRuntime, formatCurrency, formatInteger } = +const { createStartupRuntime, formatCurrency, formatDayCount, formatErrorMessage, formatInteger } = require('../../server/startup-runtime.js') as { createStartupRuntime: (options: Record) => { describeDataFile: () => string @@ -14,6 +14,8 @@ const { createStartupRuntime, formatCurrency, formatInteger } = writeLocalAuthSessionFile: (url: string, runtimeInstance: { id: string }) => void } formatCurrency: (value: number) => string + formatDayCount: (value: number) => string + formatErrorMessage: (error: unknown) => string formatInteger: (value: number) => string } @@ -102,7 +104,13 @@ describe('startup runtime', () => { const { dataRuntime, runtime } = createStartupRuntimeFixture() expect(formatCurrency(123.45)).toBe('$ 123') + expect(formatCurrency(-123.45)).toBe('$-123') + 
expect(formatDayCount(1)).toBe('1 day') + expect(formatDayCount(2)).toBe('2 days') + expect(formatErrorMessage(' plain failure ')).toBe('plain failure') + expect(formatErrorMessage({ code: 'E_TTDASH' })).toBe('{"code":"E_TTDASH"}') expect(formatInteger(12_345)).toBe("12'345") + expect(formatInteger(12_345.6)).toBe("12'346") expect(runtime.describeDataFile()).toBe("1 day, $ 12.34, 5'678 tokens") dataRuntime.readData.mockReturnValue({ @@ -248,4 +256,25 @@ describe('startup runtime', () => { expect(logs).toContain('processingUsageData') expect(logs).toContain('Auto-load complete: imported 2 days, $ 1.23.') }) + + it('logs non-error auto-load failures defensively', async () => { + const markStartupAutoLoadCompleted = vi.fn() + const { errors, runtime } = createStartupRuntimeFixture({ + autoImportRuntime: { + formatAutoImportMessageEvent: (event: { key: string }) => event.key, + performAutoImport: vi.fn(async () => { + throw 'string failure' + }), + }, + markStartupAutoLoadCompleted, + }) + + await runtime.runStartupAutoLoad() + + expect(markStartupAutoLoadCompleted).not.toHaveBeenCalled() + expect(errors).toEqual([ + 'Auto-load failed: string failure', + 'Dashboard will start without newly imported data.', + ]) + }) }) From f5cf2b63c8d7e3dbfc4db3c838793120f43842a3 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Fri, 1 May 2026 16:48:57 +0200 Subject: [PATCH 33/42] Guard shared declaration contracts --- docs/architecture.md | 6 + ... 
=> dashboard-section-renderer-types.d.ts} | 0 .../shared-declaration-contract.test.ts | 329 ++++++++++++++++++ 3 files changed, 335 insertions(+) rename src/components/dashboard/sections/{dashboard-section-renderer-types.ts => dashboard-section-renderer-types.d.ts} (100%) create mode 100644 tests/architecture/shared-declaration-contract.test.ts diff --git a/docs/architecture.md b/docs/architecture.md index 2909198..c25a12e 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -105,6 +105,9 @@ Persisted settings are a shared contract across the frontend bootstrap path and - owns app-level settings defaults, provider-limit normalization, and timestamp/load-source coercion - consumes `shared/dashboard-preferences.js` for dashboard-specific filter/section defaults and normalization - is the only production module that should define persisted settings defaults or normalization rules +- `shared/*.d.ts` + - must expose the same value exports as their sibling CommonJS modules + - are guarded by `tests/architecture/shared-declaration-contract.test.ts` so runtime exports cannot drift from TypeScript consumers silently - `src/lib/app-settings.ts` - is a typed frontend adapter over `shared/app-settings.js` - may keep DOM-only behavior such as `applyTheme`, but must not recompute settings defaults from local helpers @@ -169,6 +172,9 @@ Dashboard-specific presets, static section metadata, and preset date semantics a - `src/components/dashboard/DashboardSections.tsx` - consumes a single `DashboardSectionsViewModel` - should keep section ownership grouped by section bundle instead of reintroducing broad prop lists +- `src/components/dashboard/sections/dashboard-section-renderer-types.d.ts` + - owns the type-only renderer contract shared by section family renderers + - must stay declaration-only because it is intentionally erased from the runtime dependency graph - `src/components/layout/FilterBar.tsx` - owns the public filter bar shell and composes private layout filter 
groups for status, time presets, date range, and provider/model chips - `src/components/layout/FilterBar*.tsx` diff --git a/src/components/dashboard/sections/dashboard-section-renderer-types.ts b/src/components/dashboard/sections/dashboard-section-renderer-types.d.ts similarity index 100% rename from src/components/dashboard/sections/dashboard-section-renderer-types.ts rename to src/components/dashboard/sections/dashboard-section-renderer-types.d.ts diff --git a/tests/architecture/shared-declaration-contract.test.ts b/tests/architecture/shared-declaration-contract.test.ts new file mode 100644 index 0000000..eee5a60 --- /dev/null +++ b/tests/architecture/shared-declaration-contract.test.ts @@ -0,0 +1,329 @@ +import { createRequire } from 'node:module' +import { existsSync, mkdtempSync, readFileSync, rmSync, writeFileSync } from 'node:fs' +import { tmpdir } from 'node:os' +import path from 'node:path' +import * as ts from 'typescript' + +const require = createRequire(import.meta.url) + +interface SharedContractModule { + declarationPath: string + modulePath: string +} + +const sharedContractModules: SharedContractModule[] = [ + { + modulePath: 'shared/app-settings.js', + declarationPath: 'shared/app-settings.d.ts', + }, + { + modulePath: 'shared/dashboard-preferences.js', + declarationPath: 'shared/dashboard-preferences.d.ts', + }, + { + modulePath: 'shared/toktrack-version.js', + declarationPath: 'shared/toktrack-version.d.ts', + }, +] + +function hasExportModifier(node: ts.Node) { + return ts.canHaveModifiers(node) + ? Boolean(node.modifiers?.some((modifier) => modifier.kind === ts.SyntaxKind.ExportKeyword)) + : false +} + +function unwrapExpression(expression: ts.Expression): ts.Expression { + return ts.isParenthesizedExpression(expression) + ? 
unwrapExpression(expression.expression) + : expression +} + +function readExportAssignmentName(statement: ts.ExportAssignment) { + const expression = unwrapExpression(statement.expression) + + if (ts.isIdentifier(expression)) { + return expression.text + } + + if ( + (ts.isClassExpression(expression) || ts.isFunctionExpression(expression)) && + expression.name + ) { + return expression.name.text + } + + return null +} + +function collectBindingIdentifierNames(name: ts.BindingName): string[] { + if (ts.isIdentifier(name)) { + return [name.text] + } + + return name.elements.flatMap((element) => { + if (ts.isOmittedExpression(element)) { + return [] + } + + return collectBindingIdentifierNames(element.name) + }) +} + +function readDeclarationSourceFile(filePath: string) { + return ts.createSourceFile( + filePath, + readFileSync(filePath, 'utf8'), + ts.ScriptTarget.Latest, + true, + ts.ScriptKind.TS, + ) +} + +function resolveDeclarationModulePath(importerPath: string, moduleSpecifier: string) { + if (!moduleSpecifier.startsWith('.')) { + return null + } + + const resolvedBasePath = path.resolve(path.dirname(importerPath), moduleSpecifier) + const candidates = [ + resolvedBasePath, + `${resolvedBasePath}.d.ts`, + `${resolvedBasePath}.ts`, + path.join(resolvedBasePath, 'index.d.ts'), + path.join(resolvedBasePath, 'index.ts'), + ] + + return candidates.find((candidate) => existsSync(candidate)) ?? 
null +} + +function collectStarReExportValueExports( + sourceFile: ts.SourceFile, + statement: ts.ExportDeclaration, + visitedFiles: Set, +) { + if (!statement.moduleSpecifier || !ts.isStringLiteral(statement.moduleSpecifier)) { + return [] + } + + const declarationPath = resolveDeclarationModulePath( + sourceFile.fileName, + statement.moduleSpecifier.text, + ) + if (!declarationPath) { + return [] + } + + return collectDeclarationValueExports(readDeclarationSourceFile(declarationPath), visitedFiles) +} + +function collectDeclarationValueExports( + sourceFile: ts.SourceFile, + visitedFiles = new Set(), +) { + const sourcePath = path.resolve(sourceFile.fileName) + if (visitedFiles.has(sourcePath)) { + return [] + } + visitedFiles.add(sourcePath) + + const exports = new Set() + + for (const statement of sourceFile.statements) { + if ( + hasExportModifier(statement) && + (ts.isFunctionDeclaration(statement) || + ts.isClassDeclaration(statement) || + ts.isEnumDeclaration(statement)) && + statement.name + ) { + exports.add(statement.name.text) + continue + } + + if (hasExportModifier(statement) && ts.isVariableStatement(statement)) { + for (const declaration of statement.declarationList.declarations) { + for (const exportName of collectBindingIdentifierNames(declaration.name)) { + exports.add(exportName) + } + } + continue + } + + if (ts.isExportDeclaration(statement)) { + if (statement.isTypeOnly) { + continue + } + + if (!statement.exportClause) { + for (const exportName of collectStarReExportValueExports( + sourceFile, + statement, + visitedFiles, + )) { + exports.add(exportName) + } + continue + } + + if (ts.isNamespaceExport(statement.exportClause)) { + exports.add(statement.exportClause.name.text) + continue + } + + for (const element of statement.exportClause.elements) { + if (!element.isTypeOnly) { + exports.add(element.name.text) + } + } + } + + if (ts.isExportAssignment(statement)) { + const exportName = readExportAssignmentName(statement) + if (exportName) { 
+ exports.add(exportName) + } + } + } + + return [...exports].sort() +} + +function readDeclarationValueExports(declarationPath: string) { + const absolutePath = path.resolve(process.cwd(), declarationPath) + return collectDeclarationValueExports(readDeclarationSourceFile(absolutePath)) +} + +function readRuntimeExports(modulePath: string) { + const moduleExports = require(path.resolve(process.cwd(), modulePath)) as unknown + + if (moduleExports === null || moduleExports === undefined) { + return [] + } + + if (typeof moduleExports === 'object') { + return Object.keys(moduleExports).sort() + } + + if (typeof moduleExports === 'function' && moduleExports.name) { + return [moduleExports.name] + } + + return ['default'] +} + +function createDeclarationFixture(source: string) { + return ts.createSourceFile('fixture.d.ts', source, ts.ScriptTarget.Latest, true, ts.ScriptKind.TS) +} + +describe('shared runtime declaration contracts', () => { + it('collects named exports and assignment exports from declaration files', () => { + expect( + collectDeclarationValueExports( + createDeclarationFixture(` + export const namedConst: string + export function namedFunction(): void + `), + ), + ).toEqual(['namedConst', 'namedFunction']) + + expect( + collectDeclarationValueExports( + createDeclarationFixture(` + declare const defaultContract: unknown + export default defaultContract + `), + ), + ).toEqual(['defaultContract']) + + expect( + collectDeclarationValueExports( + createDeclarationFixture(` + declare const equalsContract: unknown + export = equalsContract + `), + ), + ).toEqual(['equalsContract']) + }) + + it('collects value exports from binding pattern declarations', () => { + const sourceFile = createDeclarationFixture(` + const valueSource = {} + export const { objectValue, nested: { nestedValue } } = valueSource + export const [firstArrayValue, , { deepArrayValue }] = valueSource + `) + + expect(collectDeclarationValueExports(sourceFile)).toEqual([ + 'deepArrayValue', + 
'firstArrayValue', + 'nestedValue', + 'objectValue', + ]) + }) + + it('skips type-only re-export declarations', () => { + const sourceFile = createDeclarationFixture(` + const runtimeValue = 'runtime' + interface TypeOnlyContract {} + export { runtimeValue, type TypeOnlyContract } + export type { TypeOnlyContract as RenamedTypeOnlyContract } + `) + + expect(collectDeclarationValueExports(sourceFile)).toEqual(['runtimeValue']) + }) + + it('collects value exports from star re-export declarations', () => { + const fixtureRoot = mkdtempSync(path.join(tmpdir(), 'ttdash-declaration-fixture-')) + + try { + const reExportPath = path.join(fixtureRoot, 're-export.d.ts') + writeFileSync( + path.join(fixtureRoot, 'fixture-exports.d.ts'), + ` + export const FIRST_VALUE: string + export function secondValue(): void + export interface IgnoredTypeOnlyContract {} + `, + 'utf8', + ) + + const sourceFile = ts.createSourceFile( + reExportPath, + "export * from './fixture-exports'", + ts.ScriptTarget.Latest, + true, + ts.ScriptKind.TS, + ) + + expect(collectDeclarationValueExports(sourceFile)).toEqual(['FIRST_VALUE', 'secondValue']) + } finally { + rmSync(fixtureRoot, { recursive: true, force: true }) + } + }) + + it('reads named function default exports from CommonJS runtime modules', () => { + const fixtureRoot = mkdtempSync(path.join(tmpdir(), 'ttdash-runtime-fixture-')) + + try { + const namedFunctionPath = path.join(fixtureRoot, 'named-function.cjs') + const primitivePath = path.join(fixtureRoot, 'primitive.cjs') + const emptyPath = path.join(fixtureRoot, 'empty.cjs') + + writeFileSync(namedFunctionPath, 'module.exports = function exportedRuntime() {}', 'utf8') + writeFileSync(primitivePath, 'module.exports = 42', 'utf8') + writeFileSync(emptyPath, 'module.exports = null', 'utf8') + + expect(readRuntimeExports(namedFunctionPath)).toEqual(['exportedRuntime']) + expect(readRuntimeExports(primitivePath)).toEqual(['default']) + expect(readRuntimeExports(emptyPath)).toEqual([]) + } 
finally { + rmSync(fixtureRoot, { recursive: true, force: true }) + } + }) + + it.each(sharedContractModules)( + 'keeps $declarationPath value exports aligned with $modulePath', + ({ declarationPath, modulePath }) => { + expect(readDeclarationValueExports(declarationPath)).toEqual(readRuntimeExports(modulePath)) + }, + ) +}) From 4d31a8fa805b35b475d528ec48cf4df4438f4fac Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Sat, 2 May 2026 12:23:22 +0200 Subject: [PATCH 34/42] v6.3.0: Update changelog --- CHANGELOG.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 503a876..77fb525 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## [6.3.0] - 2026-05-02 + +### Added + +- **Paralleles Verification-Gate für lokale und CI-nahe Prüfläufe** — ein neuer Parallelmodus bündelt die wichtigsten Test- und Build-Oberflächen mit eigener Portsteuerung, damit breite Validierungen schneller und reproduzierbarer laufen +- **Test-Timing-Budgets und Benchmark-Transparenz** — neue Timing-Budgets, Projekt-Benchmarks und dokumentierte Gate-Optionen machen langsame oder driftende Tests besser sichtbar +- **Gezielte Branch-Coverage für Runtime-, Router- und Exportpfade** — zusätzliche Unit-, Frontend- und Integrationstests decken Settings, CSV-Export, Runner-Auflösung, Background-Runtime, Auto-Import-Router und process-adjacent Serverpfade gegen Regressionen ab + +### Improved + +- **Klarere Ownership in Runtime und Dashboard-Struktur** — Auto-Import-, Daten-, HTTP-Router- und Dashboard-Section-Verantwortlichkeiten wurden weiter in fokussierte Services und Contracts aufgeteilt +- **Stabilere parallele Testausführung** — Playwright-Specs isolieren ihren Zustand robuster, Vitest-/Frontend-Parallelismus ist gezielter abgestimmt, Toktrack-Cache-Tests nutzen Fake-Spawn-Pfade, und Chart-Legend-Tests bleiben leichtergewichtig +- **Bessere Diagnosequalität in Test- und Verify-Gates** — parallele Gate-Fehler, 
Runner-Auflösungsfehler und Scope-Fehler liefern klarere Hinweise, während Test-Gates und Review-Baselines aktualisiert und dokumentiert wurden + +### Fixed + +- **Flaky Cross-Test-State in Browser- und Parallelgates** — Playwright-Smoke-Scope, Spec-State-Isolation und parallele Gate-Optionsfehler sind gegen versteckte Zustandskopplung und schwer lesbare Fehlschläge abgesichert +- **Unvollständige Fehlerpfadabdeckung in Server-Runtime und Auto-Import** — Background-Concurrency, Router-Auto-Import-Fehler, process-adjacent Runtime-Kanten und Runner-Resolution-Branches sind jetzt gezielt getestet +- **Shared-Declaration- und Architekturdrift** — gemeinsame Deklarationsverträge werden explizit geschützt, damit geteilte Typ- und Boundary-Erwartungen nicht unbemerkt auseinanderlaufen + +### Commits + +- Enthält alle Branch-Commits seit `origin/main`: `f5cf2b6`, `104d663`, `aec28b2`, `892d551`, `c4e3049`, `475aee4`, `5a701b7`, `0834a07`, `ca29012`, `04aa7a6`, `be994fd`, `708c3c7`, `7e60008`, `efb9cee`, `eab08c5`, `f2b3400`, `7555c0e`, `1657822`, `b7fbe80`, `67db759`, `44c073d`, `c4882ae`, `2932697`, `944a049`, `e9d54fa`, `117cfeb`, `b5cac6a`, `3a4285d`, `cb31d35`, `ab49513`, `c4abb2b`, `5f69ade`, `acb22ab` + ## [6.2.9] - 2026-04-28 ### Improved From 0a58eb6666f1719376704be7c0590b94eb64f978 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Sat, 2 May 2026 12:53:35 +0200 Subject: [PATCH 35/42] Fix E2E reset state flake --- tests/e2e/dashboard-settings-backups.spec.ts | 13 ++- tests/e2e/fixtures.ts | 36 +++++- tests/e2e/helpers.ts | 112 +++++++++++++++++-- 3 files changed, 147 insertions(+), 14 deletions(-) diff --git a/tests/e2e/dashboard-settings-backups.spec.ts b/tests/e2e/dashboard-settings-backups.spec.ts index 46a7a91..4d91c49 100644 --- a/tests/e2e/dashboard-settings-backups.spec.ts +++ b/tests/e2e/dashboard-settings-backups.spec.ts @@ -2,6 +2,7 @@ import fsPromises from 'node:fs/promises' import { expect, test } from './fixtures' import { createApiAuthHeaders, + 
createApiUrl, createTrustedMutationHeaders, dailyViewPattern, filterStatusPattern, @@ -192,13 +193,15 @@ test('manages settings and backup imports through the settings dialog using isol await expect .poll(async () => { - const response = await page.request.get('/api/usage', { headers: createApiAuthHeaders() }) + const response = await page.request.get(createApiUrl('/api/usage', baseURL), { + headers: createApiAuthHeaders(), + }) const usage = await response.json() return usage.daily[0]?.date }) .toBe('2026-03-31') - const mergedUsageResponse = await page.request.get('/api/usage', { + const mergedUsageResponse = await page.request.get(createApiUrl('/api/usage', baseURL), { headers: createApiAuthHeaders(), }) expect(mergedUsageResponse.ok()).toBe(true) @@ -256,7 +259,7 @@ test('manages settings and backup imports through the settings dialog using isol expect(settingsImportResponse.ok()).toBe(true) await expect(page.getByRole('button', { name: exportSettingsButtonPattern })).toBeVisible() - const importedSettingsResponse = await page.request.get('/api/settings', { + const importedSettingsResponse = await page.request.get(createApiUrl('/api/settings', baseURL), { headers: createApiAuthHeaders(), }) expect(importedSettingsResponse.ok()).toBe(true) @@ -289,7 +292,7 @@ test('loads persisted settings on a fresh browser start and applies them immedia await resetAppState(page, baseURL) const trustedMutationHeaders = createTrustedMutationHeaders(baseURL) - const patchSettingsResponse = await page.request.patch('/api/settings', { + const patchSettingsResponse = await page.request.patch(createApiUrl('/api/settings', baseURL), { headers: trustedMutationHeaders, data: { language: 'en', @@ -317,7 +320,7 @@ test('loads persisted settings on a fresh browser start and applies them immedia }) expect(patchSettingsResponse.ok()).toBe(true) - const uploadResponse = await page.request.post('/api/upload', { + const uploadResponse = await page.request.post(createApiUrl('/api/upload', 
baseURL), { headers: trustedMutationHeaders, data: sampleUsage, }) diff --git a/tests/e2e/fixtures.ts b/tests/e2e/fixtures.ts index e8d071f..eb5dc98 100644 --- a/tests/e2e/fixtures.ts +++ b/tests/e2e/fixtures.ts @@ -13,6 +13,10 @@ type WorkerFixtures = { e2eServer: E2EServer } +type LocalAuthSession = { + authorizationHeader?: unknown +} + const startupTimeoutMs = 120_000 const host = process.env.PLAYWRIGHT_TEST_HOST || '127.0.0.1' const basePort = Number(process.env.PLAYWRIGHT_TEST_PORT || '3015') @@ -64,6 +68,24 @@ function createOutputBuffer(serverProcess: ChildProcessWithoutNullStreams) { return () => output.trim() } +function readApiAuthHeaders(authSessionPath: string) { + if (!fs.existsSync(authSessionPath)) { + return null + } + + const authSession = JSON.parse(fs.readFileSync(authSessionPath, 'utf-8')) as LocalAuthSession + + if (typeof authSession.authorizationHeader !== 'string') { + throw new Error( + `Playwright auth session is missing an authorization header: ${authSessionPath}`, + ) + } + + return { + Authorization: authSession.authorizationHeader, + } +} + async function waitForServerReady( serverProcess: ChildProcessWithoutNullStreams, baseURL: string, @@ -87,7 +109,19 @@ async function waitForServerReady( }) if (response.status < 400 && fs.existsSync(authSessionPath)) { - return + const authHeaders = readApiAuthHeaders(authSessionPath) + if (authHeaders) { + const apiResponse = await fetch(new URL('/api/usage', baseURL), { + headers: authHeaders, + signal: AbortSignal.timeout(1_000), + }) + + if (apiResponse.status === 200) { + return + } + + lastError = `GET /api/usage returned ${apiResponse.status}` + } } } catch (error) { lastError = error instanceof Error ? 
error.message : String(error) diff --git a/tests/e2e/helpers.ts b/tests/e2e/helpers.ts index a0d97fd..41a7a67 100644 --- a/tests/e2e/helpers.ts +++ b/tests/e2e/helpers.ts @@ -1,7 +1,13 @@ import fs from 'node:fs' import fsPromises from 'node:fs/promises' import path from 'node:path' -import { expect, type Download, type Page } from '@playwright/test' +import { + expect, + type APIRequestContext, + type APIResponse, + type Download, + type Page, +} from '@playwright/test' import { TOKTRACK_VERSION } from '../../shared/toktrack-version.js' export const sampleUsagePath = path.join(process.cwd(), 'examples', 'sample-usage.json') @@ -46,8 +52,80 @@ type InitScriptTarget = { addInitScript: (script: () => void) => Promise } +type ApiRequestMethod = 'DELETE' | 'GET' | 'PATCH' | 'POST' +type ApiRequestOptions = Parameters[1] + +const transientApiRequestErrorPattern = + /(socket hang up|ECONNRESET|ECONNREFUSED|EPIPE|ETIMEDOUT|Request context disposed|Target page, context or browser has been closed)/i +const apiRequestRetryAttempts = 3 +const apiRequestRetryDelayMs = 150 + export const sampleUsage = JSON.parse(fs.readFileSync(sampleUsagePath, 'utf-8')) as SampleUsage +function sleep(ms: number) { + return new Promise((resolve) => setTimeout(resolve, ms)) +} + +function isTransientApiRequestError(error: unknown) { + const message = error instanceof Error ? error.message : String(error) + return transientApiRequestErrorPattern.test(message) +} + +async function apiResponseErrorMessage(response: APIResponse, context: string) { + let body = '' + + try { + body = await response.text() + } catch (error) { + body = `Could not read response body: ${error instanceof Error ? 
error.message : String(error)}` + } + + return `${context} failed with ${response.status()} ${response.statusText()}: ${body}` +} + +async function expectApiResponseOk(response: APIResponse, context: string) { + if (response.ok()) { + return + } + + throw new Error(await apiResponseErrorMessage(response, context)) +} + +async function requestApiWithRetry( + request: APIRequestContext, + method: ApiRequestMethod, + url: string, + options: ApiRequestOptions = {}, +) { + let lastError: unknown + + for (let attempt = 1; attempt <= apiRequestRetryAttempts; attempt += 1) { + try { + return await request.fetch(url, { + ...options, + method, + }) + } catch (error) { + lastError = error + if (attempt === apiRequestRetryAttempts || !isTransientApiRequestError(error)) { + throw error + } + + await sleep(apiRequestRetryDelayMs * attempt) + } + } + + throw lastError +} + +export function createApiUrl(pathname: string, baseURL?: string) { + if (!baseURL) { + throw new Error('Playwright baseURL is required for API requests') + } + + return new URL(pathname, baseURL).toString() +} + export function readLocalAuthSession() { return JSON.parse(fs.readFileSync(getLocalAuthSessionPath(), 'utf-8')) as LocalAuthSession } @@ -71,8 +149,21 @@ export function createTrustedMutationHeaders(baseURL?: string) { export async function resetAppState(page: Page, baseURL?: string) { const trustedMutationHeaders = createTrustedMutationHeaders(baseURL) - await page.request.delete('/api/usage', { headers: trustedMutationHeaders }) - await page.request.delete('/api/settings', { headers: trustedMutationHeaders }) + const usageResponse = await requestApiWithRetry( + page.request, + 'DELETE', + createApiUrl('/api/usage', baseURL), + { headers: trustedMutationHeaders }, + ) + await expectApiResponseOk(usageResponse, 'DELETE /api/usage') + + const settingsResponse = await requestApiWithRetry( + page.request, + 'DELETE', + createApiUrl('/api/settings', baseURL), + { headers: trustedMutationHeaders }, + ) + 
await expectApiResponseOk(settingsResponse, 'DELETE /api/settings') } export async function gotoDashboard(page: Page) { @@ -116,12 +207,17 @@ export async function seedUsage( usageData: SampleUsage = buildRelativeUsageData(), ) { const trustedMutationHeaders = createTrustedMutationHeaders(baseURL) - const uploadResponse = await page.request.post('/api/upload', { - headers: trustedMutationHeaders, - data: usageData, - }) + const uploadResponse = await requestApiWithRetry( + page.request, + 'POST', + createApiUrl('/api/upload', baseURL), + { + headers: trustedMutationHeaders, + data: usageData, + }, + ) - expect(uploadResponse.ok()).toBe(true) + await expectApiResponseOk(uploadResponse, 'POST /api/upload') return usageData } From 851ac771657cf47152739e7e9e00e4079d5c2877 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Sat, 2 May 2026 15:32:30 +0200 Subject: [PATCH 36/42] v6.3.0: Address review feedback --- AGENTS.md | 2 +- docs/testing.md | 1 - package.json | 12 +-- scripts/run-vitest-project-timings.js | 4 +- .../auto-import-runtime/toktrack-runners.js | 10 +- server/data-runtime/file-io.js | 11 +- server/data-runtime/file-locks.js | 10 +- server/data-runtime/import-merge.js | 35 +++--- server/routes/static-routes.js | 9 +- server/routes/usage-routes.js | 33 ++++-- src/components/cards/MonthMetrics.tsx | 2 +- src/components/charts/CostByWeekday.tsx | 40 ++++++- src/lib/data-transforms.ts | 2 +- src/types/index.ts | 1 + tests/e2e/fixtures.ts | 31 +++++- .../dashboard-language-regressions.test.tsx | 14 ++- tests/frontend/filter-bar-presets.test.tsx | 28 ++++- tests/frontend/lazy-dashboard-charts.test.tsx | 30 ++++-- tests/frontend/metric-ratio-locale.test.tsx | 6 +- tests/frontend/requests-over-time.test.tsx | 5 +- tests/unit/background-runtime.test.ts | 1 - tests/unit/dashboard-preferences.test.ts | 4 +- tests/unit/data-runtime-contract.test.ts | 102 ++++++++++++++---- tests/unit/http-router-mutations.test.ts | 24 +++++ tests/unit/http-router-static.test.ts | 33 +++++- 
tests/unit/report-test-timings.test.ts | 2 +- tests/unit/run-vitest-project-timings.test.ts | 5 + tests/unit/server-helpers-file-locks.test.ts | 44 ++++++-- tests/unit/server-helpers-runner-core.test.ts | 38 ++++++- tests/unit/test-pipeline-scripts.test.ts | 11 ++ 30 files changed, 442 insertions(+), 108 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index b9be6c0..5bc8e91 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -37,7 +37,7 @@ When adding tests: - use real subprocesses only when shell/PATH/CLI or cross-process behavior is the thing being tested - split files once they mix unrelated concerns like content, navigation, localization, motion, or keyboard behavior - keep `waitFor(...)` for real eventual consistency only, not as the default assertion pattern -- keep Playwright specs on `tests/e2e/fixtures.ts` and reset state with `resetAppState(...)` or `prepareDashboard(...)` +- keep Playwright fixtures and shared helpers in `tests/e2e/fixtures.ts`; place specs in `tests/e2e` spec files and reset state with `resetAppState(...)` or `prepareDashboard(...)` - when improving coverage, prefer critical branch-heavy runtime modules like `src/lib/api.ts`, `src/hooks/use-usage-data.ts`, and `src/hooks/use-dashboard-controller.ts` over adding another broad dashboard catch-all test Continue to manually verify the main flows affected by the change: dashboard load, auto-import, JSON upload, filtering, and export actions. 
diff --git a/docs/testing.md b/docs/testing.md index bb75b00..a7cab95 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -148,7 +148,6 @@ Prioritize targeted branch coverage in runtime-heavy modules before adding anoth - CI-style Playwright smoke: `npm run test:e2e:ci` - Serial local mirror of the CI gate: `npm run verify:ci` - Optional parallel local gate without Playwright: `npm run verify:parallel` -- Optional parallel local fast path including Playwright, without coverage instrumentation: `PLAYWRIGHT_TEST_PORT=3016 npm run verify:full:parallel` ## Architecture Guardrails diff --git a/package.json b/package.json index cd0afe9..3f5734f 100644 --- a/package.json +++ b/package.json @@ -29,17 +29,17 @@ "start": "node server.js", "start:test-server": "node scripts/start-test-server.js", "test": "npm run test:vitest", - "test:architecture": "vitest run --project architecture --outputFile.junit=./test-results/vitest-architecture.junit.xml", + "test:architecture": "vitest run --project architecture --reporter=default --reporter=junit --outputFile.junit=./test-results/vitest-architecture.junit.xml", "test:static": "npm run format:check && npm run lint && npm run check:deps && npm run typecheck", - "test:unit": "vitest run --project unit --project frontend --project integration --project integration-background --outputFile.junit=./test-results/vitest-main.junit.xml", + "test:unit": "vitest run --project unit --project frontend --project integration --project integration-background --reporter=default --reporter=junit --outputFile.junit=./test-results/vitest-main.junit.xml", "test:unit:watch": "vitest --project unit --project frontend --project integration --project integration-background", "test:vitest": "npm run test:architecture && npm run test:unit", "test:vitest:architecture": "npm run test:architecture", "test:vitest:coverage": "npm run test:unit:coverage", - "test:vitest:unit": "vitest run --project unit --outputFile.junit=./test-results/vitest-unit.junit.xml", - 
"test:vitest:frontend": "vitest run --project frontend --outputFile.junit=./test-results/vitest-frontend.junit.xml", - "test:vitest:integration": "vitest run --project integration --outputFile.junit=./test-results/vitest-integration.junit.xml", - "test:vitest:integration-background": "vitest run --project integration-background --outputFile.junit=./test-results/vitest-integration-background.junit.xml", + "test:vitest:unit": "vitest run --project unit --reporter=default --reporter=junit --outputFile.junit=./test-results/vitest-unit.junit.xml", + "test:vitest:frontend": "vitest run --project frontend --reporter=default --reporter=junit --outputFile.junit=./test-results/vitest-frontend.junit.xml", + "test:vitest:integration": "vitest run --project integration --reporter=default --reporter=junit --outputFile.junit=./test-results/vitest-integration.junit.xml", + "test:vitest:integration-background": "vitest run --project integration-background --reporter=default --reporter=junit --outputFile.junit=./test-results/vitest-integration-background.junit.xml", "test:unit:coverage": "vitest run --coverage --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-coverage.junit.xml", "test:timings": "vitest run --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-timings.junit.xml && node scripts/report-test-timings.js ./test-results/vitest-timings.junit.xml", "test:timings:budget": "vitest run --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-timing-budget.junit.xml && node scripts/report-test-timings.js ./test-results/vitest-timing-budget.junit.xml --max-suite-seconds=20 --max-test-seconds=12", diff --git a/scripts/run-vitest-project-timings.js 
b/scripts/run-vitest-project-timings.js index efa4c05..6a54b05 100644 --- a/scripts/run-vitest-project-timings.js +++ b/scripts/run-vitest-project-timings.js @@ -198,7 +198,9 @@ function run(argv = process.argv.slice(2), streams = process, spawnSyncImpl = sp return 0; } - fs.mkdirSync(options.reportDir, { recursive: true }); + if (!options.dryRun) { + fs.mkdirSync(options.reportDir, { recursive: true }); + } const results = new Map(); diff --git a/server/auto-import-runtime/toktrack-runners.js b/server/auto-import-runtime/toktrack-runners.js index e1fcf82..fbfcc4f 100644 --- a/server/auto-import-runtime/toktrack-runners.js +++ b/server/auto-import-runtime/toktrack-runners.js @@ -31,9 +31,13 @@ function createToktrackRunnerResolver({ .replace(/^toktrack\s+/, ''); } + function getConfiguredLocalToktrackBin() { + return processObject.env.TTDASH_TOKTRACK_LOCAL_BIN || toktrackLocalBin; + } + function getLocalToktrackDisplayCommand(forceWindows = isWindows) { if (processObject.env.TTDASH_TOKTRACK_LOCAL_BIN) { - return `${toktrackLocalBin} daily --json`; + return `${getConfiguredLocalToktrackBin()} daily --json`; } return forceWindows @@ -43,7 +47,7 @@ function createToktrackRunnerResolver({ function createLocalToktrackRunner() { return { - command: toktrackLocalBin, + command: getConfiguredLocalToktrackBin(), prefixArgs: [], env: processObject.env, method: 'local', @@ -141,7 +145,7 @@ function createToktrackRunnerResolver({ runnerFailures: [], }; - if (fs.existsSync(toktrackLocalBin)) { + if (fs.existsSync(getConfiguredLocalToktrackBin())) { const localRunner = createLocalToktrackRunner(); try { diff --git a/server/data-runtime/file-io.js b/server/data-runtime/file-io.js index 8a8d463..b42af74 100644 --- a/server/data-runtime/file-io.js +++ b/server/data-runtime/file-io.js @@ -1,3 +1,5 @@ +const { randomBytes } = require('crypto'); + /** Creates secure JSON file I/O helpers for the data runtime. 
*/ function createDataRuntimeFileIo({ fs, @@ -42,9 +44,14 @@ function createDataRuntimeFileIo({ return error?.code === 'ENOENT'; } + function createAtomicTempPath(filePath) { + const randomSuffix = randomBytes(8).toString('hex'); + return `${filePath}.${processObject.pid}.${Date.now()}.${randomSuffix}.tmp`; + } + function writeJsonAtomic(filePath, data) { ensureDir(path.dirname(filePath)); - const tempPath = `${filePath}.${processObject.pid}.${Date.now()}.tmp`; + const tempPath = createAtomicTempPath(filePath); try { fs.writeFileSync(tempPath, JSON.stringify(data, null, 2), { mode: secureFileMode, @@ -67,7 +74,7 @@ function createDataRuntimeFileIo({ } async function writeJsonAtomicAsync(filePath, data) { - const tempPath = `${filePath}.${processObject.pid}.${Date.now()}.tmp`; + const tempPath = createAtomicTempPath(filePath); const parentDir = path.dirname(filePath); try { diff --git a/server/data-runtime/file-locks.js b/server/data-runtime/file-locks.js index 3b3baca..b4047e3 100644 --- a/server/data-runtime/file-locks.js +++ b/server/data-runtime/file-locks.js @@ -23,16 +23,10 @@ function getInvalidFileLockOptionNames({ invalidOptions.push('runtimeInstanceId'); } if (typeof isWindows !== 'boolean') invalidOptions.push('isWindows'); - if ( - secureDirMode != null && - (!Number.isInteger(secureDirMode) || secureDirMode < 0 || secureDirMode > 0o777) - ) { + if (!Number.isInteger(secureDirMode) || secureDirMode < 0 || secureDirMode > 0o777) { invalidOptions.push('secureDirMode'); } - if ( - secureFileMode != null && - (!Number.isInteger(secureFileMode) || secureFileMode < 0 || secureFileMode > 0o777) - ) { + if (!Number.isInteger(secureFileMode) || secureFileMode < 0 || secureFileMode > 0o777) { invalidOptions.push('secureFileMode'); } if ( diff --git a/server/data-runtime/import-merge.js b/server/data-runtime/import-merge.js index f928eaa..a443af2 100644 --- a/server/data-runtime/import-merge.js +++ b/server/data-runtime/import-merge.js @@ -126,16 +126,18 @@ 
function areUsageDaysEquivalent(left, right) { } function computeUsageTotals(daily) { + const toNumber = (value) => Number(value) || 0; + return daily.reduce( (totals, day) => ({ - inputTokens: totals.inputTokens + (day.inputTokens || 0), - outputTokens: totals.outputTokens + (day.outputTokens || 0), - cacheCreationTokens: totals.cacheCreationTokens + (day.cacheCreationTokens || 0), - cacheReadTokens: totals.cacheReadTokens + (day.cacheReadTokens || 0), - thinkingTokens: totals.thinkingTokens + (day.thinkingTokens || 0), - totalCost: totals.totalCost + (day.totalCost || 0), - totalTokens: totals.totalTokens + (day.totalTokens || 0), - requestCount: totals.requestCount + (day.requestCount || 0), + inputTokens: totals.inputTokens + toNumber(day.inputTokens), + outputTokens: totals.outputTokens + toNumber(day.outputTokens), + cacheCreationTokens: totals.cacheCreationTokens + toNumber(day.cacheCreationTokens), + cacheReadTokens: totals.cacheReadTokens + toNumber(day.cacheReadTokens), + thinkingTokens: totals.thinkingTokens + toNumber(day.thinkingTokens), + totalCost: totals.totalCost + toNumber(day.totalCost), + totalTokens: totals.totalTokens + toNumber(day.totalTokens), + requestCount: totals.requestCount + toNumber(day.requestCount), }), { inputTokens: 0, @@ -208,7 +210,9 @@ function createDataRuntimeImportMerge({ throw new Error('Imported data must contain a daily array.'); } - const validImportedDaily = importedData.daily.filter(hasValidUsageDayDate); + const validImportedDaily = importedData.daily + .filter(hasValidUsageDayDate) + .map(canonicalizeUsageDay); const skippedDays = importedData.daily.length - validImportedDaily.length; const current = currentData && Array.isArray(currentData.daily) && currentData.daily.length > 0 @@ -216,16 +220,11 @@ function createDataRuntimeImportMerge({ : null; if (!current) { - const data = - skippedDays > 0 - ? 
{ - daily: validImportedDaily, - totals: computeUsageTotals(validImportedDaily), - } - : importedData; - return { - data, + data: { + daily: validImportedDaily, + totals: computeUsageTotals(validImportedDaily), + }, summary: { importedDays: importedData.daily.length, addedDays: validImportedDaily.length, diff --git a/server/routes/static-routes.js b/server/routes/static-routes.js index 8604700..6b7a835 100644 --- a/server/routes/static-routes.js +++ b/server/routes/static-routes.js @@ -44,9 +44,14 @@ function createStaticRouteHandler({ const ext = path.extname(reqPath).toLowerCase(); const contentType = mimeTypes[ext] || 'application/octet-stream'; const isHtml = ext === '.html'; - const htmlResponse = isHtml ? prepareHtmlResponse(data.toString('utf8')) : null; + const htmlResponse = + isHtml && typeof prepareHtmlResponse === 'function' + ? prepareHtmlResponse(data.toString('utf8')) + : null; const responseBody = htmlResponse ? htmlResponse.body : data; - const responseSecurityHeaders = htmlResponse ? htmlResponse.headers : securityHeaders; + const responseSecurityHeaders = htmlResponse + ? 
{ ...securityHeaders, ...htmlResponse.headers } + : securityHeaders; res.writeHead(200, { 'Content-Type': contentType, diff --git a/server/routes/usage-routes.js b/server/routes/usage-routes.js index 95a9dc3..8967ac0 100644 --- a/server/routes/usage-routes.js +++ b/server/routes/usage-routes.js @@ -28,20 +28,35 @@ function createUsageRoutes({ json, validateMutationRequest, readMutationBody, da paths: { dataFile }, } = dataRuntime; + function isPlainObject(value) { + return value !== null && typeof value === 'object' && !Array.isArray(value); + } + + function isUsageData(value) { + return ( + isPlainObject(value) && + Array.isArray(value.daily) && + value.daily.every((day) => isPlainObject(day) && typeof day.date === 'string') && + isPlainObject(value.totals) + ); + } + function normalizeIncomingUsagePayload(payload, invalidMessage) { - if (typeof dataRuntime.normalizeIncomingData !== 'function') { - return payload; - } + let nextPayload = payload; - const normalized = dataRuntime.normalizeIncomingData(payload); - // normalizeIncomingData may return undefined to keep the original payload; null is invalid. - if (normalized === undefined) { - return payload; + if (typeof dataRuntime.normalizeIncomingData === 'function') { + const normalized = dataRuntime.normalizeIncomingData(payload); + // normalizeIncomingData may return undefined to keep the original payload; null is invalid. 
+ if (normalized !== undefined) { + nextPayload = normalized; + } } - if (normalized === null) { + + if (!isUsageData(nextPayload)) { throw new Error(invalidMessage); } - return normalized; + + return nextPayload; } async function handleUsageRoutes(apiPath, req, res) { diff --git a/src/components/cards/MonthMetrics.tsx b/src/components/cards/MonthMetrics.tsx index 2ad9ed0..a5407ca 100644 --- a/src/components/cards/MonthMetrics.tsx +++ b/src/components/cards/MonthMetrics.tsx @@ -86,7 +86,7 @@ export function MonthMetrics({ daily, metrics }: MonthMetricsProps) { if (!topModel || cost > topModel.cost) topModel = { name, cost } } - // Days elapsed in the current month so far + // Days elapsed in the current local month so far. const today = new Date() const dayOfMonth = today.getDate() diff --git a/src/components/charts/CostByWeekday.tsx b/src/components/charts/CostByWeekday.tsx index b29300f..8cc08ec 100644 --- a/src/components/charts/CostByWeekday.tsx +++ b/src/components/charts/CostByWeekday.tsx @@ -21,6 +21,44 @@ interface CostByWeekdayProps { data: WeekdayData[] } +const WEEKEND_DAY_FALLBACKS = new Set([ + // Saturday + 'sa', + 'sam', + 'samedi', + 'sab', + 'sabado', + 'sabato', + 'sat', + 'saturday', + // Sunday + 'dim', + 'dimanche', + 'dom', + 'domingo', + 'domenica', + 'so', + 'sonntag', + 'su', + 'sun', + 'sunday', +]) + +function isWeekendEntry(entry: WeekdayData) { + if (entry.weekdayIndex !== undefined) { + return entry.weekdayIndex === 5 || entry.weekdayIndex === 6 + } + + // buildDashboardChartTransforms provides weekdayIndex; day labels are a legacy/external fallback. + const normalizedDay = entry.day + .trim() + .toLocaleLowerCase() + .normalize('NFD') + .replace(/[\u0300-\u036f]/g, '') + .replace(/\.$/, '') + return WEEKEND_DAY_FALLBACKS.has(normalizedDay) +} + /** Renders average cost by weekday. 
*/ export function CostByWeekday({ data }: CostByWeekdayProps) { const { t } = useTranslation() @@ -33,7 +71,7 @@ export function CostByWeekday({ data }: CostByWeekdayProps) { const peakIndex = data.findIndex((d) => d.cost === maxCost) const lowIndex = data.findIndex((d) => d.cost === minCost) const weekendCost = data - .filter((_, index) => index === 5 || index === 6) + .filter((entry) => isWeekendEntry(entry)) .reduce((sum, entry) => sum + entry.cost, 0) const weekTotal = data.reduce((sum, entry) => sum + entry.cost, 0) diff --git a/src/lib/data-transforms.ts b/src/lib/data-transforms.ts index 6a06722..a815c30 100644 --- a/src/lib/data-transforms.ts +++ b/src/lib/data-transforms.ts @@ -295,7 +295,7 @@ export function buildDashboardChartTransforms( const values = weekdayCosts[index] ?? [] const average = values.length > 0 ? values.reduce((sum, value) => sum + value, 0) / values.length : 0 - return { day, cost: average } + return { day, cost: average, weekdayIndex: index } }) return { diff --git a/src/types/index.ts b/src/types/index.ts index 02f995d..fb65c82 100644 --- a/src/types/index.ts +++ b/src/types/index.ts @@ -218,6 +218,7 @@ export interface ModelCostChartPoint extends ChartDataPoint { export interface WeekdayData { day: string cost: number + weekdayIndex?: number } /** Grades the forecast quality based on data density and volatility. 
*/ diff --git a/tests/e2e/fixtures.ts b/tests/e2e/fixtures.ts index eb5dc98..641b191 100644 --- a/tests/e2e/fixtures.ts +++ b/tests/e2e/fixtures.ts @@ -140,15 +140,38 @@ async function stopServer(serverProcess: ChildProcessWithoutNullStreams) { return } + const waitForExit = (timeoutMs: number) => { + if (serverProcess.exitCode !== null) { + return Promise.resolve(true) + } + + return new Promise((resolve) => { + const timeoutId = setTimeout(() => { + cleanup() + resolve(false) + }, timeoutMs) + + const cleanup = () => { + clearTimeout(timeoutId) + serverProcess.off('exit', onExit) + } + + const onExit = () => { + cleanup() + resolve(true) + } + + serverProcess.once('exit', onExit) + }) + } + serverProcess.kill('SIGTERM') - const exited = await Promise.race([ - new Promise((resolve) => serverProcess.once('exit', () => resolve(true))), - sleep(5_000).then(() => false), - ]) + const exited = await waitForExit(5_000) if (!exited && serverProcess.exitCode === null) { serverProcess.kill('SIGKILL') + await waitForExit(2_000) } } diff --git a/tests/frontend/dashboard-language-regressions.test.tsx b/tests/frontend/dashboard-language-regressions.test.tsx index 5ee2ca1..319af2e 100644 --- a/tests/frontend/dashboard-language-regressions.test.tsx +++ b/tests/frontend/dashboard-language-regressions.test.tsx @@ -2,7 +2,7 @@ import { fireEvent, screen } from '@testing-library/react' import type { ComponentProps } from 'react' -import { beforeAll, describe, expect, it, vi } from 'vitest' +import { afterAll, beforeAll, describe, expect, it, vi } from 'vitest' import { PrimaryMetrics } from '@/components/cards/PrimaryMetrics' import { ChartCard } from '@/components/charts/ChartCard' import { CommandPalette } from '@/components/features/command-palette/CommandPalette' @@ -90,11 +90,23 @@ function buildCommandPaletteProps( } describe('Dashboard language regressions', () => { + let originalScrollIntoView: Element['scrollIntoView'] | undefined + beforeAll(async () => { + 
originalScrollIntoView = Element.prototype.scrollIntoView Element.prototype.scrollIntoView = vi.fn() await initI18n('de') }) + afterAll(() => { + if (originalScrollIntoView) { + Element.prototype.scrollIntoView = originalScrollIntoView + return + } + + delete (Element.prototype as { scrollIntoView?: Element['scrollIntoView'] }).scrollIntoView + }) + it('localizes expanded chart actions in German', () => { renderWithAppProviders( { vi.useRealTimers() }) + it('renders preset labels in the shared order', () => { + renderFilterBar() + + const presetLabelByKey = { + '7d': '7D', + '30d': '30D', + month: 'Month', + year: 'Year', + all: 'All', + } as const satisfies Record<(typeof DASHBOARD_QUICK_DATE_PRESETS)[number], string> + const presetLabels = DASHBOARD_QUICK_DATE_PRESETS.map((key) => { + const label = presetLabelByKey[key] + if (!label) throw new Error(`Missing label for preset key: ${key}`) + return label + }) + const renderedPresetLabels = screen + .getAllByRole('button') + .map((button) => button.textContent?.trim() ?? 
'') + .filter((label) => presetLabels.includes(label)) + + expect(renderedPresetLabels).toEqual(presetLabels) + }) + it('derives preset highlighting from the actual date range and clears it for custom ranges or month filters', () => { const sevenDayRange = resolveDashboardPresetRange('7d', new Date()) diff --git a/tests/frontend/lazy-dashboard-charts.test.tsx b/tests/frontend/lazy-dashboard-charts.test.tsx index 2797e67..0d6cace 100644 --- a/tests/frontend/lazy-dashboard-charts.test.tsx +++ b/tests/frontend/lazy-dashboard-charts.test.tsx @@ -185,13 +185,23 @@ function createDailyUsage( } const weekdayData: WeekdayData[] = [ - { day: 'Mo', cost: 3 }, - { day: 'Tu', cost: 9 }, - { day: 'We', cost: 6 }, - { day: 'Th', cost: 4 }, - { day: 'Fr', cost: 7 }, - { day: 'Sa', cost: 2 }, - { day: 'Su', cost: 5 }, + { day: 'Mo', cost: 3, weekdayIndex: 0 }, + { day: 'Sa', cost: 2, weekdayIndex: 5 }, + { day: 'Tu', cost: 9, weekdayIndex: 1 }, + { day: 'Su', cost: 5, weekdayIndex: 6 }, + { day: 'We', cost: 6, weekdayIndex: 2 }, + { day: 'Th', cost: 4, weekdayIndex: 3 }, + { day: 'Fr', cost: 7, weekdayIndex: 4 }, +] + +const legacyWeekdayData: WeekdayData[] = [ + { day: 'Lun', cost: 3 }, + { day: 'Sáb.', cost: 2 }, + { day: 'Mar', cost: 9 }, + { day: 'Dom.', cost: 5 }, + { day: 'Mié', cost: 6 }, + { day: 'Jue', cost: 4 }, + { day: 'Vie', cost: 7 }, ] const tokenData: TokenChartDataPoint[] = [ @@ -245,6 +255,12 @@ describe('lazy dashboard charts', () => { expect(fills.some((fill) => fill?.includes('weekdayLow'))).toBe(true) }) + it('uses localized day labels as a defensive weekend fallback without weekday indices', () => { + renderWithTooltip() + + expect(screen.getByText('Peak: Mar · Low: Sáb. 
· Weekend 19%')).toBeInTheDocument() + }) + it('renders ModelMix for enough model history and skips underspecified input', () => { const modelMixData = [ createDailyUsage('2026-04-01', { claudeCost: 3, gptCost: 7 }), diff --git a/tests/frontend/metric-ratio-locale.test.tsx b/tests/frontend/metric-ratio-locale.test.tsx index 44d4f8a..94714ca 100644 --- a/tests/frontend/metric-ratio-locale.test.tsx +++ b/tests/frontend/metric-ratio-locale.test.tsx @@ -44,6 +44,8 @@ const metrics: DashboardMetrics = { } describe('metric ratio localization', () => { + const daysElapsedInTestMonth = 30 + const testDate = new Date(2026, 3, daysElapsedInTestMonth, 12) const daily: DailyUsage[] = [ { date: '2026-04-02', @@ -75,7 +77,7 @@ describe('metric ratio localization', () => { beforeEach(async () => { vi.useFakeTimers() - vi.setSystemTime(new Date('2026-04-30T12:00:00Z')) + vi.setSystemTime(testDate) vi.stubGlobal( 'IntersectionObserver', class { @@ -132,7 +134,7 @@ describe('metric ratio localization', () => { style: 'percent', minimumFractionDigits: 0, maximumFractionDigits: 0, - }).format(2 / new Date().getDate()) + }).format(2 / daysElapsedInTestMonth) render( diff --git a/tests/frontend/requests-over-time.test.tsx b/tests/frontend/requests-over-time.test.tsx index 66ec148..9dd6026 100644 --- a/tests/frontend/requests-over-time.test.tsx +++ b/tests/frontend/requests-over-time.test.tsx @@ -154,8 +154,11 @@ describe('RequestsOverTime', () => { expect(lineNames).toEqual( expect.arrayContaining([ - 'Claude Opus 4.7', + 'GPT-5.4 7-day avg', + 'Claude Sonnet 4.5 7-day avg', 'Claude Opus 4.7 7-day avg', + 'Gemini 2.5 Pro 7-day avg', + 'GPT-4.1 7-day avg', 'Claude Haiku 4.5 7-day avg', ]), ) diff --git a/tests/unit/background-runtime.test.ts b/tests/unit/background-runtime.test.ts index 90ae188..08846a3 100644 --- a/tests/unit/background-runtime.test.ts +++ b/tests/unit/background-runtime.test.ts @@ -77,7 +77,6 @@ function createTestBackgroundRuntime(overrides: Partial { today: 'false', 
unknown: false, }) + const defaultVisibility = getDefaultDashboardSectionVisibility() expect(visibility.metrics).toBe(false) - expect(visibility.today).toBe(true) + expect(visibility.today).toBe(defaultVisibility.today) expect(visibility).not.toHaveProperty('unknown') const orderedSections = normalizeDashboardSectionOrder(['tables', 'metrics', 'tables', 7]) diff --git a/tests/unit/data-runtime-contract.test.ts b/tests/unit/data-runtime-contract.test.ts index 5457036..1695b99 100644 --- a/tests/unit/data-runtime-contract.test.ts +++ b/tests/unit/data-runtime-contract.test.ts @@ -1,5 +1,6 @@ import { promises as fsPromises } from 'node:fs' import { createRequire } from 'node:module' +import { tmpdir } from 'node:os' import path from 'node:path' import { describe, expect, it, vi } from 'vitest' @@ -93,28 +94,37 @@ function createRuntime({ } describe('data runtime contracts', () => { - it('prefers explicit data, config, and cache directories over platform defaults', () => { - const runtime = createRuntime({ - env: { - TTDASH_CACHE_DIR: '/tmp/ttdash/cache', - TTDASH_CONFIG_DIR: '/tmp/ttdash/config', - TTDASH_DATA_DIR: '/tmp/ttdash/data', - }, - getCliAutoLoadActive: () => true, - platform: 'linux', - }) + it('prefers explicit data, config, and cache directories over platform defaults', async () => { + const runtimeRoot = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-runtime-contract-')) + const cacheDir = path.join(runtimeRoot, 'cache') + const configDir = path.join(runtimeRoot, 'config') + const dataDir = path.join(runtimeRoot, 'data') - expect(runtime.appPaths).toEqual({ - cacheDir: '/tmp/ttdash/cache', - configDir: '/tmp/ttdash/config', - dataDir: '/tmp/ttdash/data', - }) - expect(runtime.paths).toMatchObject({ - dataFile: '/tmp/ttdash/data/data.json', - settingsFile: '/tmp/ttdash/config/settings.json', - npxCacheDir: '/tmp/ttdash/cache/npx-cache', - }) - expect(runtime.readSettings()).toMatchObject({ cliAutoLoadActive: true }) + try { + const runtime = 
createRuntime({ + env: { + TTDASH_CACHE_DIR: cacheDir, + TTDASH_CONFIG_DIR: configDir, + TTDASH_DATA_DIR: dataDir, + }, + getCliAutoLoadActive: () => true, + platform: 'linux', + }) + + expect(runtime.appPaths).toEqual({ + cacheDir, + configDir, + dataDir, + }) + expect(runtime.paths).toMatchObject({ + dataFile: path.join(dataDir, 'data.json'), + settingsFile: path.join(configDir, 'settings.json'), + npxCacheDir: path.join(cacheDir, 'npx-cache'), + }) + expect(runtime.readSettings()).toMatchObject({ cliAutoLoadActive: true }) + } finally { + await fsPromises.rm(runtimeRoot, { recursive: true, force: true }) + } }) it('resolves macOS, Windows, and XDG platform paths without touching the host profile', () => { @@ -315,6 +325,56 @@ describe('data runtime contracts', () => { expect(merge.data.totals).toMatchObject({ totalCost: 0.1, totalTokens: 3 }) }) + it('recomputes imported totals with numeric coercion when no current usage exists', () => { + const runtime = createRuntime() + + const merge = runtime.mergeUsageData(null, { + daily: [ + { + date: '2026-04-03', + inputTokens: '5', + outputTokens: '7', + cacheCreationTokens: '2', + cacheReadTokens: '3', + thinkingTokens: '4', + totalTokens: '21', + totalCost: '1.5', + requestCount: '6', + modelsUsed: ['GPT-5.4'], + modelBreakdowns: [], + }, + ], + totals: { totalCost: 999, totalTokens: 999, requestCount: 999 }, + }) + + expect(merge.summary).toMatchObject({ + importedDays: 1, + addedDays: 1, + skippedDays: 0, + totalDays: 1, + }) + expect(merge.data.daily[0]).toMatchObject({ + inputTokens: 5, + outputTokens: 7, + cacheCreationTokens: 2, + cacheReadTokens: 3, + thinkingTokens: 4, + totalTokens: 21, + totalCost: 1.5, + requestCount: 6, + }) + expect(merge.data.totals).toEqual({ + inputTokens: 5, + outputTokens: 7, + cacheCreationTokens: 2, + cacheReadTokens: 3, + thinkingTokens: 4, + totalCost: 1.5, + totalTokens: 21, + requestCount: 6, + }) + }) + it('rejects usage imports that do not contain a daily array before 
merging', () => { const runtime = createRuntime() diff --git a/tests/unit/http-router-mutations.test.ts b/tests/unit/http-router-mutations.test.ts index 444a9d4..a72f346 100644 --- a/tests/unit/http-router-mutations.test.ts +++ b/tests/unit/http-router-mutations.test.ts @@ -353,6 +353,18 @@ describe('HTTP router mutation errors', () => { expect(body).toEqual({ message: 'Server error' }) }) + it('rejects normalized upload payloads that are not valid usage data', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ daily: [{ date: 42 }], totals: {} })), + }) + + const { res, body } = await request(router, '/api/upload', 'POST') + + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid JSON' }) + expect(dataRuntime.writeData).not.toHaveBeenCalled() + }) + it('returns server errors for usage import write failures', async () => { const usageData = { daily: [{ date: '2026-04-27' }], totals: { totalCost: 1 } } const { router } = createRouter({ @@ -370,6 +382,18 @@ describe('HTTP router mutation errors', () => { expect(body).toEqual({ message: 'Server error' }) }) + it('rejects normalized usage imports that are not valid usage data', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ data: { daily: [{ date: 42 }], totals: {} } })), + }) + + const { res, body } = await request(router, '/api/usage/import', 'POST') + + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid usage backup file' }) + expect(dataRuntime.writeData).not.toHaveBeenCalled() + }) + it('returns service unavailable when toktrack version lookup throws', async () => { const { router } = createRouter({ autoImportRuntimeOverrides: { diff --git a/tests/unit/http-router-static.test.ts b/tests/unit/http-router-static.test.ts index 12e3b6c..4c1d703 100644 --- a/tests/unit/http-router-static.test.ts +++ b/tests/unit/http-router-static.test.ts @@ -27,7 +27,10 @@ class MockResponse { } } 
-function createRouter(readFile: (filePath: string) => Promise) { +function createRouter( + readFile: (filePath: string) => Promise, + prepareHtmlResponse?: (html: string) => { body: string; headers: Record }, +) { return createHttpRouter({ fs: { promises: { @@ -35,6 +38,7 @@ function createRouter(readFile: (filePath: string) => Promise) { }, }, path, + prepareHtmlResponse, staticRoot: '/app/dist', securityHeaders: { 'X-Test-Security': '1' }, httpUtils: { @@ -95,6 +99,33 @@ describe('HTTP router static file handling', () => { expect(res.body).toContain('') }) + it('preserves base security headers when preparing HTML responses', async () => { + const router = createRouter( + async (filePath) => { + if (filePath.endsWith('index.html')) { + return Buffer.from('') + } + throw Object.assign(new Error('missing'), { code: 'ENOENT' }) + }, + (html) => ({ + body: html.replace('', ''), + headers: { + 'Content-Security-Policy': "default-src 'self'; script-src 'nonce-test'", + }, + }), + ) + const res = new MockResponse() + + await router.handleServerRequest({ url: '/dashboard', method: 'GET', headers: {} }, res) + + expect(res.status).toBe(200) + expect(res.headers['X-Test-Security']).toBe('1') + expect(res.headers['Content-Security-Policy']).toBe( + "default-src 'self'; script-src 'nonce-test'", + ) + expect(res.body).toContain('ttdash-csp-nonce') + }) + it('returns not found for missing static assets instead of serving the SPA shell', async () => { const router = createRouter(async (filePath) => { if (filePath.endsWith('index.html')) { diff --git a/tests/unit/report-test-timings.test.ts b/tests/unit/report-test-timings.test.ts index 3cee06e..b11ee84 100644 --- a/tests/unit/report-test-timings.test.ts +++ b/tests/unit/report-test-timings.test.ts @@ -101,7 +101,7 @@ describe('test timing report budgets', () => { '--max-test-seconds=12', ]) - expect(options.junitPath).toMatch(/test-results\/custom\.junit\.xml$/) + expect(options.junitPath.replaceAll('\\', 
'/')).toMatch(/test-results\/custom\.junit\.xml$/) expect(options.warnSuiteSeconds).toBe(1.5) expect(options.warnTestSeconds).toBe(0.25) expect(options.maxSuiteSeconds).toBe(20) diff --git a/tests/unit/run-vitest-project-timings.test.ts b/tests/unit/run-vitest-project-timings.test.ts index 3aaacab..cbc4318 100644 --- a/tests/unit/run-vitest-project-timings.test.ts +++ b/tests/unit/run-vitest-project-timings.test.ts @@ -1,4 +1,5 @@ import { createRequire } from 'node:module' +import fs from 'node:fs' import path from 'node:path' import { describe, expect, it, vi } from 'vitest' @@ -131,6 +132,7 @@ describe('Vitest project timing runner', () => { const stdout = { write: vi.fn() } const stderr = { write: vi.fn() } const spawnSyncImpl = vi.fn(() => ({ status: 0 })) + const mkdirSync = vi.spyOn(fs, 'mkdirSync') const status = timingRunner.run( ['--projects=unit', '--repeat=2', '--dry-run'], @@ -140,8 +142,11 @@ describe('Vitest project timing runner', () => { expect(status).toBe(0) expect(spawnSyncImpl).not.toHaveBeenCalled() + expect(mkdirSync).not.toHaveBeenCalled() expect(stdout.write.mock.calls.join('\n')).toContain('vitest-unit.timing-run-1.junit.xml') expect(stdout.write.mock.calls.join('\n')).toContain('vitest-unit.timing-run-2.junit.xml') + + mkdirSync.mockRestore() }) it('calculates median values for repeated timings', () => { diff --git a/tests/unit/server-helpers-file-locks.test.ts b/tests/unit/server-helpers-file-locks.test.ts index 554b869..a16abdd 100644 --- a/tests/unit/server-helpers-file-locks.test.ts +++ b/tests/unit/server-helpers-file-locks.test.ts @@ -105,6 +105,17 @@ function getMode(filePath: string) { return fs.statSync(filePath).mode & 0o777 } +function escapeRegExp(value: string) { + return value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&') +} + +function expectAtomicTempPath(tempPath: string, targetFile: string, timestamp: number) { + const prefix = `${targetFile}.${process.pid}.${timestamp}.` + + expect(tempPath).toMatch(new 
RegExp(`^${escapeRegExp(prefix)}[a-f0-9]+\\.tmp$`)) + expect(tempPath.length).toBeGreaterThan(prefix.length + '.tmp'.length) +} + function waitForChildMessage( child: ReturnType, timeoutMs: number, @@ -199,6 +210,20 @@ describe('server helper utilities: file mutation locks', () => { dataFile: '/tmp/data.json', }), ).toThrow('runtimeInstanceId, secureDirMode, fileMutationLockTimeoutMs') + + expect(() => + createDataRuntimeFileLocks({ + fsPromises, + path, + processObject: process, + runtimeInstanceId: 'runtime', + isWindows: false, + fileMutationLockTimeoutMs: 100, + fileMutationLockStaleMs: 30_000, + settingsFile: '/tmp/settings.json', + dataFile: '/tmp/data.json', + }), + ).toThrow('secureDirMode, secureFileMode') }) it('serializes operations for the same file path and cleans up locks afterwards', async () => { @@ -489,7 +514,6 @@ dataRuntime.withFileMutationLock(filePath, async () => { it('removes temporary files when atomic async writes fail after creating the temp file', async () => { const targetDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-write-json-')) const targetFile = path.join(targetDir, 'settings.json') - const expectedTempPath = `${targetFile}.${process.pid}.1700000000000.tmp` const renameError = Object.assign(new Error('rename failed'), { code: 'EXDEV' }) const renameSpy = vi.spyOn(fsPromises, 'rename').mockRejectedValue(renameError) @@ -499,8 +523,9 @@ dataRuntime.withFileMutationLock(filePath, async () => { await expect(writeJsonAtomicAsync(targetFile, { ok: true })).rejects.toBe(renameError) expect(renameSpy).toHaveBeenCalled() - expect(unlinkSpy).toHaveBeenCalledWith(expectedTempPath) - expect(existsSync(expectedTempPath)).toBe(false) + const tempPath = unlinkSpy.mock.calls[0]?.[0] as string + expectAtomicTempPath(tempPath, targetFile, 1700000000000) + expect(existsSync(tempPath)).toBe(false) renameSpy.mockRestore() unlinkSpy.mockRestore() @@ -540,7 +565,6 @@ dataRuntime.withFileMutationLock(filePath, async () => { it('removes 
temporary files when atomic sync writes fail after creating the temp file', async () => { const targetDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-write-json-sync-')) const targetFile = path.join(targetDir, 'settings.json') - const expectedTempPath = `${targetFile}.${process.pid}.1700000000002.tmp` const renameError = Object.assign(new Error('rename failed'), { code: 'EXDEV' }) const runtime = createFileModeRuntime(targetDir, path.join(targetDir, 'legacy-data.json')) @@ -552,8 +576,10 @@ dataRuntime.withFileMutationLock(filePath, async () => { try { expect(() => runtime.writeJsonAtomic(targetFile, { ok: true })).toThrow(renameError) - expect(renameSpy).toHaveBeenCalledWith(expectedTempPath, targetFile) - expect(existsSync(expectedTempPath)).toBe(false) + const tempPath = renameSpy.mock.calls[0]?.[0] as string + expect(renameSpy).toHaveBeenCalledWith(tempPath, targetFile) + expectAtomicTempPath(tempPath, targetFile, 1700000000002) + expect(existsSync(tempPath)).toBe(false) } finally { renameSpy.mockRestore() nowSpy.mockRestore() @@ -600,7 +626,6 @@ dataRuntime.withFileMutationLock(filePath, async () => { it('removes temporary files when writeFile rejects after creating the temp file', async () => { const targetDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-write-json-')) const targetFile = path.join(targetDir, 'settings.json') - const expectedTempPath = `${targetFile}.${process.pid}.1700000000001.tmp` const writeError = Object.assign(new Error('write failed'), { code: 'EIO' }) const originalWriteFile = fsPromises.writeFile.bind(fsPromises) const writeSpy = vi @@ -615,8 +640,9 @@ dataRuntime.withFileMutationLock(filePath, async () => { await expect(writeJsonAtomicAsync(targetFile, { ok: true })).rejects.toBe(writeError) expect(writeSpy).toHaveBeenCalled() - expect(unlinkSpy).toHaveBeenCalledWith(expectedTempPath) - expect(existsSync(expectedTempPath)).toBe(false) + const tempPath = unlinkSpy.mock.calls[0]?.[0] as string + 
expectAtomicTempPath(tempPath, targetFile, 1700000000001) + expect(existsSync(tempPath)).toBe(false) writeSpy.mockRestore() unlinkSpy.mockRestore() diff --git a/tests/unit/server-helpers-runner-core.test.ts b/tests/unit/server-helpers-runner-core.test.ts index cd13d4e..30fd991 100644 --- a/tests/unit/server-helpers-runner-core.test.ts +++ b/tests/unit/server-helpers-runner-core.test.ts @@ -57,14 +57,21 @@ function createSpawnSequence(outcomes: FakeSpawnOutcome[]) { function createRuntimeWithSpawn( spawnImpl: ReturnType, - options: { latestLookupTimeoutMs?: number; localBinExists?: boolean } = {}, + options: { + latestLookupTimeoutMs?: number + localBinExists?: boolean + localBinOverride?: string + } = {}, ) { + const localBin = options.localBinOverride ?? '/missing/toktrack' + return createAutoImportRuntime({ fs: { - existsSync: (filePath: string) => - Boolean(options.localBinExists && filePath === '/missing/toktrack'), + existsSync: (filePath: string) => Boolean(options.localBinExists && filePath === localBin), + }, + processObject: { + env: options.localBinOverride ? 
{ TTDASH_TOKTRACK_LOCAL_BIN: options.localBinOverride } : {}, }, - processObject: { env: {} }, spawnCrossPlatform: spawnImpl, normalizeIncomingData: (input: unknown) => input, withSettingsAndDataMutationLock: async (operation: () => Promise) => operation(), @@ -114,6 +121,29 @@ describe('server helper utilities: toktrack runner core behavior', () => { ) }) + it('honors a configured local toktrack binary override for display and execution', async () => { + const spawnImpl = createSpawnSequence([{ stdout: `toktrack ${TOKTRACK_VERSION}` }]) + const runtime = createRuntimeWithSpawn(spawnImpl, { + localBinExists: true, + localBinOverride: '/custom/bin/toktrack', + }) + + expect(runtime.getLocalToktrackDisplayCommand()).toBe('/custom/bin/toktrack daily --json') + + await expect(runtime.resolveToktrackRunner()).resolves.toMatchObject({ + command: '/custom/bin/toktrack', + displayCommand: '/custom/bin/toktrack daily --json', + method: 'local', + }) + expect(spawnImpl).toHaveBeenCalledWith( + '/custom/bin/toktrack', + ['--version'], + expect.objectContaining({ + env: { TTDASH_TOKTRACK_LOCAL_BIN: '/custom/bin/toktrack' }, + }), + ) + }) + it('parses toktrack version banners down to the raw version', () => { expect(parseToktrackVersionOutput(`toktrack ${TOKTRACK_VERSION}`)).toBe(TOKTRACK_VERSION) expect(parseToktrackVersionOutput(`${TOKTRACK_VERSION}\n`)).toBe(TOKTRACK_VERSION) diff --git a/tests/unit/test-pipeline-scripts.test.ts b/tests/unit/test-pipeline-scripts.test.ts index b5cf323..151757e 100644 --- a/tests/unit/test-pipeline-scripts.test.ts +++ b/tests/unit/test-pipeline-scripts.test.ts @@ -36,6 +36,17 @@ describe('test pipeline scripts', () => { expect(scripts.typecheck).toContain('--tsBuildInfoFile .cache/tsc/tsconfig.tsbuildinfo') }) + it('enables the JUnit reporter whenever a Vitest script writes JUnit output', () => { + const scriptsWithJunitOutput = Object.entries(scripts).filter(([, command]) => + command.includes('--outputFile.junit'), + ) + + 
expect(scriptsWithJunitOutput.length).toBeGreaterThan(0) + for (const [scriptName, command] of scriptsWithJunitOutput) { + expect(command, scriptName).toContain('--reporter=junit') + } + }) + it('keeps CI split into parallel jobs that share one production bundle artifact', async () => { const workflow = await readFile('.github/workflows/ci.yml', 'utf8') From e2afb63451aae5ca40ce4abb77ca8f8338791e11 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Sat, 2 May 2026 15:42:56 +0200 Subject: [PATCH 37/42] Add required CI gate --- .github/workflows/ci.yml | 41 +++++++++++ .github/workflows/release.yml | 1 + docs/testing.md | 1 + scripts/verify-main-ci.js | 88 +++++++++++++++++++++++- tests/unit/test-pipeline-scripts.test.ts | 23 +++++++ 5 files changed, 153 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b692529..e4aac8d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -225,3 +225,44 @@ jobs: - name: Build production bundle run: npm run build:app + + ci-required: + name: CI Required + runs-on: ubuntu-latest + timeout-minutes: 5 + needs: + - static + - vitest + - coverage + - build + - package-smoke + - e2e + - windows-smoke + if: ${{ always() }} + + steps: + - name: Verify required CI jobs + run: | + failed=0 + + check_result() { + local job_name="$1" + local result="$2" + + if [[ "${result}" = "success" ]]; then + echo "${job_name}: ${result}" + else + echo "::error::${job_name}: ${result}" + failed=1 + fi + } + + check_result "static" "${{ needs.static.result }}" + check_result "vitest" "${{ needs.vitest.result }}" + check_result "coverage" "${{ needs.coverage.result }}" + check_result "build" "${{ needs.build.result }}" + check_result "package-smoke" "${{ needs['package-smoke'].result }}" + check_result "e2e" "${{ needs.e2e.result }}" + check_result "windows-smoke" "${{ needs['windows-smoke'].result }}" + + exit "${failed}" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml 
index 40d7e29..39afa8b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -116,6 +116,7 @@ jobs: --workflow ci.yml \ --branch main \ --sha "${MAIN_SHA}" \ + --required-job "CI Required" \ --retries 90 \ --retry-delay-ms 10000 diff --git a/docs/testing.md b/docs/testing.md index a7cab95..37e271c 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -206,6 +206,7 @@ JUnit reports. - Keep workflow test paths aligned with the split test structure. - The main CI workflow is intentionally a DAG: `static`, Vitest project matrix, `coverage`, and `build` can run independently; `package-smoke` and `e2e` depend only on the `production-dist` artifact from `build`. +- `CI Required` is the stable branch-protection check for `ci.yml`; it runs after every required CI job with `always()` and fails on failed, skipped, or cancelled dependencies. - Keep CI report artifacts job-scoped so parallel jobs do not overwrite each other. Vitest project jobs upload `test-reports-vitest-`, coverage uploads `coverage-reports`, and Playwright uploads `test-reports-e2e`. - Each Vitest matrix job evaluates its own JUnit report with `scripts/report-test-timings.js` and the same `20s` suite / `12s` test hard budget used by `npm run test:timings:budget`. 
diff --git a/scripts/verify-main-ci.js b/scripts/verify-main-ci.js index 6eb1982..d93e5dd 100644 --- a/scripts/verify-main-ci.js +++ b/scripts/verify-main-ci.js @@ -18,6 +18,7 @@ function parseArgs(argv) { workflow: null, branch: 'main', sha: null, + requiredJob: null, retries: DEFAULT_RETRIES, retryDelayMs: DEFAULT_RETRY_DELAY_MS, }; @@ -50,6 +51,12 @@ function parseArgs(argv) { continue; } + if (arg === '--required-job' && next) { + options.requiredJob = next; + index += 1; + continue; + } + if (arg === '--retries' && next) { options.retries = Number.parseInt(next, 10); index += 1; @@ -65,7 +72,7 @@ function parseArgs(argv) { if (!options.repo || !options.workflow || !options.sha) { fail( - 'Usage: node scripts/verify-main-ci.js --repo --workflow --sha [--branch main] [--retries N] [--retry-delay-ms MS]', + 'Usage: node scripts/verify-main-ci.js --repo --workflow --sha [--branch main] [--required-job ] [--retries N] [--retry-delay-ms MS]', ); } @@ -120,10 +127,59 @@ async function fetchWorkflowRuns(options, token) { return response.json(); } +async function fetchWorkflowRunJobs(options, token, runId) { + const jobs = []; + let page = 1; + + while (true) { + const url = new URL(`https://api.github.com/repos/${options.repo}/actions/runs/${runId}/jobs`); + url.searchParams.set('per_page', '100'); + url.searchParams.set('page', String(page)); + + const response = await fetch(url, { + headers: { + Accept: 'application/vnd.github+json', + Authorization: `Bearer ${token}`, + 'X-GitHub-Api-Version': '2022-11-28', + 'User-Agent': 'ttdash-release-workflow', + }, + }); + + if (!response.ok) { + const body = await response.text(); + const normalizedBody = body.replace(/\s+/g, ' ').trim(); + const maxPreviewLength = 200; + const bodyPreview = + normalizedBody.length > maxPreviewLength + ? `${normalizedBody.slice(0, maxPreviewLength)}…` + : normalizedBody; + const previewSuffix = bodyPreview ? 
` Response preview: ${bodyPreview}` : ''; + throw new Error(`GitHub API request failed with ${response.status}.${previewSuffix}`); + } + + const payload = await response.json(); + if (!Array.isArray(payload.jobs)) { + return jobs; + } + + jobs.push(...payload.jobs); + + if (payload.jobs.length < 100) { + return jobs; + } + + page += 1; + } +} + function describeRun(run) { return `${run.name} (${run.status}/${run.conclusion ?? 'pending'})`; } +function describeJob(job) { + return `${job.name} (${job.status}/${job.conclusion ?? 'pending'})`; +} + async function main() { const token = getToken(); if (!token) { @@ -144,6 +200,30 @@ async function main() { log( `CI workflow run is still in progress: ${describeRun(run)} (attempt ${attempt}/${options.retries}).`, ); + } else if (options.requiredJob) { + const jobs = await fetchWorkflowRunJobs(options, token, run.id); + const job = jobs.find((candidate) => candidate.name === options.requiredJob); + + if (!job) { + const jobNames = jobs + .map((candidate) => candidate.name) + .sort() + .join(', '); + fail( + `Required CI job "${options.requiredJob}" was not found in workflow run ${run.id} for ${options.sha}. Available jobs: ${jobNames || 'none'}.`, + ); + } else if (job.status !== 'completed') { + log( + `Required CI job is still in progress: ${describeJob(job)} (attempt ${attempt}/${options.retries}).`, + ); + } else if (job.conclusion === 'success') { + log(`Verified required CI job for ${options.sha}: ${describeJob(job)}.`); + return; + } else { + fail( + `Required CI job "${options.requiredJob}" for ${options.sha} completed with ${job.conclusion}. 
Release aborted.`, + ); + } } else if (run.conclusion === 'success') { log(`Verified CI success for ${options.sha}: ${describeRun(run)}.`); return; @@ -156,6 +236,12 @@ async function main() { } } + if (options.requiredJob) { + fail( + `Required CI job "${options.requiredJob}" for ${options.sha} did not reach a successful conclusion in time.`, + ); + } + fail(`CI workflow for ${options.sha} did not reach a successful conclusion in time.`); } diff --git a/tests/unit/test-pipeline-scripts.test.ts b/tests/unit/test-pipeline-scripts.test.ts index 151757e..233fd16 100644 --- a/tests/unit/test-pipeline-scripts.test.ts +++ b/tests/unit/test-pipeline-scripts.test.ts @@ -58,6 +58,7 @@ describe('test pipeline scripts', () => { 'package-smoke', 'e2e', 'windows-smoke', + 'ci-required', ]) { expect(workflow).toContain(`\n ${job}:`) } @@ -72,6 +73,28 @@ describe('test pipeline scripts', () => { expect(workflow).toContain('package-smoke:') expect(workflow).toContain('e2e:') expect(workflow).toContain('needs: build') + expect(workflow).toContain('name: CI Required') + expect(workflow).toContain('if: ${{ always() }}') + expect(workflow).toContain('- static') + expect(workflow).toContain('- vitest') + expect(workflow).toContain('- coverage') + expect(workflow).toContain('- package-smoke') + expect(workflow).toContain('- e2e') + expect(workflow).toContain('- windows-smoke') + expect(workflow).toContain( + 'check_result "windows-smoke" "${{ needs[\'windows-smoke\'].result }}"', + ) expect(workflow).toContain('uses: actions/download-artifact@') }) + + it('uses the required CI job as the release gate', async () => { + const releaseWorkflow = await readFile('.github/workflows/release.yml', 'utf8') + const verifyScript = await readFile('scripts/verify-main-ci.js', 'utf8') + + expect(releaseWorkflow).toContain('--workflow ci.yml') + expect(releaseWorkflow).toContain('--required-job "CI Required"') + expect(verifyScript).toContain('--required-job') + 
expect(verifyScript).toContain('/actions/runs/${runId}/jobs') + expect(verifyScript).toContain('Required CI job') + }) }) From 9fe0a3e4c9f2b1f2254ddb524b64477990bdacf6 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Sun, 3 May 2026 13:14:59 +0200 Subject: [PATCH 38/42] v6.3.0: Address security review findings --- scripts/run-vitest-project-timings.js | 15 + server/auto-import-runtime/command-runner.js | 2 +- .../auto-import-runtime/toktrack-runners.js | 2 +- server/routes/usage-routes.js | 11 +- shared/locales/de/common.json | 2 +- src/lib/data-transforms.ts | 6 +- tests/e2e/command-palette.spec.ts | 2 +- tests/e2e/fixtures.ts | 134 +++-- tests/e2e/helpers.ts | 20 +- .../command-palette-language.test.tsx | 84 +++ tests/frontend/cost-by-weekday.test.tsx | 33 ++ .../dashboard-language-regressions.test.tsx | 58 -- ...sx => lazy-dashboard-chart-test-utils.tsx} | 108 +--- tests/frontend/model-mix.test.tsx | 34 ++ tests/frontend/period-comparison.test.tsx | 50 ++ tests/frontend/tokens-over-time.test.tsx | 32 ++ tests/unit/auto-import-stream-routes.test.ts | 83 +++ tests/unit/data-transforms.test.ts | 18 +- tests/unit/e2e-auth-helpers.test.ts | 66 +++ tests/unit/http-router-mutations.test.ts | 534 ++++-------------- tests/unit/http-router-test-helpers.ts | 235 ++++++++ tests/unit/report-pdf-routes.test.ts | 97 ++++ tests/unit/run-vitest-project-timings.test.ts | 72 ++- tests/unit/runtime-routes.test.ts | 103 ++++ ...erver-helpers-auto-import-executor.test.ts | 2 +- tests/unit/server-helpers-file-locks.test.ts | 44 +- tests/unit/server-helpers-runner-core.test.ts | 34 +- 27 files changed, 1189 insertions(+), 692 deletions(-) create mode 100644 tests/frontend/command-palette-language.test.tsx create mode 100644 tests/frontend/cost-by-weekday.test.tsx rename tests/frontend/{lazy-dashboard-charts.test.tsx => lazy-dashboard-chart-test-utils.tsx} (56%) create mode 100644 tests/frontend/model-mix.test.tsx create mode 100644 tests/frontend/period-comparison.test.tsx create mode 
100644 tests/frontend/tokens-over-time.test.tsx create mode 100644 tests/unit/auto-import-stream-routes.test.ts create mode 100644 tests/unit/e2e-auth-helpers.test.ts create mode 100644 tests/unit/http-router-test-helpers.ts create mode 100644 tests/unit/report-pdf-routes.test.ts create mode 100644 tests/unit/runtime-routes.test.ts diff --git a/scripts/run-vitest-project-timings.js b/scripts/run-vitest-project-timings.js index 6a54b05..ef62a6d 100644 --- a/scripts/run-vitest-project-timings.js +++ b/scripts/run-vitest-project-timings.js @@ -183,6 +183,11 @@ function runCommand(command, args, spawnSyncImpl) { }); } +function writeLaunchError(stderr, label, error) { + const message = error instanceof Error ? error.message : String(error); + stderr.write(`Failed to launch ${label}: ${message}\n`); +} + function run(argv = process.argv.slice(2), streams = process, spawnSyncImpl = spawnSync) { let options; @@ -225,12 +230,22 @@ function run(argv = process.argv.slice(2), streams = process, spawnSyncImpl = sp const testResult = runCommand(vitestCommand.command, vitestCommand.args, spawnSyncImpl); const durationMs = Date.now() - startedAt; + if (testResult.error) { + writeLaunchError(streams.stderr, 'vitest', testResult.error); + return 1; + } + if (testResult.status !== 0) { return testResult.status || 1; } const budgetResult = runCommand(budgetCommand.command, budgetCommand.args, spawnSyncImpl); + if (budgetResult.error) { + writeLaunchError(streams.stderr, 'budget check', budgetResult.error); + return 1; + } + if (budgetResult.status !== 0) { return budgetResult.status || 1; } diff --git a/server/auto-import-runtime/command-runner.js b/server/auto-import-runtime/command-runner.js index d8e476b..6319c8b 100644 --- a/server/auto-import-runtime/command-runner.js +++ b/server/auto-import-runtime/command-runner.js @@ -172,7 +172,7 @@ function createAutoImportCommandRunner({ const line = chunk.toString(); stderr += line; if (streamStderr && onStderr && line.trim()) { - 
onStderr(line.trimEnd()); + onStderr(line); } }); diff --git a/server/auto-import-runtime/toktrack-runners.js b/server/auto-import-runtime/toktrack-runners.js index fbfcc4f..86d333c 100644 --- a/server/auto-import-runtime/toktrack-runners.js +++ b/server/auto-import-runtime/toktrack-runners.js @@ -151,7 +151,7 @@ function createToktrackRunnerResolver({ try { const localVersion = parseToktrackVersionOutput( await runToktrack(localRunner, ['--version'], { - timeoutMs: getToktrackRunnerTimeouts(localRunner).probeMs, + timeoutMs: getToktrackRunnerTimeouts(localRunner).versionCheckMs, }), ); if (localVersion === toktrackVersion) { diff --git a/server/routes/usage-routes.js b/server/routes/usage-routes.js index 8967ac0..83da582 100644 --- a/server/routes/usage-routes.js +++ b/server/routes/usage-routes.js @@ -14,6 +14,8 @@ const EMPTY_USAGE_RESPONSE = { }, }; +const REQUIRED_USAGE_TOTAL_KEYS = Object.keys(EMPTY_USAGE_RESPONSE.totals); + /** Creates usage, upload, and usage-import API route handlers. 
*/ function createUsageRoutes({ json, validateMutationRequest, readMutationBody, dataRuntime }) { const { @@ -32,12 +34,19 @@ function createUsageRoutes({ json, validateMutationRequest, readMutationBody, da return value !== null && typeof value === 'object' && !Array.isArray(value); } + function hasNumericUsageTotals(totals) { + return ( + isPlainObject(totals) && + REQUIRED_USAGE_TOTAL_KEYS.every((key) => Number.isFinite(totals[key])) + ); + } + function isUsageData(value) { return ( isPlainObject(value) && Array.isArray(value.daily) && value.daily.every((day) => isPlainObject(day) && typeof day.date === 'string') && - isPlainObject(value.totals) + hasNumericUsageTotals(value.totals) ); } diff --git a/shared/locales/de/common.json b/shared/locales/de/common.json index 74fd0f5..aad663f 100644 --- a/shared/locales/de/common.json +++ b/shared/locales/de/common.json @@ -752,7 +752,7 @@ "close": "Schliessen" }, "commandPalette": { - "title": "Command Palette", + "title": "Befehlspalette", "description": "Tastaturgesteuerte Befehlsauswahl für Navigation und Aktionen im ttdash Dashboard.", "placeholder": "Befehl suchen...", "empty": "Kein Befehl gefunden.", diff --git a/src/lib/data-transforms.ts b/src/lib/data-transforms.ts index a815c30..685f6a5 100644 --- a/src/lib/data-transforms.ts +++ b/src/lib/data-transforms.ts @@ -143,7 +143,11 @@ export function buildDashboardChartTransforms( modelCostChartData: [], tokenChartData: [], requestChartData: [], - weekdayData: createWeekdayLabels(locale).map((day) => ({ day, cost: 0 })), + weekdayData: createWeekdayLabels(locale).map((day, weekdayIndex) => ({ + day, + cost: 0, + weekdayIndex, + })), } } diff --git a/tests/e2e/command-palette.spec.ts b/tests/e2e/command-palette.spec.ts index a39112e..80ebaea 100644 --- a/tests/e2e/command-palette.spec.ts +++ b/tests/e2e/command-palette.spec.ts @@ -8,7 +8,7 @@ import { viewModeComboboxPattern, } from './helpers' -const commandPaletteTitlePattern = /^Command [Pp]alette$/ +const 
commandPaletteTitlePattern = /^(Command Palette|Befehlspalette)$/ const helpDialogTitlePattern = /^(Help & shortcuts|Hilfe & Tastenkürzel)$/ const dateFilterActivePattern = /^(Date filter active|Datumsfilter aktiv)$/ const preset7Pattern = /^(7D|7T)$/ diff --git a/tests/e2e/fixtures.ts b/tests/e2e/fixtures.ts index 641b191..220f3d4 100644 --- a/tests/e2e/fixtures.ts +++ b/tests/e2e/fixtures.ts @@ -5,7 +5,9 @@ import { test as base, expect, type Page } from '@playwright/test' type E2EServer = { authSessionPath: string + authHeader: string baseURL: string + bootstrapUrl: string runtimeRoot: string } @@ -13,10 +15,6 @@ type WorkerFixtures = { e2eServer: E2EServer } -type LocalAuthSession = { - authorizationHeader?: unknown -} - const startupTimeoutMs = 120_000 const host = process.env.PLAYWRIGHT_TEST_HOST || '127.0.0.1' const basePort = Number(process.env.PLAYWRIGHT_TEST_PORT || '3015') @@ -27,6 +25,8 @@ function sleep(ms: number) { function buildWorkerServer(workerIndex: number) { const port = basePort + workerIndex + const authToken = `ttdash-playwright-local-auth-token-worker-${workerIndex}-port-${port}` + const authHeader = `Bearer ${authToken}` const runtimeRoot = path.join( process.cwd(), '.tmp-playwright', @@ -34,11 +34,14 @@ function buildWorkerServer(workerIndex: number) { String(workerIndex), 'app', ) + // The session file is only a server-readiness signal; test credentials come from env. 
const authSessionPath = path.join(runtimeRoot, 'config', 'session-auth.json') return { authSessionPath, + authHeader, baseURL: `http://${host}:${port}`, + bootstrapUrl: `http://${host}:${port}/?ttdash_token=${encodeURIComponent(authToken)}`, env: { ...process.env, HOST: host, @@ -47,6 +50,7 @@ function buildWorkerServer(workerIndex: number) { PLAYWRIGHT_TEST_PORT: String(port), PLAYWRIGHT_TEST_RUNTIME_ROOT: runtimeRoot, PORT: String(port), + TTDASH_LOCAL_AUTH_TOKEN: authToken, }, runtimeRoot, } @@ -68,71 +72,75 @@ function createOutputBuffer(serverProcess: ChildProcessWithoutNullStreams) { return () => output.trim() } -function readApiAuthHeaders(authSessionPath: string) { - if (!fs.existsSync(authSessionPath)) { - return null - } - - const authSession = JSON.parse(fs.readFileSync(authSessionPath, 'utf-8')) as LocalAuthSession - - if (typeof authSession.authorizationHeader !== 'string') { - throw new Error( - `Playwright auth session is missing an authorization header: ${authSessionPath}`, - ) - } - - return { - Authorization: authSession.authorizationHeader, - } -} - async function waitForServerReady( serverProcess: ChildProcessWithoutNullStreams, baseURL: string, authSessionPath: string, + authHeader: string, readOutput: () => string, ) { - const deadline = Date.now() + startupTimeoutMs - let lastError = '' + let rejectSpawnError: ((error: Error) => void) | null = null + const spawnErrorPromise = new Promise((_, reject) => { + rejectSpawnError = reject + }) + const onSpawnError = (error: Error) => { + rejectSpawnError?.( + new Error(`Playwright server spawn error: ${error.message}\n${readOutput()}`), + ) + } - while (Date.now() < deadline) { - if (serverProcess.exitCode !== null) { - throw new Error( - `Playwright test server exited with code ${serverProcess.exitCode}.\n${readOutput()}`, - ) - } + serverProcess.once('error', onSpawnError) - try { - const response = await fetch(baseURL, { - redirect: 'manual', - signal: AbortSignal.timeout(1_000), - }) + const 
readinessPromise = (async () => { + const deadline = Date.now() + startupTimeoutMs + let lastError = '' + + while (Date.now() < deadline) { + if (serverProcess.exitCode !== null) { + throw new Error( + `Playwright test server exited with code ${serverProcess.exitCode}.\n${readOutput()}`, + ) + } - if (response.status < 400 && fs.existsSync(authSessionPath)) { - const authHeaders = readApiAuthHeaders(authSessionPath) - if (authHeaders) { - const apiResponse = await fetch(new URL('/api/usage', baseURL), { - headers: authHeaders, - signal: AbortSignal.timeout(1_000), - }) + try { + const response = await fetch(baseURL, { + redirect: 'manual', + signal: AbortSignal.timeout(1_000), + }) - if (apiResponse.status === 200) { - return - } + if (response.status < 400) { + if (!fs.existsSync(authSessionPath)) { + lastError = `Waiting for server readiness signal file at ${authSessionPath}` + } else { + const apiResponse = await fetch(new URL('/api/usage', baseURL), { + headers: { Authorization: authHeader }, + signal: AbortSignal.timeout(1_000), + }) - lastError = `GET /api/usage returned ${apiResponse.status}` + if (apiResponse.status === 200) { + return + } + + lastError = `GET /api/usage returned ${apiResponse.status}` + } } + } catch (error) { + lastError = error instanceof Error ? error.message : String(error) } - } catch (error) { - lastError = error instanceof Error ? error.message : String(error) + + await sleep(250) } - await sleep(250) - } + throw new Error( + `Timed out waiting for Playwright test server at ${baseURL}. Last error: ${lastError}\n${readOutput()}`, + ) + })() - throw new Error( - `Timed out waiting for Playwright test server at ${baseURL}. 
Last error: ${lastError}\n${readOutput()}`, - ) + try { + await Promise.race([spawnErrorPromise, readinessPromise]) + } finally { + serverProcess.off('error', onSpawnError) + } } async function stopServer(serverProcess: ChildProcessWithoutNullStreams) { @@ -179,6 +187,7 @@ export const test = base.extend, WorkerFixtures>({ e2eServer: [ async ({}, runFixture, workerInfo) => { const workerServer = buildWorkerServer(workerInfo.parallelIndex) + fs.rmSync(workerServer.runtimeRoot, { recursive: true, force: true }) const serverProcess = spawn(process.execPath, ['scripts/start-test-server.js'], { cwd: process.cwd(), env: workerServer.env, @@ -186,20 +195,27 @@ export const test = base.extend, WorkerFixtures>({ const readOutput = createOutputBuffer(serverProcess) const previousRuntimeRoot = process.env.PLAYWRIGHT_TEST_RUNTIME_ROOT const previousAuthSessionPath = process.env.PLAYWRIGHT_TEST_AUTH_SESSION_PATH + const previousAuthorizationHeader = process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER + const previousBootstrapUrl = process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL process.env.PLAYWRIGHT_TEST_RUNTIME_ROOT = workerServer.runtimeRoot process.env.PLAYWRIGHT_TEST_AUTH_SESSION_PATH = workerServer.authSessionPath + process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER = workerServer.authHeader + process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL = workerServer.bootstrapUrl try { await waitForServerReady( serverProcess, workerServer.baseURL, workerServer.authSessionPath, + workerServer.authHeader, readOutput, ) await runFixture({ authSessionPath: workerServer.authSessionPath, + authHeader: workerServer.authHeader, baseURL: workerServer.baseURL, + bootstrapUrl: workerServer.bootstrapUrl, runtimeRoot: workerServer.runtimeRoot, }) } finally { @@ -216,6 +232,18 @@ export const test = base.extend, WorkerFixtures>({ } else { process.env.PLAYWRIGHT_TEST_AUTH_SESSION_PATH = previousAuthSessionPath } + + if (previousAuthorizationHeader === undefined) { + delete 
process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER + } else { + process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER = previousAuthorizationHeader + } + + if (previousBootstrapUrl === undefined) { + delete process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL + } else { + process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL = previousBootstrapUrl + } } }, { scope: 'worker', timeout: startupTimeoutMs }, diff --git a/tests/e2e/helpers.ts b/tests/e2e/helpers.ts index 41a7a67..6cf9a14 100644 --- a/tests/e2e/helpers.ts +++ b/tests/e2e/helpers.ts @@ -12,13 +12,6 @@ import { TOKTRACK_VERSION } from '../../shared/toktrack-version.js' export const sampleUsagePath = path.join(process.cwd(), 'examples', 'sample-usage.json') -function getLocalAuthSessionPath(): string { - return ( - process.env.PLAYWRIGHT_TEST_AUTH_SESSION_PATH || - path.join(process.cwd(), '.tmp-playwright', 'app', 'config', 'session-auth.json') - ) -} - export const uploadToastPattern = /^(Datei sample-usage\.json erfolgreich geladen|File sample-usage\.json loaded successfully)$/ export const viewModeComboboxPattern = /^(Ansichtsmodus|View mode)$/ @@ -126,8 +119,19 @@ export function createApiUrl(pathname: string, baseURL?: string) { return new URL(pathname, baseURL).toString() } +function getRequiredPlaywrightEnv(name: string) { + const value = process.env[name] + if (!value) { + throw new Error(`${name} is required for Playwright API authentication`) + } + return value +} + export function readLocalAuthSession() { - return JSON.parse(fs.readFileSync(getLocalAuthSessionPath(), 'utf-8')) as LocalAuthSession + return { + authorizationHeader: getRequiredPlaywrightEnv('PLAYWRIGHT_TEST_AUTHORIZATION_HEADER'), + bootstrapUrl: getRequiredPlaywrightEnv('PLAYWRIGHT_TEST_BOOTSTRAP_URL'), + } satisfies LocalAuthSession } export function createApiAuthHeaders() { diff --git a/tests/frontend/command-palette-language.test.tsx b/tests/frontend/command-palette-language.test.tsx new file mode 100644 index 0000000..95cfe8b --- /dev/null +++ 
b/tests/frontend/command-palette-language.test.tsx @@ -0,0 +1,84 @@ +// @vitest-environment jsdom + +import { fireEvent, screen } from '@testing-library/react' +import type { ComponentProps } from 'react' +import { afterAll, beforeAll, describe, expect, it, vi } from 'vitest' +import { CommandPalette } from '@/components/features/command-palette/CommandPalette' +import { DEFAULT_APP_SETTINGS } from '@/lib/app-settings' +import { initI18n } from '@/lib/i18n' +import { renderWithAppProviders } from '../test-utils' + +type CommandPaletteProps = ComponentProps + +function buildCommandPaletteProps( + overrides: Partial = {}, +): CommandPaletteProps { + const noop = () => {} + + return { + isDark: true, + availableProviders: [], + selectedProviders: [], + availableModels: [], + selectedModels: [], + hasTodaySection: false, + hasMonthSection: false, + hasRequestSection: false, + sectionVisibility: { ...DEFAULT_APP_SETTINGS.sectionVisibility }, + sectionOrder: [...DEFAULT_APP_SETTINGS.sectionOrder], + reportGenerating: false, + onToggleTheme: noop, + onExportCSV: noop, + onGenerateReport: noop, + onDelete: noop, + onUpload: noop, + onAutoImport: noop, + onOpenSettings: noop, + onScrollTo: noop, + onViewModeChange: noop, + onApplyPreset: noop, + onToggleProvider: noop, + onToggleModel: noop, + onClearProviders: noop, + onClearModels: noop, + onClearDateRange: noop, + onResetAll: noop, + onHelp: noop, + onLanguageChange: noop, + ...overrides, + } +} + +describe('Command palette language', () => { + let originalScrollIntoView: Element['scrollIntoView'] | undefined + + beforeAll(async () => { + originalScrollIntoView = Element.prototype.scrollIntoView + Element.prototype.scrollIntoView = vi.fn() + await initI18n('de') + }) + + afterAll(async () => { + if (originalScrollIntoView) { + Element.prototype.scrollIntoView = originalScrollIntoView + } else { + delete (Element.prototype as { scrollIntoView?: Element['scrollIntoView'] }).scrollIntoView + } + + await initI18n('en') + }) 
+ + it('localizes action groups while keeping representative command ids renderable', async () => { + renderWithAppProviders() + + fireEvent.keyDown(document, { key: 'k', metaKey: true }) + + expect(await screen.findByRole('dialog', { name: 'Befehlspalette' })).toBeInTheDocument() + expect(screen.getByText('Daten laden')).toBeInTheDocument() + expect(screen.getByText('Exporte')).toBeInTheDocument() + expect(screen.getByText('Wartung')).toBeInTheDocument() + expect(screen.getByTestId('command-auto-import')).toBeInTheDocument() + expect(screen.getByTestId('command-csv')).toBeInTheDocument() + expect(screen.getByTestId('command-settings-open')).toBeInTheDocument() + }) +}) diff --git a/tests/frontend/cost-by-weekday.test.tsx b/tests/frontend/cost-by-weekday.test.tsx new file mode 100644 index 0000000..c96e5cf --- /dev/null +++ b/tests/frontend/cost-by-weekday.test.tsx @@ -0,0 +1,33 @@ +// @vitest-environment jsdom + +import { screen } from '@testing-library/react' +import { beforeAll, describe, expect, it } from 'vitest' +import { legacyWeekdayData, weekdayData } from './lazy-dashboard-chart-test-utils' +import { CostByWeekday } from '@/components/charts/CostByWeekday' +import { initI18n } from '@/lib/i18n' +import { renderWithTooltip } from '../test-utils' + +describe('CostByWeekday chart', () => { + beforeAll(async () => { + await initI18n('en') + }) + + it('renders peak and low bars without a browser-only chart dependency', () => { + renderWithTooltip() + + expect(screen.getByText('Cost by weekday')).toBeInTheDocument() + expect(screen.getByText('Peak: Tu · Low: Sa · Weekend 19%')).toBeInTheDocument() + expect(screen.getByTestId('chart-bar')).toHaveAttribute('data-name', 'Avg cost') + + const fills = screen.getAllByTestId('bar-cell').map((cell) => cell.getAttribute('data-fill')) + expect(fills).toHaveLength(7) + expect(fills.some((fill) => fill?.includes('weekdayPeak'))).toBe(true) + expect(fills.some((fill) => fill?.includes('weekdayLow'))).toBe(true) + }) + + 
it('uses localized day labels as a defensive weekend fallback without weekday indices', () => { + renderWithTooltip() + + expect(screen.getByText('Peak: Mar · Low: Sáb. · Weekend 19%')).toBeInTheDocument() + }) +}) diff --git a/tests/frontend/dashboard-language-regressions.test.tsx b/tests/frontend/dashboard-language-regressions.test.tsx index 319af2e..8f91213 100644 --- a/tests/frontend/dashboard-language-regressions.test.tsx +++ b/tests/frontend/dashboard-language-regressions.test.tsx @@ -1,20 +1,15 @@ // @vitest-environment jsdom import { fireEvent, screen } from '@testing-library/react' -import type { ComponentProps } from 'react' import { afterAll, beforeAll, describe, expect, it, vi } from 'vitest' import { PrimaryMetrics } from '@/components/cards/PrimaryMetrics' import { ChartCard } from '@/components/charts/ChartCard' -import { CommandPalette } from '@/components/features/command-palette/CommandPalette' import { UsageInsights } from '@/components/features/insights/UsageInsights' import { Header } from '@/components/layout/Header' -import { DEFAULT_APP_SETTINGS } from '@/lib/app-settings' import { initI18n } from '@/lib/i18n' import type { DashboardMetrics } from '@/types' import { renderWithAppProviders } from '../test-utils' -type CommandPaletteProps = ComponentProps - const emptyMetrics: DashboardMetrics = { totalCost: 0, totalTokens: 0, @@ -50,45 +45,6 @@ const emptyMetrics: DashboardMetrics = { providerConcentrationIndex: 0, } -function buildCommandPaletteProps( - overrides: Partial = {}, -): CommandPaletteProps { - const noop = () => {} - - return { - isDark: true, - availableProviders: [], - selectedProviders: [], - availableModels: [], - selectedModels: [], - hasTodaySection: false, - hasMonthSection: false, - hasRequestSection: false, - sectionVisibility: { ...DEFAULT_APP_SETTINGS.sectionVisibility }, - sectionOrder: [...DEFAULT_APP_SETTINGS.sectionOrder], - reportGenerating: false, - onToggleTheme: noop, - onExportCSV: noop, - onGenerateReport: 
noop, - onDelete: noop, - onUpload: noop, - onAutoImport: noop, - onOpenSettings: noop, - onScrollTo: noop, - onViewModeChange: noop, - onApplyPreset: noop, - onToggleProvider: noop, - onToggleModel: noop, - onClearProviders: noop, - onClearModels: noop, - onClearDateRange: noop, - onResetAll: noop, - onHelp: noop, - onLanguageChange: noop, - ...overrides, - } -} - describe('Dashboard language regressions', () => { let originalScrollIntoView: Element['scrollIntoView'] | undefined @@ -208,18 +164,4 @@ describe('Dashboard language regressions', () => { expect(document.body).not.toHaveTextContent('Quick Read') expect(document.body).not.toHaveTextContent('Streak') }) - - it('localizes command palette action groups while keeping representative command ids renderable', async () => { - renderWithAppProviders() - - fireEvent.keyDown(document, { key: 'k', metaKey: true }) - - expect(await screen.findByRole('dialog', { name: 'Command Palette' })).toBeInTheDocument() - expect(screen.getByText('Daten laden')).toBeInTheDocument() - expect(screen.getByText('Exporte')).toBeInTheDocument() - expect(screen.getByText('Wartung')).toBeInTheDocument() - expect(screen.getByTestId('command-auto-import')).toBeInTheDocument() - expect(screen.getByTestId('command-csv')).toBeInTheDocument() - expect(screen.getByTestId('command-settings-open')).toBeInTheDocument() - }) }) diff --git a/tests/frontend/lazy-dashboard-charts.test.tsx b/tests/frontend/lazy-dashboard-chart-test-utils.tsx similarity index 56% rename from tests/frontend/lazy-dashboard-charts.test.tsx rename to tests/frontend/lazy-dashboard-chart-test-utils.tsx index 0d6cace..8b735ca 100644 --- a/tests/frontend/lazy-dashboard-charts.test.tsx +++ b/tests/frontend/lazy-dashboard-chart-test-utils.tsx @@ -1,16 +1,7 @@ -// @vitest-environment jsdom - import type { ReactNode } from 'react' -import { fireEvent, screen } from '@testing-library/react' -import { beforeAll, describe, expect, it, vi } from 'vitest' -import { CostByWeekday } from 
'@/components/charts/CostByWeekday' -import { ModelMix } from '@/components/charts/ModelMix' -import { TokensOverTime } from '@/components/charts/TokensOverTime' -import { PeriodComparison } from '@/components/features/comparison/PeriodComparison' -import { initI18n } from '@/lib/i18n' +import { vi } from 'vitest' import type { DailyUsage, TokenChartDataPoint, WeekdayData } from '@/types' import { MockSvgContainer, MockSvgGroup } from '../recharts-test-utils' -import { renderWithTooltip } from '../test-utils' vi.mock('@/components/charts/ChartCard', () => ({ ChartCard: ({ @@ -137,7 +128,7 @@ vi.mock('recharts', () => ({ YAxis: () => null, })) -function createDailyUsage( +export function createDailyUsage( date: string, { claudeCost, @@ -184,7 +175,8 @@ function createDailyUsage( } } -const weekdayData: WeekdayData[] = [ +// Intentionally unsorted to verify chart components normalize weekday order. +export const weekdayData: WeekdayData[] = [ { day: 'Mo', cost: 3, weekdayIndex: 0 }, { day: 'Sa', cost: 2, weekdayIndex: 5 }, { day: 'Tu', cost: 9, weekdayIndex: 1 }, @@ -194,7 +186,8 @@ const weekdayData: WeekdayData[] = [ { day: 'Fr', cost: 7, weekdayIndex: 4 }, ] -const legacyWeekdayData: WeekdayData[] = [ +// Legacy shape without weekdayIndex to verify defensive fallback in CostByWeekday. 
+export const legacyWeekdayData: WeekdayData[] = [ { day: 'Lun', cost: 3 }, { day: 'Sáb.', cost: 2 }, { day: 'Mar', cost: 9 }, @@ -204,7 +197,7 @@ const legacyWeekdayData: WeekdayData[] = [ { day: 'Vie', cost: 7 }, ] -const tokenData: TokenChartDataPoint[] = [ +export const tokenData: TokenChartDataPoint[] = [ { date: '2026-04-01', Input: 100, @@ -236,90 +229,3 @@ const tokenData: TokenChartDataPoint[] = [ thinkingMA7: 15, }, ] - -describe('lazy dashboard charts', () => { - beforeAll(async () => { - await initI18n('en') - }) - - it('renders CostByWeekday peak and low bars without a browser-only chart dependency', () => { - renderWithTooltip() - - expect(screen.getByText('Cost by weekday')).toBeInTheDocument() - expect(screen.getByText('Peak: Tu · Low: Sa · Weekend 19%')).toBeInTheDocument() - expect(screen.getByTestId('chart-bar')).toHaveAttribute('data-name', 'Avg cost') - - const fills = screen.getAllByTestId('bar-cell').map((cell) => cell.getAttribute('data-fill')) - expect(fills).toHaveLength(7) - expect(fills.some((fill) => fill?.includes('weekdayPeak'))).toBe(true) - expect(fills.some((fill) => fill?.includes('weekdayLow'))).toBe(true) - }) - - it('uses localized day labels as a defensive weekend fallback without weekday indices', () => { - renderWithTooltip() - - expect(screen.getByText('Peak: Mar · Low: Sáb. 
· Weekend 19%')).toBeInTheDocument() - }) - - it('renders ModelMix for enough model history and skips underspecified input', () => { - const modelMixData = [ - createDailyUsage('2026-04-01', { claudeCost: 3, gptCost: 7 }), - createDailyUsage('2026-04-02', { claudeCost: 5, gptCost: 5 }), - createDailyUsage('2026-04-03', { claudeCost: 8, gptCost: 2 }), - ] - const view = renderWithTooltip() - - expect(view.container).toBeEmptyDOMElement() - view.unmount() - - renderWithTooltip() - - expect(screen.getByText('Model mix')).toBeInTheDocument() - expect(screen.getByText('Cost share by model over time')).toBeInTheDocument() - expect(screen.getAllByTestId('chart-area')).toHaveLength(2) - }) - - it('renders token totals, moving averages, and drilldown clicks in TokensOverTime', () => { - const onClickDay = vi.fn() - - renderWithTooltip() - - expect(screen.getByText('Tokens over time')).toBeInTheDocument() - expect(screen.getByText('Cache tokens')).toBeInTheDocument() - expect(screen.getByText('Input / Output tokens')).toBeInTheDocument() - expect(screen.getByText('Thinking tokens')).toBeInTheDocument() - expect(screen.getByText('Total tokens (all types)')).toBeInTheDocument() - expect(screen.getAllByTestId('chart-area')).toHaveLength(6) - expect(screen.getAllByTestId('chart-line')).toHaveLength(6) - - fireEvent.click(screen.getAllByTestId('composed-chart')[0]) - - expect(onClickDay).toHaveBeenCalledWith('2026-04-01') - }) - - it('renders PeriodComparison empty and populated states with preset switching', () => { - const comparisonData = Array.from({ length: 14 }, (_, index) => { - const day = String(index + 1).padStart(2, '0') - return createDailyUsage(`2026-04-${day}`, { - claudeCost: index + 1, - gptCost: index + 2, - }) - }) - const view = renderWithTooltip() - - expect(screen.getByText('Not enough data for a comparison')).toBeInTheDocument() - expect(screen.getByText('At least 7 days required (currently: 3)')).toBeInTheDocument() - view.unmount() - - renderWithTooltip() 
- - expect(screen.getByText('Period comparison')).toBeInTheDocument() - expect(screen.getByText('Last week')).toBeInTheDocument() - expect(screen.getByText('This week')).toBeInTheDocument() - - fireEvent.click(screen.getByRole('button', { name: 'Month' })) - - expect(screen.getByText('Last month')).toBeInTheDocument() - expect(screen.getByText('This month')).toBeInTheDocument() - }) -}) diff --git a/tests/frontend/model-mix.test.tsx b/tests/frontend/model-mix.test.tsx new file mode 100644 index 0000000..063c811 --- /dev/null +++ b/tests/frontend/model-mix.test.tsx @@ -0,0 +1,34 @@ +// @vitest-environment jsdom + +import { screen } from '@testing-library/react' +import { beforeAll, describe, expect, it } from 'vitest' +import { createDailyUsage } from './lazy-dashboard-chart-test-utils' +import { ModelMix } from '@/components/charts/ModelMix' +import { initI18n } from '@/lib/i18n' +import { renderWithTooltip } from '../test-utils' + +const modelMixData = [ + createDailyUsage('2026-04-01', { claudeCost: 3, gptCost: 7 }), + createDailyUsage('2026-04-02', { claudeCost: 5, gptCost: 5 }), + createDailyUsage('2026-04-03', { claudeCost: 8, gptCost: 2 }), +] + +describe('ModelMix chart', () => { + beforeAll(async () => { + await initI18n('en') + }) + + it('skips underspecified input', () => { + const { container } = renderWithTooltip() + + expect(container).toBeEmptyDOMElement() + }) + + it('renders enough model history', () => { + renderWithTooltip() + + expect(screen.getByText('Model mix')).toBeInTheDocument() + expect(screen.getByText('Cost share by model over time')).toBeInTheDocument() + expect(screen.getAllByTestId('chart-area')).toHaveLength(2) + }) +}) diff --git a/tests/frontend/period-comparison.test.tsx b/tests/frontend/period-comparison.test.tsx new file mode 100644 index 0000000..0183bff --- /dev/null +++ b/tests/frontend/period-comparison.test.tsx @@ -0,0 +1,50 @@ +// @vitest-environment jsdom + +import { fireEvent, screen } from '@testing-library/react' 
+import { beforeAll, describe, expect, it } from 'vitest' +import { createDailyUsage } from './lazy-dashboard-chart-test-utils' +import { PeriodComparison } from '@/components/features/comparison/PeriodComparison' +import { initI18n } from '@/lib/i18n' +import { renderWithTooltip } from '../test-utils' + +const DAYS_FOR_COMPARISON = 14 + +function buildComparisonData() { + return Array.from({ length: DAYS_FOR_COMPARISON }, (_, index) => { + const day = String(index + 1).padStart(2, '0') + return createDailyUsage(`2026-04-${day}`, { + claudeCost: index + 1, + gptCost: index + 2, + }) + }) +} + +describe('PeriodComparison chart', () => { + beforeAll(async () => { + await initI18n('en') + }) + + it('renders empty state when data is insufficient', () => { + renderWithTooltip() + + expect(screen.getByText('Not enough data for a comparison')).toBeInTheDocument() + expect(screen.getByText('At least 7 days required (currently: 3)')).toBeInTheDocument() + }) + + it('renders populated state with week preset by default', () => { + renderWithTooltip() + + expect(screen.getByText('Period comparison')).toBeInTheDocument() + expect(screen.getByText('Last week')).toBeInTheDocument() + expect(screen.getByText('This week')).toBeInTheDocument() + }) + + it('switches to month preset when Month button is clicked', () => { + renderWithTooltip() + + fireEvent.click(screen.getByRole('button', { name: 'Month' })) + + expect(screen.getByText('Last month')).toBeInTheDocument() + expect(screen.getByText('This month')).toBeInTheDocument() + }) +}) diff --git a/tests/frontend/tokens-over-time.test.tsx b/tests/frontend/tokens-over-time.test.tsx new file mode 100644 index 0000000..a6102ab --- /dev/null +++ b/tests/frontend/tokens-over-time.test.tsx @@ -0,0 +1,32 @@ +// @vitest-environment jsdom + +import { fireEvent, screen } from '@testing-library/react' +import { beforeAll, describe, expect, it, vi } from 'vitest' +import { tokenData } from './lazy-dashboard-chart-test-utils' +import { 
TokensOverTime } from '@/components/charts/TokensOverTime' +import { initI18n } from '@/lib/i18n' +import { renderWithTooltip } from '../test-utils' + +describe('TokensOverTime chart', () => { + beforeAll(async () => { + await initI18n('en') + }) + + it('renders token totals, moving averages, and drilldown clicks', () => { + const onClickDay = vi.fn() + + renderWithTooltip() + + expect(screen.getByText('Tokens over time')).toBeInTheDocument() + expect(screen.getByText('Cache tokens')).toBeInTheDocument() + expect(screen.getByText('Input / Output tokens')).toBeInTheDocument() + expect(screen.getByText('Thinking tokens')).toBeInTheDocument() + expect(screen.getByText('Total tokens (all types)')).toBeInTheDocument() + expect(screen.getAllByTestId('chart-area')).toHaveLength(6) + expect(screen.getAllByTestId('chart-line')).toHaveLength(6) + + fireEvent.click(screen.getAllByTestId('composed-chart')[0]) + + expect(onClickDay).toHaveBeenCalledWith('2026-04-01') + }) +}) diff --git a/tests/unit/auto-import-stream-routes.test.ts b/tests/unit/auto-import-stream-routes.test.ts new file mode 100644 index 0000000..b5d25ee --- /dev/null +++ b/tests/unit/auto-import-stream-routes.test.ts @@ -0,0 +1,83 @@ +import { describe, expect, it, vi } from 'vitest' +import { createRouter, request, requestRaw } from './http-router-test-helpers' + +describe('auto-import stream routes', () => { + it('streams auto-import success events and releases the acquired lease', async () => { + const lease = { release: vi.fn() } + const closeImport = vi.fn() + const performAutoImport = vi.fn(async ({ onCheck, onOutput, onProgress, signalOnClose }) => { + signalOnClose(closeImport) + onCheck({ tool: 'toktrack', status: 'found' }) + onProgress({ key: 'startingLocalImport', vars: {} }) + onOutput('runner warning') + return { days: 2, totalCost: 3.5 } + }) + const { router } = createRouter({ + autoImportRuntimeOverrides: { + acquireAutoImportLease: vi.fn(() => lease), + performAutoImport, + }, + }) + + const 
{ req, res } = await requestRaw(router, '/api/auto-import/stream', 'POST') + + expect(res.status).toBe(200) + expect(res.headers['content-type']).toBe('text/event-stream') + expect(res.body).toContain('event: check') + expect(res.body).toContain('"status":"found"') + expect(res.body).toContain('event: progress') + expect(res.body).toContain('event: stderr') + expect(res.body).toContain('runner warning') + expect(res.body).toContain('event: success') + expect(res.body).toContain('"totalCost":3.5') + expect(res.body).toContain('event: done') + expect(performAutoImport).toHaveBeenCalledWith( + expect.objectContaining({ + source: 'auto-import', + lease, + }), + ) + expect(req.on).toHaveBeenCalledWith('close', closeImport) + expect(lease.release).toHaveBeenCalledTimes(1) + }) + + it('maps concurrent auto-import starts to a localized conflict response', async () => { + const { router } = createRouter({ + autoImportRuntimeOverrides: { + acquireAutoImportLease: vi.fn(() => { + throw Object.assign(new Error('already running'), { messageKey: 'autoImportRunning' }) + }), + createAutoImportMessageEvent: vi.fn((key: string) => ({ key })), + formatAutoImportMessageEvent: vi.fn(() => 'An auto-import is already running.'), + }, + }) + + const { res, body } = await request(router, '/api/auto-import/stream', 'POST') + + expect(res.status).toBe(409) + expect(body).toEqual({ message: 'An auto-import is already running.' 
}) + }) + + it('streams structured auto-import errors and releases the lease after failures', async () => { + const lease = { release: vi.fn() } + const { router } = createRouter({ + autoImportRuntimeOverrides: { + acquireAutoImportLease: vi.fn(() => lease), + performAutoImport: vi.fn(async () => { + throw new Error('toktrack failed') + }), + toAutoImportErrorEvent: vi.fn((error: Error) => ({ + message: error.message, + })), + }, + }) + + const { res } = await requestRaw(router, '/api/auto-import/stream', 'POST') + + expect(res.status).toBe(200) + expect(res.body).toContain('event: error') + expect(res.body).toContain('"message":"toktrack failed"') + expect(res.body).toContain('event: done') + expect(lease.release).toHaveBeenCalledTimes(1) + }) +}) diff --git a/tests/unit/data-transforms.test.ts b/tests/unit/data-transforms.test.ts index f73c89d..e891f6c 100644 --- a/tests/unit/data-transforms.test.ts +++ b/tests/unit/data-transforms.test.ts @@ -1,5 +1,5 @@ import { describe, expect, it } from 'vitest' -import { getCurrentMonthForecastData } from '@/lib/data-transforms' +import { buildDashboardChartTransforms, getCurrentMonthForecastData } from '@/lib/data-transforms' import type { DailyUsage } from '@/types' describe('getCurrentMonthForecastData', () => { @@ -142,3 +142,19 @@ describe('getCurrentMonthForecastData', () => { ).toEqual([]) }) }) + +describe('buildDashboardChartTransforms', () => { + it('includes weekday indices for empty weekday chart data', () => { + const transforms = buildDashboardChartTransforms([], 'en-US') + + expect(transforms.weekdayData).toEqual([ + { day: 'Mo', cost: 0, weekdayIndex: 0 }, + { day: 'Tu', cost: 0, weekdayIndex: 1 }, + { day: 'We', cost: 0, weekdayIndex: 2 }, + { day: 'Th', cost: 0, weekdayIndex: 3 }, + { day: 'Fr', cost: 0, weekdayIndex: 4 }, + { day: 'Sa', cost: 0, weekdayIndex: 5 }, + { day: 'Su', cost: 0, weekdayIndex: 6 }, + ]) + }) +}) diff --git a/tests/unit/e2e-auth-helpers.test.ts b/tests/unit/e2e-auth-helpers.test.ts 
new file mode 100644 index 0000000..88b3a63 --- /dev/null +++ b/tests/unit/e2e-auth-helpers.test.ts @@ -0,0 +1,66 @@ +import { afterEach, describe, expect, it } from 'vitest' +import { createApiAuthHeaders, gotoDashboard, readLocalAuthSession } from '../e2e/helpers' + +const originalAuthorizationHeader = process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER +const originalBootstrapUrl = process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL + +function restoreEnv() { + if (originalAuthorizationHeader === undefined) { + delete process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER + } else { + process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER = originalAuthorizationHeader + } + + if (originalBootstrapUrl === undefined) { + delete process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL + } else { + process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL = originalBootstrapUrl + } +} + +describe('E2E auth helpers', () => { + afterEach(() => { + restoreEnv() + }) + + it('uses fixture-provided auth environment instead of reading session files', () => { + process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER = 'Bearer test-token' + process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL = 'http://127.0.0.1:3015/?ttdash_token=test-token' + + expect(readLocalAuthSession()).toEqual({ + authorizationHeader: 'Bearer test-token', + bootstrapUrl: 'http://127.0.0.1:3015/?ttdash_token=test-token', + }) + expect(createApiAuthHeaders()).toEqual({ Authorization: 'Bearer test-token' }) + }) + + it('fails clearly when fixture auth environment is missing', () => { + delete process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER + process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL = 'http://127.0.0.1:3015/?ttdash_token=test-token' + + expect(() => createApiAuthHeaders()).toThrow( + 'PLAYWRIGHT_TEST_AUTHORIZATION_HEADER is required for Playwright API authentication', + ) + }) + + it('fails clearly when bootstrap URL is missing', () => { + process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER = 'Bearer test-token' + delete process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL + + expect(() 
=> readLocalAuthSession()).toThrow( + 'PLAYWRIGHT_TEST_BOOTSTRAP_URL is required for Playwright API authentication', + ) + }) + + it('navigates to the fixture-provided bootstrap URL', async () => { + process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER = 'Bearer test-token' + process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL = 'http://127.0.0.1:3015/?ttdash_token=test-token' + const page = { + goto: async (url: string) => ({ url }), + } + + await expect(gotoDashboard(page as never)).resolves.toEqual({ + url: 'http://127.0.0.1:3015/?ttdash_token=test-token', + }) + }) +}) diff --git a/tests/unit/http-router-mutations.test.ts b/tests/unit/http-router-mutations.test.ts index a72f346..a8128bf 100644 --- a/tests/unit/http-router-mutations.test.ts +++ b/tests/unit/http-router-mutations.test.ts @@ -1,192 +1,5 @@ -import path from 'node:path' -import { createRequire } from 'node:module' import { describe, expect, it, vi } from 'vitest' - -const require = createRequire(import.meta.url) -const { createHttpRouter } = require('../../server/http-router.js') as { - createHttpRouter: (options: Record) => { - handleServerRequest: ( - req: { - url: string - method: string - headers: Record - on?: (event: string, listener: () => void) => unknown - }, - res: MockResponse, - ) => Promise - } -} - -class MockResponse { - status = 0 - headers: Record = {} - body = '' - - writeHead(status: number, headers: Record) { - this.status = status - this.headers = headers - } - - write(body: string | Buffer) { - this.body += Buffer.isBuffer(body) ? body.toString('utf8') : body - } - - end(body?: string | Buffer) { - if (body !== undefined) { - this.body += Buffer.isBuffer(body) ? 
body.toString('utf8') : body - } - } -} - -function createRouter({ - autoImportRuntimeOverrides = {}, - dataRuntimeOverrides = {}, - generatePdfReport = vi.fn(), - getRuntimeSnapshot = vi.fn(() => ({ - id: 'runtime-1', - mode: 'foreground', - port: 3000, - url: 'http://127.0.0.1:3000', - })), - httpUtilsOverrides = {}, - readBody = vi.fn(async () => ({})), - remoteAuthOverrides = {}, -}: { - autoImportRuntimeOverrides?: Record - dataRuntimeOverrides?: Record - generatePdfReport?: () => Promise<{ buffer: Buffer; filename: string }> - getRuntimeSnapshot?: () => unknown - httpUtilsOverrides?: Record - readBody?: () => Promise - remoteAuthOverrides?: Record -} = {}) { - const dataRuntime = { - extractSettingsImportPayload: vi.fn( - (payload: { settings?: unknown }) => payload.settings ?? payload, - ), - extractUsageImportPayload: vi.fn((payload: { data?: unknown }) => payload.data ?? payload), - isPayloadTooLargeError: vi.fn( - (error: { code?: string }) => error?.code === 'PAYLOAD_TOO_LARGE', - ), - isPersistedStateError: vi.fn(() => false), - mergeUsageData: vi.fn((_currentData, importedData) => ({ - data: importedData, - summary: { - importedDays: Array.isArray(importedData?.daily) ? importedData.daily.length : 0, - addedDays: Array.isArray(importedData?.daily) ? importedData.daily.length : 0, - unchangedDays: 0, - conflictingDays: 0, - skippedDays: 0, - totalDays: Array.isArray(importedData?.daily) ? 
importedData.daily.length : 0, - }, - })), - normalizeIncomingData: vi.fn((payload) => payload), - normalizeSettings: vi.fn((payload) => payload), - readData: vi.fn(() => null), - readSettings: vi.fn(() => ({ language: 'en' })), - unlinkIfExists: vi.fn(), - _updateDataLoadStateUnlocked: vi.fn(async () => undefined), - updateDataLoadState: vi.fn(async () => undefined), - updateSettings: vi.fn(async (body) => ({ ok: true, body })), - withFileMutationLock: vi.fn(async (_filePath: string, operation: () => Promise) => - operation(), - ), - withSettingsAndDataMutationLock: vi.fn(async (operation: () => Promise) => - operation(), - ), - writeData: vi.fn(async () => undefined), - writeSettings: vi.fn(async () => undefined), - paths: { - dataFile: '/data/data.json', - settingsFile: '/data/settings.json', - }, - ...dataRuntimeOverrides, - } - - const router = createHttpRouter({ - fs: { promises: { readFile: vi.fn() } }, - path, - staticRoot: '/app/dist', - securityHeaders: { 'X-Test-Security': '1' }, - httpUtils: { - json: ( - res: MockResponse, - status: number, - payload: unknown, - headers: Record = {}, - ) => { - res.writeHead(status, { - 'Content-Type': 'application/json; charset=utf-8', - ...headers, - }) - res.end(JSON.stringify(payload)) - }, - readBody, - resolveApiPath: (pathname: string) => - pathname === '/api' - ? '/' - : pathname.startsWith('/api/') - ? 
pathname.slice('/api'.length) - : null, - sendBuffer: ( - res: MockResponse, - status: number, - headers: Record, - buffer: Buffer, - ) => { - res.writeHead(status, headers) - res.end(buffer) - }, - validateMutationRequest: vi.fn(() => null), - validateRequestHost: vi.fn(() => null), - ...httpUtilsOverrides, - }, - remoteAuth: { - resolveBootstrapResponse: () => null, - validateApiRequest: () => null, - ...remoteAuthOverrides, - }, - dataRuntime, - autoImportRuntime: { - lookupLatestToktrackVersion: vi.fn(async () => ({ - configuredVersion: '1.0.0', - latestVersion: '1.0.0', - isLatest: true, - lookupStatus: 'ok', - })), - ...autoImportRuntimeOverrides, - }, - generatePdfReport, - getRuntimeSnapshot, - }) - - return { dataRuntime, generatePdfReport, getRuntimeSnapshot, readBody, router } -} - -async function requestRaw( - router: ReturnType['router'], - url: string, - method: string, -) { - const res = new MockResponse() - const req = { - url, - method, - headers: { 'content-type': 'application/json' }, - on: vi.fn(), - } - await router.handleServerRequest(req, res) - return { res } -} - -async function request( - router: ReturnType['router'], - url: string, - method: string, -) { - const { res } = await requestRaw(router, url, method) - return { res, body: JSON.parse(res.body) } -} +import { createRouter, createValidUsageData, request } from './http-router-test-helpers' describe('HTTP router mutation errors', () => { it('keeps malformed settings requests as client errors', async () => { @@ -337,7 +150,7 @@ describe('HTTP router mutation errors', () => { }) it('returns server errors for upload write failures', async () => { - const usageData = { daily: [{ date: '2026-04-27' }], totals: { totalCost: 1 } } + const usageData = createValidUsageData() const { router } = createRouter({ readBody: vi.fn(async () => usageData), dataRuntimeOverrides: { @@ -365,289 +178,154 @@ describe('HTTP router mutation errors', () => { expect(dataRuntime.writeData).not.toHaveBeenCalled() 
}) - it('returns server errors for usage import write failures', async () => { - const usageData = { daily: [{ date: '2026-04-27' }], totals: { totalCost: 1 } } - const { router } = createRouter({ - readBody: vi.fn(async () => ({ data: usageData })), - dataRuntimeOverrides: { - writeData: vi.fn(async () => { - throw Object.assign(new Error('disk full'), { code: 'ENOSPC' }) - }), - }, - }) - - const { res, body } = await request(router, '/api/usage/import', 'POST') - - expect(res.status).toBe(500) - expect(body).toEqual({ message: 'Server error' }) - }) - - it('rejects normalized usage imports that are not valid usage data', async () => { + it('rejects normalized upload payloads with partial totals', async () => { const { dataRuntime, router } = createRouter({ - readBody: vi.fn(async () => ({ data: { daily: [{ date: 42 }], totals: {} } })), + readBody: vi.fn(async () => ({ + daily: [{ date: '2026-04-27' }], + totals: {}, + })), }) - const { res, body } = await request(router, '/api/usage/import', 'POST') + const { res, body } = await request(router, '/api/upload', 'POST') expect(res.status).toBe(400) - expect(body).toEqual({ message: 'Invalid usage backup file' }) + expect(body).toEqual({ message: 'Invalid JSON' }) expect(dataRuntime.writeData).not.toHaveBeenCalled() }) - it('returns service unavailable when toktrack version lookup throws', async () => { - const { router } = createRouter({ - autoImportRuntimeOverrides: { - lookupLatestToktrackVersion: vi.fn(async () => { - throw new Error('registry unavailable') - }), - }, - }) - - const { res, body } = await request(router, '/api/toktrack/version-status', 'GET') - - expect(res.status).toBe(503) - expect(body).toEqual({ - message: 'Service Unavailable', - detail: 'registry unavailable', - }) - }) - - it('streams auto-import success events and releases the acquired lease', async () => { - const lease = { release: vi.fn() } - const closeImport = vi.fn() - const performAutoImport = vi.fn(async ({ onCheck, onOutput, 
onProgress, signalOnClose }) => { - signalOnClose(closeImport) - onCheck({ tool: 'toktrack', status: 'found' }) - onProgress({ key: 'startingLocalImport', vars: {} }) - onOutput('runner warning') - return { days: 2, totalCost: 3.5 } - }) - const { router } = createRouter({ - autoImportRuntimeOverrides: { - acquireAutoImportLease: vi.fn(() => lease), - performAutoImport, - }, + it('rejects normalized upload payloads missing detailed totals', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ + daily: [{ date: '2026-04-27' }], + totals: { + totalCost: 1, + totalTokens: 100, + requestCount: 2, + }, + })), }) - const { res } = await requestRaw(router, '/api/auto-import/stream', 'POST') + const { res, body } = await request(router, '/api/upload', 'POST') - expect(res.status).toBe(200) - expect(res.headers['Content-Type']).toBe('text/event-stream') - expect(res.body).toContain('event: check') - expect(res.body).toContain('"status":"found"') - expect(res.body).toContain('event: progress') - expect(res.body).toContain('event: stderr') - expect(res.body).toContain('runner warning') - expect(res.body).toContain('event: success') - expect(res.body).toContain('"totalCost":3.5') - expect(res.body).toContain('event: done') - expect(performAutoImport).toHaveBeenCalledWith( - expect.objectContaining({ - source: 'auto-import', - lease, - }), - ) - expect(lease.release).toHaveBeenCalledTimes(1) + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid JSON' }) + expect(dataRuntime.writeData).not.toHaveBeenCalled() }) - it('maps concurrent auto-import starts to a localized conflict response', async () => { - const { router } = createRouter({ - autoImportRuntimeOverrides: { - acquireAutoImportLease: vi.fn(() => { - throw Object.assign(new Error('already running'), { messageKey: 'autoImportRunning' }) - }), - createAutoImportMessageEvent: vi.fn((key: string) => ({ key })), - formatAutoImportMessageEvent: vi.fn(() => 'An 
auto-import is already running.'), - }, + it('rejects normalized upload payloads with non-finite totals', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ + daily: [{ date: '2026-04-27' }], + totals: { + inputTokens: 60, + outputTokens: 20, + cacheCreationTokens: 5, + cacheReadTokens: 10, + thinkingTokens: 5, + totalCost: Number.POSITIVE_INFINITY, + totalTokens: 10, + requestCount: 1, + }, + })), }) - const { res, body } = await request(router, '/api/auto-import/stream', 'POST') + const { res, body } = await request(router, '/api/upload', 'POST') - expect(res.status).toBe(409) - expect(body).toEqual({ message: 'An auto-import is already running.' }) + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid JSON' }) + expect(dataRuntime.writeData).not.toHaveBeenCalled() }) - it('streams structured auto-import errors and releases the lease after failures', async () => { - const lease = { release: vi.fn() } + it('returns server errors for usage import write failures', async () => { + const usageData = createValidUsageData() const { router } = createRouter({ - autoImportRuntimeOverrides: { - acquireAutoImportLease: vi.fn(() => lease), - performAutoImport: vi.fn(async () => { - throw new Error('toktrack failed') + readBody: vi.fn(async () => ({ data: usageData })), + dataRuntimeOverrides: { + writeData: vi.fn(async () => { + throw Object.assign(new Error('disk full'), { code: 'ENOSPC' }) }), - toAutoImportErrorEvent: vi.fn((error: Error) => ({ - message: error.message, - })), }, }) - const { res } = await requestRaw(router, '/api/auto-import/stream', 'POST') - - expect(res.status).toBe(200) - expect(res.body).toContain('event: error') - expect(res.body).toContain('"message":"toktrack failed"') - expect(res.body).toContain('event: done') - expect(lease.release).toHaveBeenCalledTimes(1) - }) - - it('rejects untrusted hosts before API auth is evaluated', async () => { - const validateApiRequest = vi.fn(() => ({ 
- status: 401, - message: 'Authentication required', - })) - const { router } = createRouter({ - httpUtilsOverrides: { - validateRequestHost: vi.fn(() => ({ - status: 403, - message: 'Untrusted host header', - })), - }, - remoteAuthOverrides: { - validateApiRequest, - }, - }) - - const { res, body } = await request(router, '/api/runtime', 'GET') - - expect(res.status).toBe(403) - expect(body).toEqual({ message: 'Untrusted host header' }) - expect(validateApiRequest).not.toHaveBeenCalled() - }) - - it('forwards API authentication failures with challenge headers', async () => { - const { router } = createRouter({ - remoteAuthOverrides: { - validateApiRequest: vi.fn(() => ({ - headers: { 'WWW-Authenticate': 'Bearer realm="TTDash API"' }, - status: 401, - message: 'Authentication required', - })), - }, - }) - - const { res, body } = await request(router, '/api/runtime', 'GET') + const { res, body } = await request(router, '/api/usage/import', 'POST') - expect(res.status).toBe(401) - expect(res.headers['WWW-Authenticate']).toBe('Bearer realm="TTDash API"') - expect(body).toEqual({ message: 'Authentication required' }) + expect(res.status).toBe(500) + expect(body).toEqual({ message: 'Server error' }) }) - it('serves runtime snapshots only through GET', async () => { - const snapshot = { - id: 'runtime-2', - mode: 'background', - port: 3020, - url: 'http://localhost:3020', - } - const { getRuntimeSnapshot, router } = createRouter({ - getRuntimeSnapshot: vi.fn(() => snapshot), + it('rejects normalized usage imports that are not valid usage data', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ data: { daily: [{ date: 42 }], totals: {} } })), }) - const runtimeResponse = await request(router, '/api/runtime', 'GET') - const rejectedMethod = await request(router, '/api/runtime', 'POST') + const { res, body } = await request(router, '/api/usage/import', 'POST') - expect(runtimeResponse.res.status).toBe(200) - 
expect(runtimeResponse.body).toEqual(snapshot) - expect(getRuntimeSnapshot).toHaveBeenCalledTimes(1) - expect(rejectedMethod.res.status).toBe(405) - expect(rejectedMethod.body).toEqual({ message: 'Method Not Allowed' }) + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid usage backup file' }) + expect(dataRuntime.writeData).not.toHaveBeenCalled() }) - it('keeps unknown API endpoints distinct from stale API prefixes', async () => { - const { router } = createRouter({ - httpUtilsOverrides: { - // Simulate a configured API prefix with no handler separately from a stale /api URL. - resolveApiPath: (pathname: string) => (pathname === '/custom/unknown' ? '/unknown' : null), - }, + it('rejects normalized usage imports with partial totals', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ + data: { + daily: [{ date: '2026-04-27' }], + totals: {}, + }, + })), }) - const unknownEndpoint = await request(router, '/custom/unknown', 'GET') - const stalePrefix = await request(router, '/api/not-configured', 'GET') + const { res, body } = await request(router, '/api/usage/import', 'POST') - expect(unknownEndpoint.res.status).toBe(404) - expect(unknownEndpoint.body).toEqual({ message: 'API endpoint not found' }) - expect(stalePrefix.res.status).toBe(404) - expect(stalePrefix.body).toEqual({ message: 'Not Found' }) + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid usage backup file' }) + expect(dataRuntime.writeData).not.toHaveBeenCalled() }) - it('rejects PDF reports without usage data before reading the request body', async () => { - const readBody = vi.fn(async () => ({ title: 'Usage' })) - const { router } = createRouter({ - dataRuntimeOverrides: { - readData: vi.fn(() => null), - }, - readBody, + it('rejects normalized usage imports missing detailed totals', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ + data: { + daily: [{ date: 
'2026-04-27' }], + totals: { + totalCost: 1, + totalTokens: 100, + requestCount: 2, + }, + }, + })), }) - const { res, body } = await request(router, '/api/report/pdf', 'POST') + const { res, body } = await request(router, '/api/usage/import', 'POST') expect(res.status).toBe(400) - expect(body).toEqual({ message: 'No data available for the report.' }) - expect(readBody).not.toHaveBeenCalled() - }) - - it('maps PDF body and generator failures to client or service errors', async () => { - const usageData = { daily: [{ date: '2026-04-27' }], totals: { totalCost: 1 } } - const invalidBodyRouter = createRouter({ - dataRuntimeOverrides: { - readData: vi.fn(() => usageData), - }, - readBody: vi.fn(async () => { - throw new Error('broken report JSON') - }), - }).router - const oversizedRouter = createRouter({ - dataRuntimeOverrides: { - readData: vi.fn(() => usageData), - }, - readBody: vi.fn(async () => { - throw Object.assign(new Error('too large'), { code: 'PAYLOAD_TOO_LARGE' }) - }), - }).router - const typstMissingRouter = createRouter({ - dataRuntimeOverrides: { - readData: vi.fn(() => usageData), - }, - generatePdfReport: vi.fn(async () => { - throw Object.assign(new Error('Typst not found'), { code: 'TYPST_MISSING' }) - }), - readBody: vi.fn(async () => ({ locale: 'de-CH' })), - }).router - - const invalidBody = await request(invalidBodyRouter, '/api/report/pdf', 'POST') - const oversized = await request(oversizedRouter, '/api/report/pdf', 'POST') - const typstMissing = await request(typstMissingRouter, '/api/report/pdf', 'POST') - - expect(invalidBody.res.status).toBe(400) - expect(invalidBody.body).toEqual({ message: 'Invalid report request' }) - expect(oversized.res.status).toBe(413) - expect(oversized.body).toEqual({ message: 'Report request too large' }) - expect(typstMissing.res.status).toBe(503) - expect(typstMissing.body).toEqual({ message: 'Typst not found' }) + expect(body).toEqual({ message: 'Invalid usage backup file' }) + 
expect(dataRuntime.writeData).not.toHaveBeenCalled() }) - it('sends generated PDF buffers with attachment headers', async () => { - const usageData = { daily: [{ date: '2026-04-27' }], totals: { totalCost: 1 } } - const generatePdfReport = vi.fn(async () => ({ - buffer: Buffer.from('%PDF-1.4'), - filename: 'ttdash-report.pdf', - })) - const { router } = createRouter({ - dataRuntimeOverrides: { - readData: vi.fn(() => usageData), - }, - generatePdfReport, - readBody: vi.fn(async () => ({ locale: 'de-CH' })), + it('rejects normalized usage imports with non-finite totals', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ + data: { + daily: [{ date: '2026-04-27' }], + totals: { + inputTokens: 60, + outputTokens: 20, + cacheCreationTokens: 5, + cacheReadTokens: Number.NaN, + thinkingTokens: 5, + totalCost: 1, + totalTokens: 100, + requestCount: 2, + }, + }, + })), }) - const { res } = await requestRaw(router, '/api/report/pdf', 'POST') + const { res, body } = await request(router, '/api/usage/import', 'POST') - expect(res.status).toBe(200) - expect(res.headers['Content-Type']).toBe('application/pdf') - expect(res.headers['Content-Disposition']).toBe( - 'attachment; filename="ttdash-report.pdf"; filename*=UTF-8\'\'ttdash-report.pdf', - ) - expect(res.body).toBe('%PDF-1.4') - expect(generatePdfReport).toHaveBeenCalledWith(usageData.daily, { locale: 'de-CH' }) + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid usage backup file' }) + expect(dataRuntime.writeData).not.toHaveBeenCalled() }) }) diff --git a/tests/unit/http-router-test-helpers.ts b/tests/unit/http-router-test-helpers.ts new file mode 100644 index 0000000..e34feba --- /dev/null +++ b/tests/unit/http-router-test-helpers.ts @@ -0,0 +1,235 @@ +import path from 'node:path' +import { createRequire } from 'node:module' +import { vi } from 'vitest' + +const require = createRequire(import.meta.url) +const { createHttpRouter } = 
require('../../server/http-router.js') as { + createHttpRouter: (options: Record) => { + handleServerRequest: ( + req: { + url: string + method: string + headers: Record + on?: (event: string, listener: () => void) => unknown + }, + res: MockResponse, + ) => Promise + } +} + +type HeaderValue = string | number | string[] + +export class MockResponse { + status = 0 + headers: Record = {} + body = '' + + setHeader(name: string, value: HeaderValue) { + const key = name.toLowerCase() + const previous = this.headers[key] + + if (previous === undefined) { + this.headers[key] = value + return + } + + const previousValues = Array.isArray(previous) ? previous : [String(previous)] + const nextValues = Array.isArray(value) ? value.map(String) : [String(value)] + this.headers[key] = [...previousValues, ...nextValues] + } + + writeHead(status: number, headers: Record) { + this.status = status + const normalizedHeaders = Object.fromEntries( + Object.entries(headers).map(([key, value]) => [key.toLowerCase(), value]), + ) + this.headers = { ...this.headers, ...normalizedHeaders } + } + + write(body: string | Buffer) { + this.body += Buffer.isBuffer(body) ? body.toString('utf8') : body + } + + end(body?: string | Buffer) { + if (body !== undefined) { + this.body += Buffer.isBuffer(body) ? 
body.toString('utf8') : body + } + } +} + +export function createValidUsageData() { + return { + daily: [{ date: '2026-04-27' }], + totals: { + inputTokens: 60, + outputTokens: 20, + cacheCreationTokens: 5, + cacheReadTokens: 10, + thinkingTokens: 5, + totalCost: 1, + totalTokens: 100, + requestCount: 2, + }, + } +} + +export function createRouter({ + autoImportRuntimeOverrides = {}, + dataRuntimeOverrides = {}, + generatePdfReport = vi.fn(async () => ({ + buffer: Buffer.from(''), + filename: 'test.pdf', + })), + getRuntimeSnapshot = vi.fn(() => ({ + id: 'runtime-1', + mode: 'foreground', + port: 3000, + url: 'http://127.0.0.1:3000', + })), + httpUtilsOverrides = {}, + readBody = vi.fn(async () => ({})), + remoteAuthOverrides = {}, +}: { + autoImportRuntimeOverrides?: Record + dataRuntimeOverrides?: Record + generatePdfReport?: () => Promise<{ buffer: Buffer; filename: string }> + getRuntimeSnapshot?: () => unknown + httpUtilsOverrides?: Record + readBody?: () => Promise + remoteAuthOverrides?: Record +} = {}) { + const dataRuntime = { + extractSettingsImportPayload: vi.fn( + (payload: { settings?: unknown }) => payload.settings ?? payload, + ), + extractUsageImportPayload: vi.fn((payload: { data?: unknown }) => payload.data ?? payload), + isPayloadTooLargeError: vi.fn( + (error: { code?: string }) => error?.code === 'PAYLOAD_TOO_LARGE', + ), + isPersistedStateError: vi.fn(() => false), + mergeUsageData: vi.fn((_currentData, importedData) => ({ + data: importedData, + summary: { + importedDays: Array.isArray(importedData?.daily) ? importedData.daily.length : 0, + addedDays: Array.isArray(importedData?.daily) ? importedData.daily.length : 0, + unchangedDays: 0, + conflictingDays: 0, + skippedDays: 0, + totalDays: Array.isArray(importedData?.daily) ? 
importedData.daily.length : 0, + }, + })), + normalizeIncomingData: vi.fn((payload) => payload), + normalizeSettings: vi.fn((payload) => payload), + readData: vi.fn(() => null), + readSettings: vi.fn(() => ({ language: 'en' })), + unlinkIfExists: vi.fn(), + _updateDataLoadStateUnlocked: vi.fn(async () => undefined), + updateDataLoadState: vi.fn(async () => undefined), + updateSettings: vi.fn(async (body) => ({ ok: true, body })), + withFileMutationLock: vi.fn(async (_filePath: string, operation: () => Promise) => + operation(), + ), + withSettingsAndDataMutationLock: vi.fn(async (operation: () => Promise) => + operation(), + ), + writeData: vi.fn(async () => undefined), + writeSettings: vi.fn(async () => undefined), + paths: { + dataFile: '/data/data.json', + settingsFile: '/data/settings.json', + }, + ...dataRuntimeOverrides, + } + + const router = createHttpRouter({ + fs: { promises: { readFile: vi.fn() } }, + path, + staticRoot: '/app/dist', + securityHeaders: { 'X-Test-Security': '1' }, + httpUtils: { + json: ( + res: MockResponse, + status: number, + payload: unknown, + headers: Record = {}, + ) => { + res.writeHead(status, { + 'Content-Type': 'application/json; charset=utf-8', + ...headers, + }) + res.end(JSON.stringify(payload)) + }, + readBody, + resolveApiPath: (pathname: string) => + pathname === '/api' + ? '/' + : pathname.startsWith('/api/') + ? 
pathname.slice('/api'.length) + : null, + sendBuffer: ( + res: MockResponse, + status: number, + headers: Record, + buffer: Buffer, + ) => { + res.writeHead(status, headers) + res.end(buffer) + }, + validateMutationRequest: vi.fn(() => null), + validateRequestHost: vi.fn(() => null), + ...httpUtilsOverrides, + }, + remoteAuth: { + resolveBootstrapResponse: () => null, + validateApiRequest: () => null, + ...remoteAuthOverrides, + }, + dataRuntime, + autoImportRuntime: { + lookupLatestToktrackVersion: vi.fn(async () => ({ + configuredVersion: '1.0.0', + latestVersion: '1.0.0', + isLatest: true, + lookupStatus: 'ok', + })), + ...autoImportRuntimeOverrides, + }, + generatePdfReport, + getRuntimeSnapshot, + }) + + return { dataRuntime, generatePdfReport, getRuntimeSnapshot, readBody, router } +} + +export async function requestRaw( + router: ReturnType['router'], + url: string, + method: string, + headers: Record = { 'content-type': 'application/json' }, +) { + const res = new MockResponse() + const req = { + url, + method, + headers, + on: vi.fn(), + } + await router.handleServerRequest(req, res) + return { req, res } +} + +export async function request( + router: ReturnType['router'], + url: string, + method: string, + headers: Record = { 'content-type': 'application/json' }, +) { + const { res } = await requestRaw(router, url, method, headers) + try { + return { res, body: JSON.parse(res.body) } + } catch { + throw new Error( + `Failed to parse JSON response (status=${res.status}): ${res.body.slice(0, 200)}`, + ) + } +} diff --git a/tests/unit/report-pdf-routes.test.ts b/tests/unit/report-pdf-routes.test.ts new file mode 100644 index 0000000..5766f4b --- /dev/null +++ b/tests/unit/report-pdf-routes.test.ts @@ -0,0 +1,97 @@ +import { describe, expect, it, vi } from 'vitest' +import { createRouter, createValidUsageData, request, requestRaw } from './http-router-test-helpers' + +describe('PDF report routes', () => { + it('rejects PDF reports without usage data before 
reading the request body', async () => { + const readBody = vi.fn(async () => ({ title: 'Usage' })) + const { router } = createRouter({ + dataRuntimeOverrides: { + readData: vi.fn(() => null), + }, + readBody, + }) + + const { res, body } = await request(router, '/api/report/pdf', 'POST') + + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'No data available for the report.' }) + expect(readBody).not.toHaveBeenCalled() + }) + + it('maps invalid PDF request bodies to 400', async () => { + const usageData = createValidUsageData() + const { router } = createRouter({ + dataRuntimeOverrides: { + readData: vi.fn(() => usageData), + }, + readBody: vi.fn(async () => { + throw new Error('broken report JSON') + }), + }) + + const { res, body } = await request(router, '/api/report/pdf', 'POST') + + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid report request' }) + }) + + it('maps oversized PDF request bodies to 413', async () => { + const usageData = createValidUsageData() + const { router } = createRouter({ + dataRuntimeOverrides: { + readData: vi.fn(() => usageData), + }, + readBody: vi.fn(async () => { + throw Object.assign(new Error('too large'), { code: 'PAYLOAD_TOO_LARGE' }) + }), + }) + + const { res, body } = await request(router, '/api/report/pdf', 'POST') + + expect(res.status).toBe(413) + expect(body).toEqual({ message: 'Report request too large' }) + }) + + it('maps missing Typst PDF generator failures to 503', async () => { + const usageData = createValidUsageData() + const { router } = createRouter({ + dataRuntimeOverrides: { + readData: vi.fn(() => usageData), + }, + generatePdfReport: vi.fn(async () => { + throw Object.assign(new Error('Typst not found'), { code: 'TYPST_MISSING' }) + }), + readBody: vi.fn(async () => ({ locale: 'de-CH' })), + }) + + const { res, body } = await request(router, '/api/report/pdf', 'POST') + + expect(res.status).toBe(503) + expect(body).toEqual({ message: 'Typst not found' }) + }) + + 
it('sends generated PDF buffers with attachment headers', async () => { + const usageData = createValidUsageData() + const generatePdfReport = vi.fn(async () => ({ + buffer: Buffer.from('%PDF-1.4'), + filename: 'ttdash-report.pdf', + })) + const { router } = createRouter({ + dataRuntimeOverrides: { + readData: vi.fn(() => usageData), + }, + generatePdfReport, + readBody: vi.fn(async () => ({ locale: 'de-CH' })), + }) + + const { res } = await requestRaw(router, '/api/report/pdf', 'POST') + + expect(res.status).toBe(200) + expect(res.headers['content-type']).toBe('application/pdf') + expect(res.headers['content-disposition']).toBe( + 'attachment; filename="ttdash-report.pdf"; filename*=UTF-8\'\'ttdash-report.pdf', + ) + expect(res.body).toBe('%PDF-1.4') + expect(generatePdfReport).toHaveBeenCalledWith(usageData.daily, { locale: 'de-CH' }) + }) +}) diff --git a/tests/unit/run-vitest-project-timings.test.ts b/tests/unit/run-vitest-project-timings.test.ts index cbc4318..2ac4840 100644 --- a/tests/unit/run-vitest-project-timings.test.ts +++ b/tests/unit/run-vitest-project-timings.test.ts @@ -38,7 +38,11 @@ type RunnerScript = { stdout: { write: (message: string) => void } stderr: { write: (message: string) => void } }, - spawnSyncImpl: (command: string, args: string[], options: unknown) => { status: number | null }, + spawnSyncImpl: ( + command: string, + args: string[], + options: unknown, + ) => { error?: Error; status: number | null }, ) => number } @@ -56,7 +60,7 @@ describe('Vitest project timing runner', () => { expect(options.projects).toEqual(['unit', 'frontend']) expect(options.repeat).toBe(3) - expect(options.reportDir).toMatch(/\.cache\/test-timings$/) + expect(options.reportDir.replaceAll('\\', '/')).toMatch(/\.cache\/test-timings$/) expect(options.maxSuiteSeconds).toBe(15) expect(options.maxTestSeconds).toBe(8) }) @@ -134,19 +138,61 @@ describe('Vitest project timing runner', () => { const spawnSyncImpl = vi.fn(() => ({ status: 0 })) const mkdirSync = 
vi.spyOn(fs, 'mkdirSync') - const status = timingRunner.run( - ['--projects=unit', '--repeat=2', '--dry-run'], - { stdout, stderr }, - spawnSyncImpl, - ) + try { + const status = timingRunner.run( + ['--projects=unit', '--repeat=2', '--dry-run'], + { stdout, stderr }, + spawnSyncImpl, + ) + + expect(status).toBe(0) + expect(spawnSyncImpl).not.toHaveBeenCalled() + expect(mkdirSync).not.toHaveBeenCalled() + expect(stdout.write.mock.calls.join('\n')).toContain('vitest-unit.timing-run-1.junit.xml') + expect(stdout.write.mock.calls.join('\n')).toContain('vitest-unit.timing-run-2.junit.xml') + } finally { + mkdirSync.mockRestore() + } + }) - expect(status).toBe(0) - expect(spawnSyncImpl).not.toHaveBeenCalled() - expect(mkdirSync).not.toHaveBeenCalled() - expect(stdout.write.mock.calls.join('\n')).toContain('vitest-unit.timing-run-1.junit.xml') - expect(stdout.write.mock.calls.join('\n')).toContain('vitest-unit.timing-run-2.junit.xml') + it('fails explicitly when Vitest cannot be launched', () => { + const stdout = { write: vi.fn() } + const stderr = { write: vi.fn() } + const spawnSyncImpl = vi.fn(() => ({ + error: new Error('spawn npx ENOENT'), + status: null, + })) + const mkdirSync = vi.spyOn(fs, 'mkdirSync').mockImplementation(() => undefined) + + try { + const status = timingRunner.run(['--projects=unit'], { stdout, stderr }, spawnSyncImpl) + + expect(status).toBe(1) + expect(stderr.write).toHaveBeenCalledWith('Failed to launch vitest: spawn npx ENOENT\n') + } finally { + mkdirSync.mockRestore() + } + }) - mkdirSync.mockRestore() + it('fails explicitly when the timing budget check cannot be launched', () => { + const stdout = { write: vi.fn() } + const stderr = { write: vi.fn() } + const spawnSyncImpl = vi + .fn() + .mockReturnValueOnce({ status: 0 }) + .mockReturnValueOnce({ error: new Error('spawn node ENOENT'), status: null }) + const mkdirSync = vi.spyOn(fs, 'mkdirSync').mockImplementation(() => undefined) + + try { + const status = 
timingRunner.run(['--projects=unit'], { stdout, stderr }, spawnSyncImpl) + + expect(status).toBe(1) + expect(stderr.write).toHaveBeenCalledWith( + 'Failed to launch budget check: spawn node ENOENT\n', + ) + } finally { + mkdirSync.mockRestore() + } }) it('calculates median values for repeated timings', () => { diff --git a/tests/unit/runtime-routes.test.ts b/tests/unit/runtime-routes.test.ts new file mode 100644 index 0000000..c3f1591 --- /dev/null +++ b/tests/unit/runtime-routes.test.ts @@ -0,0 +1,103 @@ +import { describe, expect, it, vi } from 'vitest' +import { createRouter, request } from './http-router-test-helpers' + +describe('runtime and API guard routes', () => { + it('returns service unavailable when toktrack version lookup throws', async () => { + const { router } = createRouter({ + autoImportRuntimeOverrides: { + lookupLatestToktrackVersion: vi.fn(async () => { + throw new Error('registry unavailable') + }), + }, + }) + + const { res, body } = await request(router, '/api/toktrack/version-status', 'GET') + + expect(res.status).toBe(503) + expect(body).toEqual({ + message: 'Service Unavailable', + detail: 'registry unavailable', + }) + }) + + it('rejects untrusted hosts before API auth is evaluated', async () => { + const validateApiRequest = vi.fn(() => ({ + status: 401, + message: 'Authentication required', + })) + const { router } = createRouter({ + httpUtilsOverrides: { + validateRequestHost: vi.fn(() => ({ + status: 403, + message: 'Untrusted host header', + })), + }, + remoteAuthOverrides: { + validateApiRequest, + }, + }) + + const { res, body } = await request(router, '/api/runtime', 'GET') + + expect(res.status).toBe(403) + expect(body).toEqual({ message: 'Untrusted host header' }) + expect(validateApiRequest).not.toHaveBeenCalled() + }) + + it('forwards API authentication failures with challenge headers', async () => { + const { router } = createRouter({ + remoteAuthOverrides: { + validateApiRequest: vi.fn(() => ({ + headers: { 
'WWW-Authenticate': 'Bearer realm="TTDash API"' }, + status: 401, + message: 'Authentication required', + })), + }, + }) + + const { res, body } = await request(router, '/api/runtime', 'GET') + + expect(res.status).toBe(401) + expect(res.headers['www-authenticate']).toBe('Bearer realm="TTDash API"') + expect(body).toEqual({ message: 'Authentication required' }) + }) + + it('serves runtime snapshots only through GET', async () => { + const snapshot = { + id: 'runtime-2', + mode: 'background', + port: 3020, + url: 'http://localhost:3020', + } + const getRuntimeSnapshot = vi.fn(() => snapshot) + const { router } = createRouter({ + getRuntimeSnapshot, + }) + + const runtimeResponse = await request(router, '/api/runtime', 'GET') + const rejectedMethod = await request(router, '/api/runtime', 'POST') + + expect(runtimeResponse.res.status).toBe(200) + expect(runtimeResponse.body).toEqual(snapshot) + expect(getRuntimeSnapshot).toHaveBeenCalledTimes(1) + expect(rejectedMethod.res.status).toBe(405) + expect(rejectedMethod.body).toEqual({ message: 'Method Not Allowed' }) + }) + + it('keeps unknown API endpoints distinct from stale API prefixes', async () => { + const { router } = createRouter({ + httpUtilsOverrides: { + // Simulate a configured API prefix with no handler separately from a stale /api URL. + resolveApiPath: (pathname: string) => (pathname === '/custom/unknown' ? 
'/unknown' : null), + }, + }) + + const unknownEndpoint = await request(router, '/custom/unknown', 'GET') + const stalePrefix = await request(router, '/api/not-configured', 'GET') + + expect(unknownEndpoint.res.status).toBe(404) + expect(unknownEndpoint.body).toEqual({ message: 'API endpoint not found' }) + expect(stalePrefix.res.status).toBe(404) + expect(stalePrefix.body).toEqual({ message: 'Not Found' }) + }) +}) diff --git a/tests/unit/server-helpers-auto-import-executor.test.ts b/tests/unit/server-helpers-auto-import-executor.test.ts index 7051d2d..fe09531 100644 --- a/tests/unit/server-helpers-auto-import-executor.test.ts +++ b/tests/unit/server-helpers-auto-import-executor.test.ts @@ -157,7 +157,7 @@ describe('server helper utilities: auto-import executor behavior', () => { 'warmingUpPackageRunner', 'loadingUsageData', ]) - expect(output).toEqual(['runner progress']) + expect(output).toEqual(['runner progress\n']) expect(runtime.isAutoImportRunning()).toBe(false) }) diff --git a/tests/unit/server-helpers-file-locks.test.ts b/tests/unit/server-helpers-file-locks.test.ts index a16abdd..1b134a2 100644 --- a/tests/unit/server-helpers-file-locks.test.ts +++ b/tests/unit/server-helpers-file-locks.test.ts @@ -520,17 +520,19 @@ dataRuntime.withFileMutationLock(filePath, async () => { const unlinkSpy = vi.spyOn(fsPromises, 'unlink') const nowSpy = vi.spyOn(Date, 'now').mockReturnValue(1700000000000) - await expect(writeJsonAtomicAsync(targetFile, { ok: true })).rejects.toBe(renameError) - - expect(renameSpy).toHaveBeenCalled() - const tempPath = unlinkSpy.mock.calls[0]?.[0] as string - expectAtomicTempPath(tempPath, targetFile, 1700000000000) - expect(existsSync(tempPath)).toBe(false) + try { + await expect(writeJsonAtomicAsync(targetFile, { ok: true })).rejects.toBe(renameError) - renameSpy.mockRestore() - unlinkSpy.mockRestore() - nowSpy.mockRestore() - await fsPromises.rm(targetDir, { recursive: true, force: true }) + expect(renameSpy).toHaveBeenCalled() + const 
tempPath = unlinkSpy.mock.calls[0]?.[0] as string + expectAtomicTempPath(tempPath, targetFile, 1700000000000) + expect(existsSync(tempPath)).toBe(false) + } finally { + renameSpy.mockRestore() + unlinkSpy.mockRestore() + nowSpy.mockRestore() + await fsPromises.rm(targetDir, { recursive: true, force: true }) + } }) it('surfaces async atomic write cleanup failures with the original write error', async () => { @@ -637,17 +639,19 @@ dataRuntime.withFileMutationLock(filePath, async () => { const unlinkSpy = vi.spyOn(fsPromises, 'unlink') const nowSpy = vi.spyOn(Date, 'now').mockReturnValue(1700000000001) - await expect(writeJsonAtomicAsync(targetFile, { ok: true })).rejects.toBe(writeError) - - expect(writeSpy).toHaveBeenCalled() - const tempPath = unlinkSpy.mock.calls[0]?.[0] as string - expectAtomicTempPath(tempPath, targetFile, 1700000000001) - expect(existsSync(tempPath)).toBe(false) + try { + await expect(writeJsonAtomicAsync(targetFile, { ok: true })).rejects.toBe(writeError) - writeSpy.mockRestore() - unlinkSpy.mockRestore() - nowSpy.mockRestore() - await fsPromises.rm(targetDir, { recursive: true, force: true }) + expect(writeSpy).toHaveBeenCalled() + const tempPath = unlinkSpy.mock.calls[0]?.[0] as string + expectAtomicTempPath(tempPath, targetFile, 1700000000001) + expect(existsSync(tempPath)).toBe(false) + } finally { + writeSpy.mockRestore() + unlinkSpy.mockRestore() + nowSpy.mockRestore() + await fsPromises.rm(targetDir, { recursive: true, force: true }) + } }) it.runIf(process.platform !== 'win32')( diff --git a/tests/unit/server-helpers-runner-core.test.ts b/tests/unit/server-helpers-runner-core.test.ts index 30fd991..1387cd1 100644 --- a/tests/unit/server-helpers-runner-core.test.ts +++ b/tests/unit/server-helpers-runner-core.test.ts @@ -61,6 +61,8 @@ function createRuntimeWithSpawn( latestLookupTimeoutMs?: number localBinExists?: boolean localBinOverride?: string + localProbeTimeoutMs?: number + localVersionCheckTimeoutMs?: number } = {}, ) { const 
localBin = options.localBinOverride ?? '/missing/toktrack' @@ -84,8 +86,8 @@ function createRuntimeWithSpawn( npxCacheDir: '/tmp/ttdash-test-npx-cache', isWindows: false, processTerminationGraceMs: 1000, - toktrackLocalRunnerProbeTimeoutMs: 7000, - toktrackLocalRunnerVersionCheckTimeoutMs: 7000, + toktrackLocalRunnerProbeTimeoutMs: options.localProbeTimeoutMs ?? 7000, + toktrackLocalRunnerVersionCheckTimeoutMs: options.localVersionCheckTimeoutMs ?? 7000, toktrackLocalRunnerImportTimeoutMs: 60000, toktrackPackageRunnerProbeTimeoutMs: 45000, toktrackPackageRunnerVersionCheckTimeoutMs: 45000, @@ -190,6 +192,32 @@ describe('server helper utilities: toktrack runner core behavior', () => { ) }) + it('uses the local version-check timeout before probing package fallbacks', async () => { + vi.useFakeTimers() + const spawnImpl = createSpawnSequence([ + { hang: true }, + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl, { + localBinExists: true, + localProbeTimeoutMs: 7000, + localVersionCheckTimeoutMs: 50, + }) + + try { + const resolutionPromise = runtime.resolveToktrackRunner() + await vi.advanceTimersByTimeAsync(50) + + expect(spawnImpl).toHaveBeenCalledTimes(2) + await expect(resolutionPromise).resolves.toMatchObject({ + command: 'bunx', + method: 'bunx', + }) + } finally { + vi.useRealTimers() + } + }) + it('falls back to npm when the local runner version mismatches and bunx probing fails', async () => { const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => undefined) const spawnImpl = createSpawnSequence([ @@ -359,7 +387,7 @@ describe('server helper utilities: toktrack runner core behavior', () => { }) child.stderr.emit('data', Buffer.from('runner warning\n')) - expect(stderrLines).toEqual(['runner warning']) + expect(stderrLines).toEqual(['runner warning\n']) closeCommand?.() From 19ed61c9dbd9777ad82fc4f6158843610423d353 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Sun, 3 May 2026 15:47:54 +0200 
Subject: [PATCH 39/42] v6.3.0: Address CodeRabbit follow-up findings --- CHANGELOG.md | 2 +- server/auto-import-runtime/command-runner.js | 71 +++++++++- server/auto-import-runtime/latest-version.js | 3 +- server/routes/static-routes.js | 6 + server/routes/usage-routes.js | 2 + .../dashboard-section-lazy-components.ts | 12 +- .../use-settings-modal-version-status.ts | 4 +- src/lib/toktrack-version-status.ts | 2 +- src/types/index.ts | 2 +- .../dashboard-language-regressions.test.tsx | 5 +- .../dashboard-sections-rendering.test.tsx | 11 +- tests/frontend/filter-bar-presets.test.tsx | 6 +- tests/unit/http-router-static.test.ts | 22 ++- tests/unit/playwright-config.test.ts | 1 - tests/unit/run-parallel-gate.test.ts | 3 +- ...erver-helpers-auto-import-executor.test.ts | 41 +----- tests/unit/server-helpers-runner-core.test.ts | 92 +++++++------ tests/unit/server-helpers.shared.ts | 50 ++++++- tests/unit/toktrack-version.test.ts | 129 +++++++++++++++++- 19 files changed, 346 insertions(+), 118 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 77fb525..9d6fb9b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,7 +22,7 @@ ### Commits -- Enthält alle Branch-Commits seit `origin/main`: `f5cf2b6`, `104d663`, `aec28b2`, `892d551`, `c4e3049`, `475aee4`, `5a701b7`, `0834a07`, `ca29012`, `04aa7a6`, `be994fd`, `708c3c7`, `7e60008`, `efb9cee`, `eab08c5`, `f2b3400`, `7555c0e`, `1657822`, `b7fbe80`, `67db759`, `44c073d`, `c4882ae`, `2932697`, `944a049`, `e9d54fa`, `117cfeb`, `b5cac6a`, `3a4285d`, `cb31d35`, `ab49513`, `c4abb2b`, `5f69ade`, `acb22ab` +- Enthält alle Branch-Commits seit `v6.2.9`: `f5cf2b6`, `104d663`, `aec28b2`, `892d551`, `c4e3049`, `475aee4`, `5a701b7`, `0834a07`, `ca29012`, `04aa7a6`, `be994fd`, `708c3c7`, `7e60008`, `efb9cee`, `eab08c5`, `f2b3400`, `7555c0e`, `1657822`, `b7fbe80`, `67db759`, `44c073d`, `c4882ae`, `2932697`, `944a049`, `e9d54fa`, `117cfeb`, `b5cac6a`, `3a4285d`, `cb31d35`, `ab49513`, `c4abb2b`, `5f69ade`, `acb22ab` ## [6.2.9] - 2026-04-28 
diff --git a/server/auto-import-runtime/command-runner.js b/server/auto-import-runtime/command-runner.js index 6319c8b..43f8b00 100644 --- a/server/auto-import-runtime/command-runner.js +++ b/server/auto-import-runtime/command-runner.js @@ -1,3 +1,5 @@ +const DEFAULT_COMMAND_OUTPUT_MAX_BYTES = 1024 * 1024; + function createAutoImportCommandRunner({ processObject = process, spawnCrossPlatform, @@ -43,7 +45,15 @@ function createAutoImportCommandRunner({ function createCommandError( message, - { command, args = [], stdout = '', stderr = '', exitCode = null, timedOut = false } = {}, + { + command, + args = [], + stdout = '', + stderr = '', + exitCode = null, + timedOut = false, + outputTruncated = false, + } = {}, ) { const error = new Error(message); error.command = command; @@ -52,9 +62,48 @@ function createAutoImportCommandRunner({ error.stderr = stderr; error.exitCode = exitCode; error.timedOut = timedOut; + error.outputTruncated = outputTruncated; return error; } + function getMaxOutputBytes(value) { + return typeof value === 'number' && Number.isFinite(value) && value > 0 + ? value + : DEFAULT_COMMAND_OUTPUT_MAX_BYTES; + } + + function appendCapturedOutput(currentValue, currentBytes, chunk, maxOutputBytes) { + const chunkBuffer = Buffer.isBuffer(chunk) ? 
chunk : Buffer.from(String(chunk)); + const remainingBytes = maxOutputBytes - currentBytes; + + if (remainingBytes <= 0) { + return { + bytes: currentBytes, + truncated: chunkBuffer.length > 0, + value: currentValue, + }; + } + + if (chunkBuffer.length > remainingBytes) { + let safeEnd = remainingBytes; + while (safeEnd > 0 && (chunkBuffer[safeEnd] & 0b11000000) === 0b10000000) { + safeEnd -= 1; + } + + return { + bytes: maxOutputBytes, + truncated: true, + value: currentValue + chunkBuffer.subarray(0, safeEnd).toString(), + }; + } + + return { + bytes: currentBytes + chunkBuffer.length, + truncated: false, + value: currentValue + chunkBuffer.toString(), + }; + } + function terminateChildProcess(child) { if (!child || child.exitCode !== null) { return; @@ -83,6 +132,7 @@ function createAutoImportCommandRunner({ onStderr, signalOnClose, timeoutMs = null, + maxOutputBytes = DEFAULT_COMMAND_OUTPUT_MAX_BYTES, } = {}, ) { return runCommandWithSpawn(command, args, { @@ -91,6 +141,7 @@ function createAutoImportCommandRunner({ onStderr, signalOnClose, timeoutMs, + maxOutputBytes, spawnImpl: spawnCommand, }); } @@ -104,6 +155,7 @@ function createAutoImportCommandRunner({ onStderr, signalOnClose, timeoutMs = null, + maxOutputBytes = DEFAULT_COMMAND_OUTPUT_MAX_BYTES, spawnImpl = spawnCommand, } = {}, ) { @@ -128,6 +180,10 @@ function createAutoImportCommandRunner({ let stdout = ''; let stderr = ''; + let stdoutBytes = 0; + let stderrBytes = 0; + let outputTruncated = false; + const capturedOutputMaxBytes = getMaxOutputBytes(maxOutputBytes); let finished = false; let timeoutId = null; let timeoutError = null; @@ -157,6 +213,7 @@ function createAutoImportCommandRunner({ stdout, stderr, timedOut: true, + outputTruncated, }, ); terminateChildProcess(child); @@ -165,12 +222,18 @@ function createAutoImportCommandRunner({ } child.stdout.on('data', (chunk) => { - stdout += chunk.toString(); + const nextStdout = appendCapturedOutput(stdout, stdoutBytes, chunk, capturedOutputMaxBytes); 
+ stdout = nextStdout.value; + stdoutBytes = nextStdout.bytes; + outputTruncated = outputTruncated || nextStdout.truncated; }); child.stderr.on('data', (chunk) => { const line = chunk.toString(); - stderr += line; + const nextStderr = appendCapturedOutput(stderr, stderrBytes, chunk, capturedOutputMaxBytes); + stderr = nextStderr.value; + stderrBytes = nextStderr.bytes; + outputTruncated = outputTruncated || nextStderr.truncated; if (streamStderr && onStderr && line.trim()) { onStderr(line); } @@ -184,6 +247,7 @@ function createAutoImportCommandRunner({ args, stdout, stderr, + outputTruncated, }), ), ); @@ -209,6 +273,7 @@ function createAutoImportCommandRunner({ stdout, stderr, exitCode: code, + outputTruncated, }, ), ); diff --git a/server/auto-import-runtime/latest-version.js b/server/auto-import-runtime/latest-version.js index 6e52cbe..a46af12 100644 --- a/server/auto-import-runtime/latest-version.js +++ b/server/auto-import-runtime/latest-version.js @@ -72,11 +72,12 @@ function createLatestToktrackVersionLookup({ lookupStatus: 'ok', }; } catch (error) { + const timedOut = error instanceof Error && error.timedOut === true; return { configuredVersion: toktrackVersion, latestVersion: null, isLatest: null, - lookupStatus: 'failed', + lookupStatus: timedOut ? 'timeout' : 'failed', message: error instanceof Error && error.message.trim() ? 
error.message.trim() diff --git a/server/routes/static-routes.js b/server/routes/static-routes.js index 6b7a835..5024b9c 100644 --- a/server/routes/static-routes.js +++ b/server/routes/static-routes.js @@ -104,6 +104,12 @@ function createStaticRouteHandler({ status: 500, message: 'Internal Server Error', }; + console.error('Static file read failed', { + error, + reqPath, + safePath, + staticRoot, + }); writeStaticErrorResponse(res, response.status, response.message); } } diff --git a/server/routes/usage-routes.js b/server/routes/usage-routes.js index 83da582..351e59e 100644 --- a/server/routes/usage-routes.js +++ b/server/routes/usage-routes.js @@ -118,6 +118,7 @@ function createUsageRoutes({ json, validateMutationRequest, readMutationBody, da invalidMessage: 'Invalid JSON', }); if (!bodyResult.ok) { + // readMutationBody has already written the error response; true means handled. return true; } @@ -165,6 +166,7 @@ function createUsageRoutes({ json, validateMutationRequest, readMutationBody, da invalidMessage: 'Invalid usage backup file', }); if (!bodyResult.ok) { + // readMutationBody has already written the error response; true means handled. 
return true; } diff --git a/src/components/dashboard/sections/dashboard-section-lazy-components.ts b/src/components/dashboard/sections/dashboard-section-lazy-components.ts index a4f03fc..2288f90 100644 --- a/src/components/dashboard/sections/dashboard-section-lazy-components.ts +++ b/src/components/dashboard/sections/dashboard-section-lazy-components.ts @@ -15,9 +15,9 @@ function lazyWithPreload>( return Component } -async function preloadComponents( - ...components: Array<{ preload: () => Promise }> -): Promise { +async function preloadComponents( + ...components: { readonly [K in keyof T]: { preload: () => Promise } } +): Promise { const results = await Promise.allSettled(components.map((component) => component.preload())) const rejectedResult = results.find((result): result is PromiseRejectedResult => { return result.status === 'rejected' @@ -27,9 +27,9 @@ async function preloadComponents( throw rejectedResult.reason } - return results - .filter((result): result is PromiseFulfilledResult => result.status === 'fulfilled') - .map((result) => result.value) + // The rejection guard above ensures results contains only fulfilled preload promises here. 
+ const fulfilledResults = results as readonly PromiseFulfilledResult[] + return fulfilledResults.map((result) => result.value) as unknown as T } const CostForecast = lazyWithPreload(() => diff --git a/src/components/features/settings/use-settings-modal-version-status.ts b/src/components/features/settings/use-settings-modal-version-status.ts index 302eb06..d944248 100644 --- a/src/components/features/settings/use-settings-modal-version-status.ts +++ b/src/components/features/settings/use-settings-modal-version-status.ts @@ -28,13 +28,13 @@ export function useSettingsModalVersionStatus(): SettingsVersionStatusViewModel const statusToneClass = useMemo(() => { if (state.isLoading) return 'text-muted-foreground' - if (state.lookupStatus === 'failed' || state.isLatest === false) return 'text-amber-500' + if (state.lookupStatus !== 'ok' || state.isLatest === false) return 'text-amber-500' return 'text-green-500' }, [state.isLatest, state.isLoading, state.lookupStatus]) const statusLabel = useMemo(() => { if (state.isLoading) return t('settings.modal.toktrackCheckingLatest') - if (state.lookupStatus === 'failed') return t('settings.modal.toktrackLatestCheckFailed') + if (state.lookupStatus !== 'ok') return t('settings.modal.toktrackLatestCheckFailed') if (state.isLatest) return t('settings.modal.toktrackLatest') return t('settings.modal.toktrackUpdateAvailable', { diff --git a/src/lib/toktrack-version-status.ts b/src/lib/toktrack-version-status.ts index 27c54fb..fe510cf 100644 --- a/src/lib/toktrack-version-status.ts +++ b/src/lib/toktrack-version-status.ts @@ -60,7 +60,7 @@ function normalizeToktrackVersionStatus( configuredVersion: status.configuredVersion || TOKTRACK_VERSION, latestVersion: status.latestVersion ?? null, isLatest: typeof status.isLatest === 'boolean' ? status.isLatest : null, - lookupStatus: status.lookupStatus === 'failed' ? 
'failed' : 'ok', + lookupStatus: status.lookupStatus, isLoading: false, } } diff --git a/src/types/index.ts b/src/types/index.ts index fb65c82..a8bfab8 100644 --- a/src/types/index.ts +++ b/src/types/index.ts @@ -318,6 +318,6 @@ export interface ToktrackVersionStatus { configuredVersion: string latestVersion: string | null isLatest: boolean | null - lookupStatus: 'ok' | 'failed' + lookupStatus: 'ok' | 'failed' | 'malformed-output' | 'timeout' message?: string } diff --git a/tests/frontend/dashboard-language-regressions.test.tsx b/tests/frontend/dashboard-language-regressions.test.tsx index 8f91213..84015e5 100644 --- a/tests/frontend/dashboard-language-regressions.test.tsx +++ b/tests/frontend/dashboard-language-regressions.test.tsx @@ -57,10 +57,9 @@ describe('Dashboard language regressions', () => { afterAll(() => { if (originalScrollIntoView) { Element.prototype.scrollIntoView = originalScrollIntoView - return + } else { + delete (Element.prototype as { scrollIntoView?: Element['scrollIntoView'] }).scrollIntoView } - - delete (Element.prototype as { scrollIntoView?: Element['scrollIntoView'] }).scrollIntoView }) it('localizes expanded chart actions in German', () => { diff --git a/tests/frontend/dashboard-sections-rendering.test.tsx b/tests/frontend/dashboard-sections-rendering.test.tsx index 069b270..e51c1b1 100644 --- a/tests/frontend/dashboard-sections-rendering.test.tsx +++ b/tests/frontend/dashboard-sections-rendering.test.tsx @@ -1,13 +1,14 @@ // @vitest-environment jsdom import type { ReactNode } from 'react' -import { fireEvent, render, screen } from '@testing-library/react' +import { fireEvent, screen } from '@testing-library/react' import { beforeEach, describe, expect, it, vi } from 'vitest' import { DashboardSections } from '@/components/dashboard/DashboardSections' import { DEFAULT_APP_SETTINGS } from '@/lib/app-settings' import { initI18n } from '@/lib/i18n' import type { DashboardSectionId } from '@/types' import { createDashboardSectionsViewModel 
} from './dashboard-controller-test-helpers' +import { renderWithAppProviders } from '../test-utils' const dashboardMotionMocks = vi.hoisted(() => ({ cancelPreloads: vi.fn(), @@ -214,7 +215,7 @@ describe('DashboardSections rendering contracts', () => { }, }) - render() + renderWithAppProviders() expect(screen.getByTestId('section-metrics')).toHaveAttribute('data-eager', 'true') expect(screen.queryByTestId('section-today')).not.toBeInTheDocument() @@ -241,7 +242,7 @@ describe('DashboardSections rendering contracts', () => { }, }) - const { unmount } = render() + const { unmount } = renderWithAppProviders() expect(dashboardMotionMocks.scheduleDashboardPreloads).toHaveBeenCalledTimes(1) expect(dashboardMotionMocks.scheduleDashboardPreloads.mock.calls[0]?.[0]).toHaveLength(2) @@ -261,7 +262,7 @@ describe('DashboardSections rendering contracts', () => { }, }) - render() + renderWithAppProviders() expect(screen.getByTestId('section-metrics')).toHaveAttribute('data-eager', 'true') expect(screen.getByTestId('section-activity')).toHaveAttribute('data-eager', 'false') @@ -275,7 +276,7 @@ describe('DashboardSections rendering contracts', () => { }, }) - render() + renderWithAppProviders() // Forecast cards cross a React.lazy boundary in DashboardSections, even with mocked chunks. 
await screen.findByTestId('cost-forecast-expand') diff --git a/tests/frontend/filter-bar-presets.test.tsx b/tests/frontend/filter-bar-presets.test.tsx index df5fd87..0751125 100644 --- a/tests/frontend/filter-bar-presets.test.tsx +++ b/tests/frontend/filter-bar-presets.test.tsx @@ -34,11 +34,7 @@ describe('FilterBar preset and chip states', () => { year: 'Year', all: 'All', } as const satisfies Record<(typeof DASHBOARD_QUICK_DATE_PRESETS)[number], string> - const presetLabels = DASHBOARD_QUICK_DATE_PRESETS.map((key) => { - const label = presetLabelByKey[key] - if (!label) throw new Error(`Missing label for preset key: ${key}`) - return label - }) + const presetLabels = DASHBOARD_QUICK_DATE_PRESETS.map((key) => presetLabelByKey[key]) const renderedPresetLabels = screen .getAllByRole('button') .map((button) => button.textContent?.trim() ?? '') diff --git a/tests/unit/http-router-static.test.ts b/tests/unit/http-router-static.test.ts index 4c1d703..f1d239f 100644 --- a/tests/unit/http-router-static.test.ts +++ b/tests/unit/http-router-static.test.ts @@ -1,6 +1,6 @@ import path from 'node:path' import { createRequire } from 'node:module' -import { describe, expect, it, vi } from 'vitest' +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' const require = createRequire(import.meta.url) const { createHttpRouter } = require('../../server/http-router.js') as { @@ -84,6 +84,16 @@ function createRouter( } describe('HTTP router static file handling', () => { + let consoleError: ReturnType + + beforeEach(() => { + consoleError = vi.spyOn(console, 'error').mockImplementation(() => undefined) + }) + + afterEach(() => { + vi.restoreAllMocks() + }) + it('falls back to index.html when a static file is not found', async () => { const router = createRouter(async (filePath) => { if (filePath.endsWith('index.html')) { @@ -97,6 +107,7 @@ describe('HTTP router static file handling', () => { expect(res.status).toBe(200) expect(res.body).toContain('') + 
expect(consoleError).not.toHaveBeenCalled() }) it('preserves base security headers when preparing HTML responses', async () => { @@ -163,5 +174,14 @@ describe('HTTP router static file handling', () => { expect(res.status).toBe(400) expect(JSON.parse(res.body)).toEqual({ message: 'Invalid request path' }) + expect(consoleError).toHaveBeenCalledWith( + 'Static file read failed', + expect.objectContaining({ + error: expect.objectContaining({ code: 'ERR_INVALID_ARG_VALUE' }), + reqPath: '/app/dist/asset.js', + safePath: '/asset.js', + staticRoot: '/app/dist', + }), + ) }) }) diff --git a/tests/unit/playwright-config.test.ts b/tests/unit/playwright-config.test.ts index 28453ce..d0d1bd0 100644 --- a/tests/unit/playwright-config.test.ts +++ b/tests/unit/playwright-config.test.ts @@ -124,7 +124,6 @@ describe('playwright config', () => { }, 0) expect(totalTests).toBe(11) - expect(totalTests).toBeLessThanOrEqual(12) expect(packageJson.scripts['test:e2e:parallel']).toBe('npm run build:app && playwright test') expect(packageJson.scripts['test:e2e']).toBe('npm run test:e2e:parallel') }) diff --git a/tests/unit/run-parallel-gate.test.ts b/tests/unit/run-parallel-gate.test.ts index c4272c3..2a2a463 100644 --- a/tests/unit/run-parallel-gate.test.ts +++ b/tests/unit/run-parallel-gate.test.ts @@ -143,6 +143,7 @@ describe('parallel verification gate', () => { it('stops after the first failing group', async () => { const stdout = { write: vi.fn() } const stderr = { write: vi.fn() } + const expectedFirstGroupSize = parallelGate.createParallelGatePlan({ e2e: false })[0].length const spawnImpl = vi.fn(() => { const child = new FakeChild() queueMicrotask(() => child.emit('close', 1)) @@ -152,7 +153,7 @@ describe('parallel verification gate', () => { const status = await parallelGate.run([], { stdout, stderr }, spawnImpl) expect(status).toBe(1) - expect(spawnImpl).toHaveBeenCalledTimes(3) + expect(spawnImpl).toHaveBeenCalledTimes(expectedFirstGroupSize) }) it('prints task durations and a 
timing summary', async () => { diff --git a/tests/unit/server-helpers-auto-import-executor.test.ts b/tests/unit/server-helpers-auto-import-executor.test.ts index fe09531..3aeb27b 100644 --- a/tests/unit/server-helpers-auto-import-executor.test.ts +++ b/tests/unit/server-helpers-auto-import-executor.test.ts @@ -1,50 +1,11 @@ import { afterEach, describe, expect, it, vi } from 'vitest' import { - EventEmitter, TOKTRACK_VERSION, createAutoImportRuntime, + createSpawnSequence, resetServerHelperTestState, } from './server-helpers.shared' -type FakeSpawnOutcome = { - code?: number - stderr?: string - stdout?: string -} - -class FakeChildProcess extends EventEmitter { - stdout = new EventEmitter() - stderr = new EventEmitter() - exitCode: number | null = null - - kill(signal: string) { - if (this.exitCode !== null) { - return - } - - this.exitCode = signal === 'SIGKILL' ? 137 : 143 - queueMicrotask(() => this.emit('close', this.exitCode)) - } -} - -function createSpawnSequence(outcomes: FakeSpawnOutcome[]) { - const pendingOutcomes = [...outcomes] - - return vi.fn(() => { - const child = new FakeChildProcess() - const outcome = pendingOutcomes.shift() ?? { code: 0, stdout: '' } - - queueMicrotask(() => { - if (outcome.stdout) child.stdout.emit('data', Buffer.from(outcome.stdout)) - if (outcome.stderr) child.stderr.emit('data', Buffer.from(outcome.stderr)) - child.exitCode = outcome.code ?? 
0 - child.emit('close', child.exitCode) - }) - - return child - }) -} - function createRuntimeWithSpawn( spawnImpl: ReturnType, options: { diff --git a/tests/unit/server-helpers-runner-core.test.ts b/tests/unit/server-helpers-runner-core.test.ts index 1387cd1..3772ef5 100644 --- a/tests/unit/server-helpers-runner-core.test.ts +++ b/tests/unit/server-helpers-runner-core.test.ts @@ -1,8 +1,10 @@ import { afterEach, describe, expect, it, vi } from 'vitest' import { EventEmitter, + FakeChildProcess, TOKTRACK_VERSION, createAutoImportRuntime, + createSpawnSequence, getExecutableName, getLocalToktrackDisplayCommand, getToktrackLatestLookupTimeoutMs, @@ -13,48 +15,6 @@ import { toAutoImportRunnerResolutionError, } from './server-helpers.shared' -type FakeSpawnOutcome = { - code?: number - hang?: boolean - stderr?: string - stdout?: string -} - -class FakeChildProcess extends EventEmitter { - stdout = new EventEmitter() - stderr = new EventEmitter() - exitCode: number | null = null - - kill(signal: string) { - if (this.exitCode !== null) { - return - } - - this.exitCode = signal === 'SIGKILL' ? 137 : 143 - queueMicrotask(() => this.emit('close', this.exitCode)) - } -} - -function createSpawnSequence(outcomes: FakeSpawnOutcome[]) { - const pendingOutcomes = [...outcomes] - - return vi.fn(() => { - const child = new FakeChildProcess() - const outcome = pendingOutcomes.shift() ?? { code: 0, stdout: '' } - - if (!outcome.hang) { - queueMicrotask(() => { - if (outcome.stdout) child.stdout.emit('data', Buffer.from(outcome.stdout)) - if (outcome.stderr) child.stderr.emit('data', Buffer.from(outcome.stderr)) - child.exitCode = outcome.code ?? 
0 - child.emit('close', child.exitCode) - }) - } - - return child - }) -} - function createRuntimeWithSpawn( spawnImpl: ReturnType, options: { @@ -254,7 +214,7 @@ describe('server helper utilities: toktrack runner core behavior', () => { configuredVersion: TOKTRACK_VERSION, latestVersion: null, isLatest: null, - lookupStatus: 'failed', + lookupStatus: 'timeout', message: expect.stringContaining('Command timed out'), }) } finally { @@ -398,6 +358,52 @@ describe('server helper utilities: toktrack runner core behavior', () => { }) }) + it('bounds captured output while preserving streamed stderr chunks', async () => { + const spawnImpl = createSpawnSequence([ + { + code: 1, + stdout: 'abcdef', + stderr: 'runner warning\n', + }, + ]) + const stderrLines: string[] = [] + + await expect( + runCommandWithSpawn('fake-runner', ['daily', '--json'], { + maxOutputBytes: 4, + streamStderr: true, + onStderr: (line) => stderrLines.push(line), + spawnImpl, + }), + ).rejects.toMatchObject({ + stdout: 'abcd', + stderr: 'runn', + outputTruncated: true, + exitCode: 1, + }) + expect(stderrLines).toEqual(['runner warning\n']) + }) + + it('truncates captured UTF-8 output on character boundaries', async () => { + const spawnImpl = createSpawnSequence([ + { + code: 1, + stdout: 'a€b', + }, + ]) + + await expect( + runCommandWithSpawn('fake-runner', ['daily', '--json'], { + maxOutputBytes: 3, + spawnImpl, + }), + ).rejects.toMatchObject({ + stdout: 'a', + outputTruncated: true, + exitCode: 1, + }) + }) + it('wraps spawn errors with command diagnostics', async () => { const child = new FakeChildProcess() const spawnImpl = vi.fn(() => child) diff --git a/tests/unit/server-helpers.shared.ts b/tests/unit/server-helpers.shared.ts index 80a7d86..5260a82 100644 --- a/tests/unit/server-helpers.shared.ts +++ b/tests/unit/server-helpers.shared.ts @@ -78,7 +78,7 @@ const { createAutoImportRuntime } = require('../../server/auto-import-runtime.js configuredVersion: string latestVersion: string | null 
isLatest: boolean | null - lookupStatus: 'ok' | 'failed' | 'malformed-output' + lookupStatus: 'ok' | 'failed' | 'malformed-output' | 'timeout' message?: string }> isAutoImportRunning: () => boolean @@ -138,6 +138,7 @@ const { createAutoImportRuntime } = require('../../server/auto-import-runtime.js onStderr?: (line: string) => void signalOnClose?: (close: () => void) => void timeoutMs?: number | null + maxOutputBytes?: number spawnImpl?: ( command: string, args: string[], @@ -157,6 +158,53 @@ const { createAutoImportRuntime } = require('../../server/auto-import-runtime.js } } +export type FakeSpawnOutcome = { + code?: number + hang?: boolean + stderr?: string + stdout?: string +} + +export class FakeChildProcess extends EventEmitter { + stdout = new EventEmitter() + stderr = new EventEmitter() + exitCode: number | null = null + + kill(signal: string) { + if (this.exitCode !== null) { + return + } + + this.exitCode = signal === 'SIGKILL' ? 137 : 143 + queueMicrotask(() => this.emit('close', this.exitCode)) + } +} + +export function createSpawnSequence(outcomes: FakeSpawnOutcome[]) { + const pendingOutcomes = [...outcomes] + + return vi.fn((command?: string, args: string[] = []) => { + if (pendingOutcomes.length === 0) { + const commandDetails = [command, ...args].filter(Boolean).join(' ') + throw new Error(`Unexpected extra spawn${commandDetails ? `: ${commandDetails}` : ''}`) + } + + const child = new FakeChildProcess() + const outcome = pendingOutcomes.shift() as FakeSpawnOutcome + + if (!outcome.hang) { + queueMicrotask(() => { + if (outcome.stdout) child.stdout.emit('data', Buffer.from(outcome.stdout)) + if (outcome.stderr) child.stderr.emit('data', Buffer.from(outcome.stderr)) + child.exitCode = outcome.code ?? 
0 + child.emit('close', child.exitCode) + }) + } + + return child + }) +} + const dataRuntime = createDataRuntime({ fs, fsPromises, diff --git a/tests/unit/toktrack-version.test.ts b/tests/unit/toktrack-version.test.ts index ed672a9..e39e9af 100644 --- a/tests/unit/toktrack-version.test.ts +++ b/tests/unit/toktrack-version.test.ts @@ -19,6 +19,11 @@ const packageLockJson = require('../../package-lock.json') as { > } +type BunLockJson = { + workspaces?: Record }> + packages?: Record +} + const repoRoot = path.resolve(path.dirname(fileURLToPath(import.meta.url)), '../..') const exactSemverPattern = /^(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)(?:-[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?(?:\+[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?$/ @@ -59,6 +64,124 @@ function getToktrackDependency() { return version } +type JsoncOutsideStringTransform = (context: { + character: string + index: number + nextCharacter: string | undefined + source: string +}) => { nextIndex?: number; text: string } | null + +function transformJsoncOutsideStrings( + source: string, + transformOutsideString: JsoncOutsideStringTransform, +) { + let result = '' + let inString = false + let isEscaped = false + + for (let index = 0; index < source.length; index += 1) { + const character = source[index] + const nextCharacter = source[index + 1] + + if (inString) { + result += character + if (isEscaped) { + isEscaped = false + } else if (character === '\\') { + isEscaped = true + } else if (character === '"') { + inString = false + } + continue + } + + if (character === '"') { + inString = true + result += character + continue + } + + const transformed = transformOutsideString({ + character, + index, + nextCharacter, + source, + }) + if (transformed) { + result += transformed.text + index = transformed.nextIndex ?? 
index + continue + } + + result += character + } + + return result +} + +// Deliberately dependency-free for a lockfile contract test; the scanner handles escaped +// strings, comments, and trailing commas without mutating comment-like text inside strings. +function stripJsoncComments(source: string) { + return transformJsoncOutsideStrings( + source, + ({ character, index, nextCharacter, source: jsoncSource }) => { + if (character === '/' && nextCharacter === '/') { + let nextIndex = index + 2 + while ( + nextIndex < jsoncSource.length && + jsoncSource[nextIndex] !== '\n' && + jsoncSource[nextIndex] !== '\r' + ) { + nextIndex += 1 + } + return { + nextIndex, + text: jsoncSource[nextIndex] ?? '', + } + } + + if (character === '/' && nextCharacter === '*') { + let nextIndex = index + 2 + while ( + nextIndex < jsoncSource.length && + !(jsoncSource[nextIndex] === '*' && jsoncSource[nextIndex + 1] === '/') + ) { + nextIndex += 1 + } + return { + nextIndex: nextIndex < jsoncSource.length ? nextIndex + 1 : nextIndex, + text: '', + } + } + + return null + }, + ) +} + +function stripJsoncTrailingCommas(source: string) { + return transformJsoncOutsideStrings(source, ({ character, index, source: jsoncSource }) => { + if (character !== ',') { + return null + } + + let nextIndex = index + 1 + while (/\s/.test(jsoncSource[nextIndex] ?? 
'')) { + nextIndex += 1 + } + + if (jsoncSource[nextIndex] === '}' || jsoncSource[nextIndex] === ']') { + return { text: '' } + } + + return null + }) +} + +function parseBunLockJsonc(source: string): BunLockJson { + return JSON.parse(stripJsoncTrailingCommas(stripJsoncComments(source))) as BunLockJson +} + function shouldSkipPath(relativePath: string) { return ( excludedFiles.has(relativePath) || @@ -105,12 +228,12 @@ describe('toktrack version constants', () => { const toktrackVersion = getToktrackDependency() const rootPackage = packageLockJson.packages?.[''] const installedToktrackPackage = packageLockJson.packages?.['node_modules/toktrack'] - const bunLock = readFileSync(path.join(repoRoot, 'bun.lock'), 'utf8') + const bunLockJson = parseBunLockJsonc(readFileSync(path.join(repoRoot, 'bun.lock'), 'utf8')) expect(rootPackage?.dependencies?.toktrack).toBe(toktrackVersion) expect(installedToktrackPackage?.version).toBe(toktrackVersion) - expect(bunLock).toContain(`"toktrack": "${toktrackVersion}",`) - expect(bunLock).toContain(`"toktrack": ["toktrack@${toktrackVersion}",`) + expect(bunLockJson.workspaces?.['']?.dependencies?.toktrack).toBe(toktrackVersion) + expect(bunLockJson.packages?.toktrack?.[0]).toBe(`toktrack@${toktrackVersion}`) }) it('keeps toktrack package specs free of hardcoded versions outside managed files', () => { From ecc89c97131c21c43feca95a0836fd0d2d1ffb99 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Sun, 3 May 2026 16:45:51 +0200 Subject: [PATCH 40/42] v6.3.0: Address CodeRabbit review follow-ups --- scripts/run-vitest-project-timings.js | 9 +- server/auto-import-runtime/command-runner.js | 25 +- server/routes/static-routes.js | 10 +- shared/dashboard-preferences.js | 7 +- tests/frontend/cost-by-weekday.test.tsx | 8 +- .../lazy-dashboard-chart-test-utils.tsx | 8 +- tests/frontend/model-mix.test.tsx | 7 +- .../toktrack-version-contract.test.ts | 243 ++++++++++++++++++ tests/unit/auto-import-stream-routes.test.ts | 12 + 
tests/unit/dashboard-preferences.test.ts | 4 +- tests/unit/http-router-static.test.ts | 45 ++-- tests/unit/http-router-test-helpers.ts | 4 + tests/unit/run-vitest-project-timings.test.ts | 7 +- tests/unit/toktrack-version.test.ts | 213 --------------- 14 files changed, 352 insertions(+), 250 deletions(-) create mode 100644 tests/integration/toktrack-version-contract.test.ts diff --git a/scripts/run-vitest-project-timings.js b/scripts/run-vitest-project-timings.js index ef62a6d..1536101 100644 --- a/scripts/run-vitest-project-timings.js +++ b/scripts/run-vitest-project-timings.js @@ -116,9 +116,13 @@ function buildProjectRuns(options) { return runs; } -function buildVitestCommand(run) { +function getNpxCommand(platform = process.platform) { + return platform === 'win32' ? 'npx.cmd' : 'npx'; +} + +function buildVitestCommand(run, platform = process.platform) { return { - command: 'npx', + command: getNpxCommand(platform), args: [ 'vitest', 'run', @@ -286,6 +290,7 @@ module.exports = { buildBudgetCommand, buildProjectRuns, buildVitestCommand, + getNpxCommand, getReportPath, median, parseArgs, diff --git a/server/auto-import-runtime/command-runner.js b/server/auto-import-runtime/command-runner.js index 43f8b00..fb3947f 100644 --- a/server/auto-import-runtime/command-runner.js +++ b/server/auto-import-runtime/command-runner.js @@ -72,8 +72,13 @@ function createAutoImportCommandRunner({ : DEFAULT_COMMAND_OUTPUT_MAX_BYTES; } - function appendCapturedOutput(currentValue, currentBytes, chunk, maxOutputBytes) { - const chunkBuffer = Buffer.isBuffer(chunk) ? 
chunk : Buffer.from(String(chunk)); + function appendCapturedOutput( + currentValue, + currentBytes, + chunkBuffer, + maxOutputBytes, + chunkText, + ) { const remainingBytes = maxOutputBytes - currentBytes; if (remainingBytes <= 0) { @@ -100,7 +105,7 @@ function createAutoImportCommandRunner({ return { bytes: currentBytes + chunkBuffer.length, truncated: false, - value: currentValue + chunkBuffer.toString(), + value: currentValue + (chunkText ?? chunkBuffer.toString()), }; } @@ -229,13 +234,19 @@ function createAutoImportCommandRunner({ }); child.stderr.on('data', (chunk) => { - const line = chunk.toString(); - const nextStderr = appendCapturedOutput(stderr, stderrBytes, chunk, capturedOutputMaxBytes); + const chunkText = chunk.toString(); + const nextStderr = appendCapturedOutput( + stderr, + stderrBytes, + chunk, + capturedOutputMaxBytes, + chunkText, + ); stderr = nextStderr.value; stderrBytes = nextStderr.bytes; outputTruncated = outputTruncated || nextStderr.truncated; - if (streamStderr && onStderr && line.trim()) { - onStderr(line); + if (streamStderr && onStderr && chunkText.trim()) { + onStderr(chunkText); } }); diff --git a/server/routes/static-routes.js b/server/routes/static-routes.js index 5024b9c..54d00fc 100644 --- a/server/routes/static-routes.js +++ b/server/routes/static-routes.js @@ -85,11 +85,17 @@ function createStaticRouteHandler({ return; } + const indexPath = path.join(staticRoot, 'index.html'); try { - const indexPath = path.join(staticRoot, 'index.html'); const html = await readStaticFile(indexPath); sendStaticFile(res, indexPath, html); - } catch { + } catch (fallbackError) { + console.error('SPA fallback read failed', { + error: fallbackError, + indexPath, + safePath, + staticRoot, + }); writeStaticErrorResponse(res, 500, 'Internal Server Error'); } return; diff --git a/shared/dashboard-preferences.js b/shared/dashboard-preferences.js index 8af91f0..5f89eb1 100644 --- a/shared/dashboard-preferences.js +++ 
b/shared/dashboard-preferences.js @@ -229,8 +229,13 @@ function normalizeDashboardSectionVisibility(value) { const defaults = getDefaultDashboardSectionVisibility() return DASHBOARD_SECTION_IDS.reduce((visibility, sectionId) => { + // Boolean values are preserved; malformed present keys are treated as explicitly hidden. visibility[sectionId] = - typeof source[sectionId] === 'boolean' ? source[sectionId] : defaults[sectionId] + typeof source[sectionId] === 'boolean' + ? source[sectionId] + : Object.prototype.hasOwnProperty.call(source, sectionId) + ? false + : defaults[sectionId] return visibility }, {}) } diff --git a/tests/frontend/cost-by-weekday.test.tsx b/tests/frontend/cost-by-weekday.test.tsx index c96e5cf..973d231 100644 --- a/tests/frontend/cost-by-weekday.test.tsx +++ b/tests/frontend/cost-by-weekday.test.tsx @@ -2,7 +2,11 @@ import { screen } from '@testing-library/react' import { beforeAll, describe, expect, it } from 'vitest' -import { legacyWeekdayData, weekdayData } from './lazy-dashboard-chart-test-utils' +import { + assertLegacyWeekdayData, + legacyWeekdayData, + weekdayData, +} from './lazy-dashboard-chart-test-utils' import { CostByWeekday } from '@/components/charts/CostByWeekday' import { initI18n } from '@/lib/i18n' import { renderWithTooltip } from '../test-utils' @@ -26,6 +30,8 @@ describe('CostByWeekday chart', () => { }) it('uses localized day labels as a defensive weekend fallback without weekday indices', () => { + assertLegacyWeekdayData(legacyWeekdayData) + renderWithTooltip() expect(screen.getByText('Peak: Mar · Low: Sáb. 
· Weekend 19%')).toBeInTheDocument() diff --git a/tests/frontend/lazy-dashboard-chart-test-utils.tsx b/tests/frontend/lazy-dashboard-chart-test-utils.tsx index 8b735ca..4ab770a 100644 --- a/tests/frontend/lazy-dashboard-chart-test-utils.tsx +++ b/tests/frontend/lazy-dashboard-chart-test-utils.tsx @@ -1,5 +1,5 @@ import type { ReactNode } from 'react' -import { vi } from 'vitest' +import { expect, vi } from 'vitest' import type { DailyUsage, TokenChartDataPoint, WeekdayData } from '@/types' import { MockSvgContainer, MockSvgGroup } from '../recharts-test-utils' @@ -197,6 +197,12 @@ export const legacyWeekdayData: WeekdayData[] = [ { day: 'Vie', cost: 7 }, ] +export function assertLegacyWeekdayData(data: WeekdayData[]) { + data.forEach((entry) => { + expect(entry).not.toHaveProperty('weekdayIndex') + }) +} + export const tokenData: TokenChartDataPoint[] = [ { date: '2026-04-01', diff --git a/tests/frontend/model-mix.test.tsx b/tests/frontend/model-mix.test.tsx index 063c811..5692f0a 100644 --- a/tests/frontend/model-mix.test.tsx +++ b/tests/frontend/model-mix.test.tsx @@ -29,6 +29,11 @@ describe('ModelMix chart', () => { expect(screen.getByText('Model mix')).toBeInTheDocument() expect(screen.getByText('Cost share by model over time')).toBeInTheDocument() - expect(screen.getAllByTestId('chart-area')).toHaveLength(2) + expect(screen.getByTestId('area-chart')).toBeInTheDocument() + const areaNames = screen + .getAllByTestId('chart-area') + .map((area) => area.getAttribute('data-name')) + expect(areaNames).toHaveLength(2) + expect(areaNames).toEqual(expect.arrayContaining(['Claude Sonnet 4.5', 'GPT-5.4'])) }) }) diff --git a/tests/integration/toktrack-version-contract.test.ts b/tests/integration/toktrack-version-contract.test.ts new file mode 100644 index 0000000..f1822eb --- /dev/null +++ b/tests/integration/toktrack-version-contract.test.ts @@ -0,0 +1,243 @@ +import { createRequire } from 'node:module' +import { existsSync, readdirSync, readFileSync } from 'node:fs' 
+import path from 'node:path' +import { fileURLToPath } from 'node:url' +import { describe, expect, it } from 'vitest' + +const require = createRequire(import.meta.url) +const packageJson = require('../../package.json') as { + dependencies?: Record +} +const packageLockJson = require('../../package-lock.json') as { + packages?: Record< + string, + { + dependencies?: Record + version?: string + } + > +} + +type BunLockJson = { + workspaces?: Record }> + packages?: Record +} + +const repoRoot = path.resolve(path.dirname(fileURLToPath(import.meta.url)), '../..') +const exactSemverPattern = + /^(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)(?:-[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?(?:\+[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?$/ +const hardcodedToktrackVersionPattern = + /(?:toktrack@\d+\.\d+\.\d+(?:[-+][0-9A-Za-z.-]+)?|TOKTRACK_(?:VERSION|PACKAGE_SPEC)\s*[:=]\s*['"`][^'"`]*\d+\.\d+\.\d+)/ +const scannedExtensions = new Set([ + '.bat', + '.cjs', + '.html', + '.js', + '.json', + '.md', + '.mjs', + '.sh', + '.ts', + '.tsx', + '.yaml', + '.yml', +]) +const excludedDirectories = new Set([ + '.git', + '.tmp-playwright', + 'coverage', + 'dist', + 'docs/review', + 'node_modules', + 'playwright-report', + 'test-results', +]) +const excludedFiles = new Set(['bun.lock', 'CHANGELOG.md', 'package-lock.json']) + +function getToktrackDependency() { + const version = packageJson.dependencies?.toktrack + if (typeof version !== 'string') { + throw new Error('package.json dependencies.toktrack must be defined.') + } + + return version +} + +type JsoncOutsideStringTransform = (context: { + character: string + index: number + nextCharacter: string | undefined + source: string +}) => { nextIndex?: number; text: string } | null + +function transformJsoncOutsideStrings( + source: string, + transformOutsideString: JsoncOutsideStringTransform, +) { + let result = '' + let inString = false + let isEscaped = false + + for (let index = 0; index < source.length; index += 1) { + const character = 
source[index] + const nextCharacter = source[index + 1] + + if (inString) { + result += character + if (isEscaped) { + isEscaped = false + } else if (character === '\\') { + isEscaped = true + } else if (character === '"') { + inString = false + } + continue + } + + if (character === '"') { + inString = true + result += character + continue + } + + const transformed = transformOutsideString({ + character, + index, + nextCharacter, + source, + }) + if (transformed) { + result += transformed.text + index = transformed.nextIndex ?? index + continue + } + + result += character + } + + return result +} + +// Deliberately dependency-free for a lockfile contract test; the scanner handles escaped +// strings, comments, and trailing commas without mutating comment-like text inside strings. +function stripJsoncComments(source: string) { + return transformJsoncOutsideStrings( + source, + ({ character, index, nextCharacter, source: jsoncSource }) => { + if (character === '/' && nextCharacter === '/') { + let nextIndex = index + 2 + while ( + nextIndex < jsoncSource.length && + jsoncSource[nextIndex] !== '\n' && + jsoncSource[nextIndex] !== '\r' + ) { + nextIndex += 1 + } + return { + nextIndex, + text: jsoncSource[nextIndex] ?? '', + } + } + + if (character === '/' && nextCharacter === '*') { + let nextIndex = index + 2 + while ( + nextIndex < jsoncSource.length && + !(jsoncSource[nextIndex] === '*' && jsoncSource[nextIndex + 1] === '/') + ) { + nextIndex += 1 + } + return { + nextIndex: nextIndex < jsoncSource.length ? nextIndex + 1 : nextIndex, + text: '', + } + } + + return null + }, + ) +} + +function stripJsoncTrailingCommas(source: string) { + return transformJsoncOutsideStrings(source, ({ character, index, source: jsoncSource }) => { + if (character !== ',') { + return null + } + + let nextIndex = index + 1 + while (/\s/.test(jsoncSource[nextIndex] ?? 
'')) { + nextIndex += 1 + } + + if (jsoncSource[nextIndex] === '}' || jsoncSource[nextIndex] === ']') { + return { text: '' } + } + + return null + }) +} + +function parseBunLockJsonc(source: string): BunLockJson { + return JSON.parse(stripJsoncTrailingCommas(stripJsoncComments(source))) as BunLockJson +} + +function shouldSkipPath(relativePath: string) { + if (excludedFiles.has(relativePath)) return true + + for (const directory of excludedDirectories) { + if (relativePath === directory || relativePath.startsWith(`${directory}/`)) { + return true + } + } + + return false +} + +function collectScannedFiles(directory: string, collected: string[] = []) { + for (const entry of readdirSync(directory, { withFileTypes: true })) { + const absolutePath = path.join(directory, entry.name) + const relativePath = path.relative(repoRoot, absolutePath) + + if (shouldSkipPath(relativePath)) continue + if (entry.isSymbolicLink()) continue + + if (entry.isDirectory()) { + collectScannedFiles(absolutePath, collected) + continue + } + + if (entry.isFile() && scannedExtensions.has(path.extname(entry.name))) { + collected.push(relativePath) + } + } + + return collected +} + +describe('toktrack version repository contract', () => { + it('keeps npm and bun lockfiles aligned with package.json', () => { + const toktrackVersion = getToktrackDependency() + const rootPackage = packageLockJson.packages?.[''] + const installedToktrackPackage = packageLockJson.packages?.['node_modules/toktrack'] + const bunLockPath = path.join(repoRoot, 'bun.lock') + + expect(toktrackVersion).toMatch(exactSemverPattern) + expect(rootPackage?.dependencies?.toktrack).toBe(toktrackVersion) + expect(installedToktrackPackage?.version).toBe(toktrackVersion) + + if (existsSync(bunLockPath)) { + const bunLockJson = parseBunLockJsonc(readFileSync(bunLockPath, 'utf8')) + expect(bunLockJson.workspaces?.['']?.dependencies?.toktrack).toBe(toktrackVersion) + 
expect(bunLockJson.packages?.toktrack?.[0]).toBe(`toktrack@${toktrackVersion}`) + } + }) + + it('keeps toktrack package specs free of hardcoded versions outside managed files', () => { + const offenders = collectScannedFiles(repoRoot).filter((relativePath) => { + const content = readFileSync(path.join(repoRoot, relativePath), 'utf8') + return hardcodedToktrackVersionPattern.test(content) + }) + + expect(offenders).toEqual([]) + }) +}) diff --git a/tests/unit/auto-import-stream-routes.test.ts b/tests/unit/auto-import-stream-routes.test.ts index b5d25ee..549f734 100644 --- a/tests/unit/auto-import-stream-routes.test.ts +++ b/tests/unit/auto-import-stream-routes.test.ts @@ -31,6 +31,18 @@ describe('auto-import stream routes', () => { expect(res.body).toContain('event: success') expect(res.body).toContain('"totalCost":3.5') expect(res.body).toContain('event: done') + + const checkIndex = res.body.indexOf('event: check') + const progressIndex = res.body.indexOf('event: progress') + const stderrIndex = res.body.indexOf('event: stderr') + const successIndex = res.body.indexOf('event: success') + const doneIndex = res.body.indexOf('event: done') + expect(checkIndex).toBeGreaterThanOrEqual(0) + expect(progressIndex).toBeGreaterThan(checkIndex) + expect(stderrIndex).toBeGreaterThan(progressIndex) + expect(successIndex).toBeGreaterThan(stderrIndex) + expect(doneIndex).toBeGreaterThan(successIndex) + expect(performAutoImport).toHaveBeenCalledWith( expect.objectContaining({ source: 'auto-import', diff --git a/tests/unit/dashboard-preferences.test.ts b/tests/unit/dashboard-preferences.test.ts index 54215c1..585fed1 100644 --- a/tests/unit/dashboard-preferences.test.ts +++ b/tests/unit/dashboard-preferences.test.ts @@ -7,7 +7,6 @@ import { DASHBOARD_SECTION_DEFINITIONS, DASHBOARD_SECTION_DEFINITION_MAP, DASHBOARD_VIEW_MODES, - getDefaultDashboardSectionVisibility, normalizeDashboardDefaultFilters, normalizeDashboardSectionOrder, normalizeDashboardSectionVisibility, @@ -208,9 
+207,8 @@ describe('dashboard preferences config', () => { today: 'false', unknown: false, }) - const defaultVisibility = getDefaultDashboardSectionVisibility() expect(visibility.metrics).toBe(false) - expect(visibility.today).toBe(defaultVisibility.today) + expect(visibility.today).toBe(false) expect(visibility).not.toHaveProperty('unknown') const orderedSections = normalizeDashboardSectionOrder(['tables', 'metrics', 'tables', 7]) diff --git a/tests/unit/http-router-static.test.ts b/tests/unit/http-router-static.test.ts index f1d239f..3983d06 100644 --- a/tests/unit/http-router-static.test.ts +++ b/tests/unit/http-router-static.test.ts @@ -1,6 +1,7 @@ import path from 'node:path' import { createRequire } from 'node:module' import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' +import { MockResponse } from './http-router-test-helpers' const require = createRequire(import.meta.url) const { createHttpRouter } = require('../../server/http-router.js') as { @@ -12,21 +13,6 @@ const { createHttpRouter } = require('../../server/http-router.js') as { } } -class MockResponse { - status = 0 - headers: Record = {} - body = '' - - writeHead(status: number, headers: Record) { - this.status = status - this.headers = headers - } - - end(body?: string | Buffer) { - this.body = Buffer.isBuffer(body) ? body.toString('utf8') : (body ?? 
'') - } -} - function createRouter( readFile: (filePath: string) => Promise, prepareHtmlResponse?: (html: string) => { body: string; headers: Record }, @@ -110,6 +96,31 @@ describe('HTTP router static file handling', () => { expect(consoleError).not.toHaveBeenCalled() }) + it('logs failed SPA fallback reads before returning a server error', async () => { + const fallbackError = Object.assign(new Error('index missing'), { code: 'ENOENT' }) + const router = createRouter(async (filePath) => { + if (filePath.endsWith('index.html')) { + throw fallbackError + } + throw Object.assign(new Error('missing'), { code: 'ENOENT' }) + }) + const res = new MockResponse() + + await router.handleServerRequest({ url: '/dashboard', method: 'GET', headers: {} }, res) + + expect(res.status).toBe(500) + expect(JSON.parse(res.body)).toEqual({ message: 'Internal Server Error' }) + expect(consoleError).toHaveBeenCalledWith( + 'SPA fallback read failed', + expect.objectContaining({ + error: fallbackError, + indexPath: '/app/dist/index.html', + safePath: '/dashboard', + staticRoot: '/app/dist', + }), + ) + }) + it('preserves base security headers when preparing HTML responses', async () => { const router = createRouter( async (filePath) => { @@ -130,8 +141,8 @@ describe('HTTP router static file handling', () => { await router.handleServerRequest({ url: '/dashboard', method: 'GET', headers: {} }, res) expect(res.status).toBe(200) - expect(res.headers['X-Test-Security']).toBe('1') - expect(res.headers['Content-Security-Policy']).toBe( + expect(res.headers['x-test-security']).toBe('1') + expect(res.headers['content-security-policy']).toBe( "default-src 'self'; script-src 'nonce-test'", ) expect(res.body).toContain('ttdash-csp-nonce') diff --git a/tests/unit/http-router-test-helpers.ts b/tests/unit/http-router-test-helpers.ts index e34feba..8bfed41 100644 --- a/tests/unit/http-router-test-helpers.ts +++ b/tests/unit/http-router-test-helpers.ts @@ -46,6 +46,10 @@ export class MockResponse { 
this.headers = { ...this.headers, ...normalizedHeaders } } + getHeader(name: string) { + return this.headers[name.toLowerCase()] + } + write(body: string | Buffer) { this.body += Buffer.isBuffer(body) ? body.toString('utf8') : body } diff --git a/tests/unit/run-vitest-project-timings.test.ts b/tests/unit/run-vitest-project-timings.test.ts index 2ac4840..39884f6 100644 --- a/tests/unit/run-vitest-project-timings.test.ts +++ b/tests/unit/run-vitest-project-timings.test.ts @@ -28,7 +28,8 @@ type RunnerScript = { buildProjectRuns: ( options: Pick, ) => ProjectRun[] - buildVitestCommand: (run: ProjectRun) => { command: string; args: string[] } + buildVitestCommand: (run: ProjectRun, platform?: string) => { command: string; args: string[] } + getNpxCommand: (platform?: string) => string getReportPath: (project: string, iteration: number, repeat: number, reportDir: string) => string median: (values: number[]) => number parseArgs: (argv: string[]) => RunnerOptions @@ -109,7 +110,7 @@ describe('Vitest project timing runner', () => { } expect(timingRunner.buildVitestCommand(projectRun)).toEqual({ - command: 'npx', + command: timingRunner.getNpxCommand(), args: [ 'vitest', 'run', @@ -120,6 +121,8 @@ describe('Vitest project timing runner', () => { `--outputFile.junit=${projectRun.reportPath}`, ], }) + expect(timingRunner.buildVitestCommand(projectRun, 'win32').command).toBe('npx.cmd') + expect(timingRunner.buildVitestCommand(projectRun, 'linux').command).toBe('npx') const budgetCommand = timingRunner.buildBudgetCommand(projectRun, { maxSuiteSeconds: 20, diff --git a/tests/unit/toktrack-version.test.ts b/tests/unit/toktrack-version.test.ts index e39e9af..31b7156 100644 --- a/tests/unit/toktrack-version.test.ts +++ b/tests/unit/toktrack-version.test.ts @@ -1,7 +1,4 @@ import { createRequire } from 'node:module' -import { readdirSync, readFileSync } from 'node:fs' -import path from 'node:path' -import { fileURLToPath } from 'node:url' import { describe, expect, it } from 
'vitest' import { TOKTRACK_PACKAGE_SPEC, TOKTRACK_VERSION } from '../../shared/toktrack-version.js' @@ -9,51 +6,9 @@ const require = createRequire(import.meta.url) const packageJson = require('../../package.json') as { dependencies?: Record } -const packageLockJson = require('../../package-lock.json') as { - packages?: Record< - string, - { - dependencies?: Record - version?: string - } - > -} - -type BunLockJson = { - workspaces?: Record }> - packages?: Record -} -const repoRoot = path.resolve(path.dirname(fileURLToPath(import.meta.url)), '../..') const exactSemverPattern = /^(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)(?:-[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?(?:\+[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?$/ -const hardcodedToktrackVersionPattern = - /(?:toktrack@\d+\.\d+\.\d+(?:[-+][0-9A-Za-z.-]+)?|TOKTRACK_(?:VERSION|PACKAGE_SPEC):\s*['"`][^'"`]*\d+\.\d+\.\d+)/ -const scannedExtensions = new Set([ - '.bat', - '.cjs', - '.html', - '.js', - '.json', - '.md', - '.mjs', - '.sh', - '.ts', - '.tsx', - '.yaml', - '.yml', -]) -const excludedDirectories = new Set([ - '.git', - '.tmp-playwright', - 'coverage', - 'dist', - 'docs/review', - 'node_modules', - 'playwright-report', - 'test-results', -]) -const excludedFiles = new Set(['bun.lock', 'CHANGELOG.md', 'package-lock.json']) function getToktrackDependency() { const version = packageJson.dependencies?.toktrack @@ -64,153 +19,6 @@ function getToktrackDependency() { return version } -type JsoncOutsideStringTransform = (context: { - character: string - index: number - nextCharacter: string | undefined - source: string -}) => { nextIndex?: number; text: string } | null - -function transformJsoncOutsideStrings( - source: string, - transformOutsideString: JsoncOutsideStringTransform, -) { - let result = '' - let inString = false - let isEscaped = false - - for (let index = 0; index < source.length; index += 1) { - const character = source[index] - const nextCharacter = source[index + 1] - - if (inString) { - result += character - 
if (isEscaped) { - isEscaped = false - } else if (character === '\\') { - isEscaped = true - } else if (character === '"') { - inString = false - } - continue - } - - if (character === '"') { - inString = true - result += character - continue - } - - const transformed = transformOutsideString({ - character, - index, - nextCharacter, - source, - }) - if (transformed) { - result += transformed.text - index = transformed.nextIndex ?? index - continue - } - - result += character - } - - return result -} - -// Deliberately dependency-free for a lockfile contract test; the scanner handles escaped -// strings, comments, and trailing commas without mutating comment-like text inside strings. -function stripJsoncComments(source: string) { - return transformJsoncOutsideStrings( - source, - ({ character, index, nextCharacter, source: jsoncSource }) => { - if (character === '/' && nextCharacter === '/') { - let nextIndex = index + 2 - while ( - nextIndex < jsoncSource.length && - jsoncSource[nextIndex] !== '\n' && - jsoncSource[nextIndex] !== '\r' - ) { - nextIndex += 1 - } - return { - nextIndex, - text: jsoncSource[nextIndex] ?? '', - } - } - - if (character === '/' && nextCharacter === '*') { - let nextIndex = index + 2 - while ( - nextIndex < jsoncSource.length && - !(jsoncSource[nextIndex] === '*' && jsoncSource[nextIndex + 1] === '/') - ) { - nextIndex += 1 - } - return { - nextIndex: nextIndex < jsoncSource.length ? nextIndex + 1 : nextIndex, - text: '', - } - } - - return null - }, - ) -} - -function stripJsoncTrailingCommas(source: string) { - return transformJsoncOutsideStrings(source, ({ character, index, source: jsoncSource }) => { - if (character !== ',') { - return null - } - - let nextIndex = index + 1 - while (/\s/.test(jsoncSource[nextIndex] ?? 
'')) { - nextIndex += 1 - } - - if (jsoncSource[nextIndex] === '}' || jsoncSource[nextIndex] === ']') { - return { text: '' } - } - - return null - }) -} - -function parseBunLockJsonc(source: string): BunLockJson { - return JSON.parse(stripJsoncTrailingCommas(stripJsoncComments(source))) as BunLockJson -} - -function shouldSkipPath(relativePath: string) { - return ( - excludedFiles.has(relativePath) || - Array.from(excludedDirectories).some( - (directory) => relativePath === directory || relativePath.startsWith(`${directory}/`), - ) - ) -} - -function collectScannedFiles(directory: string, collected: string[] = []) { - for (const entry of readdirSync(directory, { withFileTypes: true })) { - const absolutePath = path.join(directory, entry.name) - const relativePath = path.relative(repoRoot, absolutePath) - - if (shouldSkipPath(relativePath)) continue - - if (entry.isDirectory()) { - collectScannedFiles(absolutePath, collected) - continue - } - - if (entry.isFile() && scannedExtensions.has(path.extname(entry.name))) { - collected.push(relativePath) - } - } - - return collected -} - describe('toktrack version constants', () => { it('keeps the shared pinned version aligned with package.json', () => { expect(TOKTRACK_VERSION).toBe(getToktrackDependency()) @@ -223,25 +31,4 @@ describe('toktrack version constants', () => { it('keeps the package dependency pinned to an exact SemVer version', () => { expect(getToktrackDependency()).toMatch(exactSemverPattern) }) - - it('keeps npm and bun lockfiles aligned with package.json', () => { - const toktrackVersion = getToktrackDependency() - const rootPackage = packageLockJson.packages?.[''] - const installedToktrackPackage = packageLockJson.packages?.['node_modules/toktrack'] - const bunLockJson = parseBunLockJsonc(readFileSync(path.join(repoRoot, 'bun.lock'), 'utf8')) - - expect(rootPackage?.dependencies?.toktrack).toBe(toktrackVersion) - expect(installedToktrackPackage?.version).toBe(toktrackVersion) - 
expect(bunLockJson.workspaces?.['']?.dependencies?.toktrack).toBe(toktrackVersion) - expect(bunLockJson.packages?.toktrack?.[0]).toBe(`toktrack@${toktrackVersion}`) - }) - - it('keeps toktrack package specs free of hardcoded versions outside managed files', () => { - const offenders = collectScannedFiles(repoRoot).filter((relativePath) => { - const content = readFileSync(path.join(repoRoot, relativePath), 'utf8') - return hardcodedToktrackVersionPattern.test(content) - }) - - expect(offenders).toEqual([]) - }) }) From 66ed7e78e7a4dd32c4f69bdd489298c439bec8d7 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Sun, 3 May 2026 17:56:40 +0200 Subject: [PATCH 41/42] Fix CodeRabbit review feedback --- server/auto-import-runtime/command-runner.js | 155 +++++++++-------- server/http-router.js | 1 - server/routes/static-routes.js | 13 +- src/components/charts/CostByWeekday.tsx | 34 ++-- tests/frontend/cost-by-weekday.test.tsx | 12 ++ tests/unit/http-router-static.test.ts | 156 +++++++----------- tests/unit/http-router-test-helpers.ts | 15 +- tests/unit/server-helpers-runner-core.test.ts | 114 ++++++++++++- 8 files changed, 317 insertions(+), 183 deletions(-) diff --git a/server/auto-import-runtime/command-runner.js b/server/auto-import-runtime/command-runner.js index fb3947f..18d0260 100644 --- a/server/auto-import-runtime/command-runner.js +++ b/server/auto-import-runtime/command-runner.js @@ -1,4 +1,7 @@ +const { StringDecoder } = require('node:string_decoder'); + const DEFAULT_COMMAND_OUTPUT_MAX_BYTES = 1024 * 1024; +const DEFAULT_PROCESS_TERMINATION_GRACE_MS = 5000; function createAutoImportCommandRunner({ processObject = process, @@ -6,6 +9,13 @@ function createAutoImportCommandRunner({ isWindows, processTerminationGraceMs, }) { + const terminationGraceMs = + typeof processTerminationGraceMs === 'number' && + Number.isFinite(processTerminationGraceMs) && + processTerminationGraceMs > 0 + ? 
processTerminationGraceMs + : DEFAULT_PROCESS_TERMINATION_GRACE_MS; + function getExecutableName(baseName, forceWindows = isWindows) { if (!forceWindows) { return baseName; @@ -72,41 +82,50 @@ function createAutoImportCommandRunner({ : DEFAULT_COMMAND_OUTPUT_MAX_BYTES; } - function appendCapturedOutput( - currentValue, - currentBytes, - chunkBuffer, - maxOutputBytes, - chunkText, - ) { - const remainingBytes = maxOutputBytes - currentBytes; + function createCapturedOutputState() { + return { + bytes: 0, + decoder: new StringDecoder('utf8'), + truncated: false, + value: '', + }; + } + + function getUtf8SafePrefixLength(chunkBuffer, maxBytes) { + let safeEnd = Math.min(maxBytes, chunkBuffer.length); + while (safeEnd > 0 && (chunkBuffer[safeEnd] & 0b11000000) === 0b10000000) { + safeEnd -= 1; + } + return safeEnd; + } + + function appendCapturedOutput(state, chunkBuffer, maxOutputBytes) { + const remainingBytes = maxOutputBytes - state.bytes; if (remainingBytes <= 0) { - return { - bytes: currentBytes, - truncated: chunkBuffer.length > 0, - value: currentValue, - }; + state.truncated = state.truncated || chunkBuffer.length > 0; + return; } if (chunkBuffer.length > remainingBytes) { - let safeEnd = remainingBytes; - while (safeEnd > 0 && (chunkBuffer[safeEnd] & 0b11000000) === 0b10000000) { - safeEnd -= 1; + const safeEnd = getUtf8SafePrefixLength(chunkBuffer, remainingBytes); + if (safeEnd > 0) { + state.value += state.decoder.write(chunkBuffer.subarray(0, safeEnd)); } - - return { - bytes: maxOutputBytes, - truncated: true, - value: currentValue + chunkBuffer.subarray(0, safeEnd).toString(), - }; + state.bytes = maxOutputBytes; + state.truncated = true; + return; } - return { - bytes: currentBytes + chunkBuffer.length, - truncated: false, - value: currentValue + (chunkText ?? 
chunkBuffer.toString()), - }; + state.bytes += chunkBuffer.length; + state.value += state.decoder.write(chunkBuffer); + } + + function flushCapturedOutput(state) { + if (!state.truncated) { + state.value += state.decoder.end(); + } + return state.value; } function terminateChildProcess(child) { @@ -120,7 +139,7 @@ function createAutoImportCommandRunner({ if (child.exitCode === null) { child.kill('SIGKILL'); } - }, processTerminationGraceMs); + }, terminationGraceMs); forceKillTimeout.unref?.(); child.once('close', () => { @@ -183,15 +202,34 @@ function createAutoImportCommandRunner({ return; } + const stdoutCapture = createCapturedOutputState(); + const stderrCapture = createCapturedOutputState(); + const stderrStreamDecoder = new StringDecoder('utf8'); let stdout = ''; let stderr = ''; - let stdoutBytes = 0; - let stderrBytes = 0; let outputTruncated = false; const capturedOutputMaxBytes = getMaxOutputBytes(maxOutputBytes); let finished = false; + let timedOut = false; let timeoutId = null; - let timeoutError = null; + let outputFinalized = false; + + const emitStderrChunk = (chunkText) => { + if (streamStderr && onStderr && chunkText.trim()) { + onStderr(chunkText); + } + }; + + const finalizeOutput = () => { + if (outputFinalized) { + return; + } + outputFinalized = true; + stdout = flushCapturedOutput(stdoutCapture); + stderr = flushCapturedOutput(stderrCapture); + outputTruncated = stdoutCapture.truncated || stderrCapture.truncated; + emitStderrChunk(stderrStreamDecoder.end()); + }; const settle = (handler, value) => { if (finished) { @@ -210,47 +248,23 @@ function createAutoImportCommandRunner({ if (typeof timeoutMs === 'number' && Number.isFinite(timeoutMs) && timeoutMs > 0) { timeoutId = setTimeout(() => { - timeoutError = createCommandError( - `Command timed out after ${timeoutMs}ms: ${commandLabel}`, - { - command, - args, - stdout, - stderr, - timedOut: true, - outputTruncated, - }, - ); + timedOut = true; terminateChildProcess(child); }, timeoutMs); 
timeoutId.unref?.(); } child.stdout.on('data', (chunk) => { - const nextStdout = appendCapturedOutput(stdout, stdoutBytes, chunk, capturedOutputMaxBytes); - stdout = nextStdout.value; - stdoutBytes = nextStdout.bytes; - outputTruncated = outputTruncated || nextStdout.truncated; + appendCapturedOutput(stdoutCapture, chunk, capturedOutputMaxBytes); }); child.stderr.on('data', (chunk) => { - const chunkText = chunk.toString(); - const nextStderr = appendCapturedOutput( - stderr, - stderrBytes, - chunk, - capturedOutputMaxBytes, - chunkText, - ); - stderr = nextStderr.value; - stderrBytes = nextStderr.bytes; - outputTruncated = outputTruncated || nextStderr.truncated; - if (streamStderr && onStderr && chunkText.trim()) { - onStderr(chunkText); - } + appendCapturedOutput(stderrCapture, chunk, capturedOutputMaxBytes); + emitStderrChunk(stderrStreamDecoder.write(chunk)); }); - child.on('error', (error) => + child.on('error', (error) => { + finalizeOutput(); settle( reject, createCommandError(error.message || `Could not start ${commandLabel}.`, { @@ -260,14 +274,25 @@ function createAutoImportCommandRunner({ stderr, outputTruncated, }), - ), - ); + ); + }); child.on('close', (code) => { if (finished) { return; } - if (timeoutError) { - settle(reject, timeoutError); + finalizeOutput(); + if (timedOut) { + settle( + reject, + createCommandError(`Command timed out after ${timeoutMs}ms: ${commandLabel}`, { + command, + args, + stdout, + stderr, + timedOut: true, + outputTruncated, + }), + ); return; } if (code === 0) { diff --git a/server/http-router.js b/server/http-router.js index fa79575..81f64c5 100644 --- a/server/http-router.js +++ b/server/http-router.js @@ -72,7 +72,6 @@ function createHttpRouter({ staticRoot, securityHeaders, prepareHtmlResponse, - json, }); const apiRouteHandlers = [ usageRoutes.handleUsageRoutes, diff --git a/server/routes/static-routes.js b/server/routes/static-routes.js index 54d00fc..083cc30 100644 --- a/server/routes/static-routes.js +++ 
b/server/routes/static-routes.js @@ -1,12 +1,5 @@ /** Creates the static asset and SPA fallback route handler. */ -function createStaticRouteHandler({ - fs, - path, - staticRoot, - securityHeaders, - prepareHtmlResponse, - json, -}) { +function createStaticRouteHandler({ fs, path, staticRoot, securityHeaders, prepareHtmlResponse }) { const mimeTypes = { '.html': 'text/html; charset=utf-8', '.css': 'text/css; charset=utf-8', @@ -123,13 +116,13 @@ function createStaticRouteHandler({ async function handleStaticRequest(req, res, pathname) { const safePath = pathname === '/' ? '/index.html' : pathname; if (safePath.includes('\0')) { - return json(res, 400, { message: 'Invalid request path' }); + return writeStaticErrorResponse(res, 400, 'Invalid request path'); } const resolvedStaticRoot = path.resolve(staticRoot); const filePath = path.resolve(staticRoot, `.${safePath}`); if (filePath === resolvedStaticRoot || !filePath.startsWith(resolvedStaticRoot + path.sep)) { - return json(res, 403, { message: 'Access denied' }); + return writeStaticErrorResponse(res, 403, 'Access denied'); } await serveFile(req, res, filePath, safePath); diff --git a/src/components/charts/CostByWeekday.tsx b/src/components/charts/CostByWeekday.tsx index 8cc08ec..f9849d5 100644 --- a/src/components/charts/CostByWeekday.tsx +++ b/src/components/charts/CostByWeekday.tsx @@ -65,26 +65,38 @@ export function CostByWeekday({ data }: CostByWeekdayProps) { const [activeIndex, setActiveIndex] = useState(null) const uid = useId() const gid = (n: string) => `${uid}-${n}`.replace(/:/g, '') + const chartData = [...data].sort((left, right) => { + if (left.weekdayIndex === undefined && right.weekdayIndex === undefined) { + return 0 + } + if (left.weekdayIndex === undefined) { + return 1 + } + if (right.weekdayIndex === undefined) { + return -1 + } + return left.weekdayIndex - right.weekdayIndex + }) - const maxCost = Math.max(...data.map((d) => d.cost)) - const minCost = Math.min(...data.map((d) => d.cost)) - 
const peakIndex = data.findIndex((d) => d.cost === maxCost) - const lowIndex = data.findIndex((d) => d.cost === minCost) - const weekendCost = data + const maxCost = Math.max(...chartData.map((d) => d.cost)) + const minCost = Math.min(...chartData.map((d) => d.cost)) + const peakIndex = chartData.findIndex((d) => d.cost === maxCost) + const lowIndex = chartData.findIndex((d) => d.cost === minCost) + const weekendCost = chartData .filter((entry) => isWeekendEntry(entry)) .reduce((sum, entry) => sum + entry.cost, 0) - const weekTotal = data.reduce((sum, entry) => sum + entry.cost, 0) + const weekTotal = chartData.reduce((sum, entry) => sum + entry.cost, 0) return ( 0 ? (weekendCost / weekTotal) * 100 : 0).toFixed(0)}%`, })} info={CHART_HELP.costByWeekday} - chartData={data as unknown as Record[]} + chartData={chartData as unknown as Record[]} valueKey="cost" valueFormatter={formatCurrency} > @@ -93,7 +105,7 @@ export function CostByWeekday({ data }: CostByWeekdayProps) { { if ( @@ -145,7 +157,7 @@ export function CostByWeekday({ data }: CostByWeekdayProps) { name={t('charts.costByWeekday.averageCost')} {...getBarAnimationProps(animate)} > - {data.map((_, index) => { + {chartData.map((_, index) => { let fill = `url(#${gid('weekday')})` if (activeIndex === index) fill = `url(#${gid('weekdayActive')})` else if (index === peakIndex) fill = `url(#${gid('weekdayPeak')})` diff --git a/tests/frontend/cost-by-weekday.test.tsx b/tests/frontend/cost-by-weekday.test.tsx index 973d231..8a15a35 100644 --- a/tests/frontend/cost-by-weekday.test.tsx +++ b/tests/frontend/cost-by-weekday.test.tsx @@ -22,6 +22,18 @@ describe('CostByWeekday chart', () => { expect(screen.getByText('Cost by weekday')).toBeInTheDocument() expect(screen.getByText('Peak: Tu · Low: Sa · Weekend 19%')).toBeInTheDocument() expect(screen.getByTestId('chart-bar')).toHaveAttribute('data-name', 'Avg cost') + const chartPayload = JSON.parse( + screen.getByTestId('bar-chart').getAttribute('data-chart') ?? 
'[]', + ) as Array<{ day: string }> + expect(chartPayload.map((entry) => entry.day)).toEqual([ + 'Mo', + 'Tu', + 'We', + 'Th', + 'Fr', + 'Sa', + 'Su', + ]) const fills = screen.getAllByTestId('bar-cell').map((cell) => cell.getAttribute('data-fill')) expect(fills).toHaveLength(7) diff --git a/tests/unit/http-router-static.test.ts b/tests/unit/http-router-static.test.ts index 3983d06..63d14d1 100644 --- a/tests/unit/http-router-static.test.ts +++ b/tests/unit/http-router-static.test.ts @@ -1,73 +1,5 @@ -import path from 'node:path' -import { createRequire } from 'node:module' import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' -import { MockResponse } from './http-router-test-helpers' - -const require = createRequire(import.meta.url) -const { createHttpRouter } = require('../../server/http-router.js') as { - createHttpRouter: (options: Record) => { - handleServerRequest: ( - req: { url: string; method: string; headers: Record }, - res: MockResponse, - ) => Promise - } -} - -function createRouter( - readFile: (filePath: string) => Promise, - prepareHtmlResponse?: (html: string) => { body: string; headers: Record }, -) { - return createHttpRouter({ - fs: { - promises: { - readFile, - }, - }, - path, - prepareHtmlResponse, - staticRoot: '/app/dist', - securityHeaders: { 'X-Test-Security': '1' }, - httpUtils: { - json: (res: MockResponse, status: number, payload: unknown) => { - res.writeHead(status, { 'Content-Type': 'application/json; charset=utf-8' }) - res.end(JSON.stringify(payload)) - }, - readBody: vi.fn(), - resolveApiPath: () => null, - sendBuffer: vi.fn(), - validateMutationRequest: vi.fn(), - validateRequestHost: () => null, - }, - remoteAuth: { - resolveBootstrapResponse: () => null, - validateApiRequest: () => null, - }, - dataRuntime: { - extractSettingsImportPayload: vi.fn(), - extractUsageImportPayload: vi.fn(), - isPayloadTooLargeError: vi.fn(), - isPersistedStateError: vi.fn(), - mergeUsageData: vi.fn(), - readData: vi.fn(), - 
readSettings: vi.fn(), - unlinkIfExists: vi.fn(), - updateDataLoadState: vi.fn(), - updateSettings: vi.fn(), - withFileMutationLock: vi.fn(), - withSettingsAndDataMutationLock: vi.fn(), - writeData: vi.fn(), - writeSettings: vi.fn(), - normalizeSettings: vi.fn(), - paths: { - dataFile: '/data/data.json', - settingsFile: '/data/settings.json', - }, - }, - autoImportRuntime: {}, - generatePdfReport: vi.fn(), - getRuntimeSnapshot: vi.fn(), - }) -} +import { MockResponse, createRouter } from './http-router-test-helpers' describe('HTTP router static file handling', () => { let consoleError: ReturnType @@ -81,11 +13,13 @@ describe('HTTP router static file handling', () => { }) it('falls back to index.html when a static file is not found', async () => { - const router = createRouter(async (filePath) => { - if (filePath.endsWith('index.html')) { - return Buffer.from('') - } - throw Object.assign(new Error('missing'), { code: 'ENOENT' }) + const { router } = createRouter({ + readFile: async (filePath) => { + if (filePath.endsWith('index.html')) { + return Buffer.from('') + } + throw Object.assign(new Error('missing'), { code: 'ENOENT' }) + }, }) const res = new MockResponse() @@ -98,17 +32,20 @@ describe('HTTP router static file handling', () => { it('logs failed SPA fallback reads before returning a server error', async () => { const fallbackError = Object.assign(new Error('index missing'), { code: 'ENOENT' }) - const router = createRouter(async (filePath) => { - if (filePath.endsWith('index.html')) { - throw fallbackError - } - throw Object.assign(new Error('missing'), { code: 'ENOENT' }) + const { router } = createRouter({ + readFile: async (filePath) => { + if (filePath.endsWith('index.html')) { + throw fallbackError + } + throw Object.assign(new Error('missing'), { code: 'ENOENT' }) + }, }) const res = new MockResponse() await router.handleServerRequest({ url: '/dashboard', method: 'GET', headers: {} }, res) expect(res.status).toBe(500) + 
expect(res.headers['x-test-security']).toBe('1') expect(JSON.parse(res.body)).toEqual({ message: 'Internal Server Error' }) expect(consoleError).toHaveBeenCalledWith( 'SPA fallback read failed', @@ -122,20 +59,20 @@ describe('HTTP router static file handling', () => { }) it('preserves base security headers when preparing HTML responses', async () => { - const router = createRouter( - async (filePath) => { + const { router } = createRouter({ + readFile: async (filePath) => { if (filePath.endsWith('index.html')) { return Buffer.from('') } throw Object.assign(new Error('missing'), { code: 'ENOENT' }) }, - (html) => ({ + prepareHtmlResponse: (html) => ({ body: html.replace('', ''), headers: { 'Content-Security-Policy': "default-src 'self'; script-src 'nonce-test'", }, }), - ) + }) const res = new MockResponse() await router.handleServerRequest({ url: '/dashboard', method: 'GET', headers: {} }, res) @@ -148,42 +85,77 @@ describe('HTTP router static file handling', () => { expect(res.body).toContain('ttdash-csp-nonce') }) + it('returns a bad request with static security headers for null-byte paths', async () => { + const readFile = vi.fn(async () => Buffer.from('')) + const { router } = createRouter({ readFile }) + const res = new MockResponse() + + await router.handleServerRequest({ url: '/assets/app%00.js', method: 'GET', headers: {} }, res) + + expect(res.status).toBe(400) + expect(res.headers['x-test-security']).toBe('1') + expect(JSON.parse(res.body)).toEqual({ message: 'Invalid request path' }) + expect(readFile).not.toHaveBeenCalled() + }) + + it('returns access denied with static security headers for escaped paths', async () => { + const readFile = vi.fn(async () => Buffer.from('')) + const { router } = createRouter({ readFile }) + const res = new MockResponse() + + await router.handleServerRequest({ url: '/..%2Fpackage.json', method: 'GET', headers: {} }, res) + + expect(res.status).toBe(403) + expect(res.headers['x-test-security']).toBe('1') + 
expect(JSON.parse(res.body)).toEqual({ message: 'Access denied' }) + expect(readFile).not.toHaveBeenCalled() + }) + it('returns not found for missing static assets instead of serving the SPA shell', async () => { - const router = createRouter(async (filePath) => { - if (filePath.endsWith('index.html')) { - return Buffer.from('') - } - throw Object.assign(new Error('missing'), { code: 'ENOENT' }) + const { router } = createRouter({ + readFile: async (filePath) => { + if (filePath.endsWith('index.html')) { + return Buffer.from('') + } + throw Object.assign(new Error('missing'), { code: 'ENOENT' }) + }, }) const res = new MockResponse() await router.handleServerRequest({ url: '/assets/app.js', method: 'GET', headers: {} }, res) expect(res.status).toBe(404) + expect(res.headers['x-test-security']).toBe('1') expect(JSON.parse(res.body)).toEqual({ message: 'Not Found' }) }) it('rejects directory reads instead of treating directories as static files', async () => { - const router = createRouter(async () => { - throw Object.assign(new Error('directory'), { code: 'EISDIR' }) + const { router } = createRouter({ + readFile: async () => { + throw Object.assign(new Error('directory'), { code: 'EISDIR' }) + }, }) const res = new MockResponse() await router.handleServerRequest({ url: '/assets', method: 'GET', headers: {} }, res) expect(res.status).toBe(403) + expect(res.headers['x-test-security']).toBe('1') expect(JSON.parse(res.body)).toEqual({ message: 'Access denied' }) }) it('returns a bad request for invalid static read paths', async () => { - const router = createRouter(async () => { - throw Object.assign(new Error('invalid'), { code: 'ERR_INVALID_ARG_VALUE' }) + const { router } = createRouter({ + readFile: async () => { + throw Object.assign(new Error('invalid'), { code: 'ERR_INVALID_ARG_VALUE' }) + }, }) const res = new MockResponse() await router.handleServerRequest({ url: '/asset.js', method: 'GET', headers: {} }, res) expect(res.status).toBe(400) + 
expect(res.headers['x-test-security']).toBe('1') expect(JSON.parse(res.body)).toEqual({ message: 'Invalid request path' }) expect(consoleError).toHaveBeenCalledWith( 'Static file read failed', diff --git a/tests/unit/http-router-test-helpers.ts b/tests/unit/http-router-test-helpers.ts index 8bfed41..18a1918 100644 --- a/tests/unit/http-router-test-helpers.ts +++ b/tests/unit/http-router-test-helpers.ts @@ -91,16 +91,24 @@ export function createRouter({ url: 'http://127.0.0.1:3000', })), httpUtilsOverrides = {}, + prepareHtmlResponse, readBody = vi.fn(async () => ({})), + readFile = vi.fn(), remoteAuthOverrides = {}, + securityHeaders = { 'X-Test-Security': '1' }, + staticRoot = '/app/dist', }: { autoImportRuntimeOverrides?: Record dataRuntimeOverrides?: Record generatePdfReport?: () => Promise<{ buffer: Buffer; filename: string }> getRuntimeSnapshot?: () => unknown httpUtilsOverrides?: Record + prepareHtmlResponse?: (html: string) => { body: string; headers: Record } readBody?: () => Promise + readFile?: (filePath: string) => Promise remoteAuthOverrides?: Record + securityHeaders?: Record + staticRoot?: string } = {}) { const dataRuntime = { extractSettingsImportPayload: vi.fn( @@ -146,10 +154,11 @@ export function createRouter({ } const router = createHttpRouter({ - fs: { promises: { readFile: vi.fn() } }, + fs: { promises: { readFile } }, path, - staticRoot: '/app/dist', - securityHeaders: { 'X-Test-Security': '1' }, + prepareHtmlResponse, + staticRoot, + securityHeaders, httpUtils: { json: ( res: MockResponse, diff --git a/tests/unit/server-helpers-runner-core.test.ts b/tests/unit/server-helpers-runner-core.test.ts index 3772ef5..784b63d 100644 --- a/tests/unit/server-helpers-runner-core.test.ts +++ b/tests/unit/server-helpers-runner-core.test.ts @@ -23,9 +23,16 @@ function createRuntimeWithSpawn( localBinOverride?: string localProbeTimeoutMs?: number localVersionCheckTimeoutMs?: number + processTerminationGraceMs?: number | undefined } = {}, ) { const localBin 
= options.localBinOverride ?? '/missing/toktrack' + const processTerminationGraceMs = Object.prototype.hasOwnProperty.call( + options, + 'processTerminationGraceMs', + ) + ? options.processTerminationGraceMs + : 1000 return createAutoImportRuntime({ fs: { @@ -45,7 +52,7 @@ function createRuntimeWithSpawn( toktrackLocalBin: '/missing/toktrack', npxCacheDir: '/tmp/ttdash-test-npx-cache', isWindows: false, - processTerminationGraceMs: 1000, + processTerminationGraceMs, toktrackLocalRunnerProbeTimeoutMs: options.localProbeTimeoutMs ?? 7000, toktrackLocalRunnerVersionCheckTimeoutMs: options.localVersionCheckTimeoutMs ?? 7000, toktrackLocalRunnerImportTimeoutMs: 60000, @@ -358,6 +365,53 @@ describe('server helper utilities: toktrack runner core behavior', () => { }) }) + it('captures stdout split across UTF-8 chunk boundaries', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => child) + const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { + spawnImpl, + }) + const output = Buffer.from('a€b') + + child.stdout.emit('data', output.subarray(0, 2)) + child.stdout.emit('data', output.subarray(2)) + child.exitCode = 1 + child.emit('close', 1) + + await expect(outcomePromise).rejects.toMatchObject({ + message: 'a€b', + stdout: 'a€b', + exitCode: 1, + }) + }) + + it('streams stderr split across UTF-8 chunk boundaries without replacement characters', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => child) + const stderrLines: string[] = [] + const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { + streamStderr: true, + onStderr: (line) => stderrLines.push(line), + spawnImpl, + }) + const prefix = Buffer.from('runner ') + const symbol = Buffer.from('€') + const suffix = Buffer.from(' warning\n') + + child.stderr.emit('data', Buffer.concat([prefix, symbol.subarray(0, 1)])) + child.stderr.emit('data', Buffer.concat([symbol.subarray(1), suffix])) + child.exitCode 
= 1 + child.emit('close', 1) + + await expect(outcomePromise).rejects.toMatchObject({ + message: 'runner € warning', + stderr: 'runner € warning\n', + exitCode: 1, + }) + expect(stderrLines.join('')).toBe('runner € warning\n') + expect(stderrLines.join('')).not.toContain('\uFFFD') + }) + it('bounds captured output while preserving streamed stderr chunks', async () => { const spawnImpl = createSpawnSequence([ { @@ -482,6 +536,64 @@ describe('server helper utilities: toktrack runner core behavior', () => { } }) + it('uses a 5s process termination grace fallback when none is configured', async () => { + vi.useFakeTimers() + + class FakeChild extends EventEmitter { + stdout = new EventEmitter() + stderr = new EventEmitter() + exitCode: number | null = null + signals: string[] = [] + + kill(signal: string) { + this.signals.push(signal) + if (signal === 'SIGKILL') { + this.exitCode = 137 + this.emit('close', 137) + } + } + } + + const child = new FakeChild() + const spawnImpl = vi.fn(() => child) as unknown as ReturnType + const runtime = createRuntimeWithSpawn(spawnImpl, { + processTerminationGraceMs: undefined, + }) + + try { + const outcomePromise = runtime + .runToktrack( + { + command: 'fake-runner', + prefixArgs: [], + env: {}, + }, + ['daily', '--json'], + { timeoutMs: 50 }, + ) + .then((value) => ({ ok: true as const, value })) + .catch((error) => ({ ok: false as const, error })) + + await vi.advanceTimersByTimeAsync(50) + expect(child.signals).toEqual(['SIGTERM']) + + await vi.advanceTimersByTimeAsync(4999) + expect(child.signals).toEqual(['SIGTERM']) + + await vi.advanceTimersByTimeAsync(1) + expect(child.signals).toEqual(['SIGTERM', 'SIGKILL']) + await expect(outcomePromise).resolves.toMatchObject({ + ok: false, + error: { + message: expect.stringContaining('Command timed out'), + timedOut: true, + }, + }) + } finally { + vi.useRealTimers() + } + }) + it('prioritizes local version mismatches over generic no-runner feedback', () => { const error = 
toAutoImportRunnerResolutionError({ localVersionMismatch: { From 2f73f557ce8f04fe0aebe944fcd96a25418716b7 Mon Sep 17 00:00:00 2001 From: tyl3r-ch Date: Sun, 3 May 2026 18:23:25 +0200 Subject: [PATCH 42/42] Harden command runner follow-up --- server/auto-import-runtime/command-runner.js | 68 +- .../server-helpers-command-runner.test.ts | 345 +++++++++ .../server-helpers-latest-version.test.ts | 115 +++ tests/unit/server-helpers-runner-core.test.ts | 666 ------------------ .../unit/server-helpers-runner-errors.test.ts | 82 +++ .../server-helpers-runner-resolution.test.ts | 103 +++ .../unit/server-helpers-runner-test-utils.ts | 53 ++ .../unit/server-helpers-runner-utils.test.ts | 62 ++ 8 files changed, 811 insertions(+), 683 deletions(-) create mode 100644 tests/unit/server-helpers-command-runner.test.ts create mode 100644 tests/unit/server-helpers-latest-version.test.ts delete mode 100644 tests/unit/server-helpers-runner-core.test.ts create mode 100644 tests/unit/server-helpers-runner-errors.test.ts create mode 100644 tests/unit/server-helpers-runner-resolution.test.ts create mode 100644 tests/unit/server-helpers-runner-test-utils.ts create mode 100644 tests/unit/server-helpers-runner-utils.test.ts diff --git a/server/auto-import-runtime/command-runner.js b/server/auto-import-runtime/command-runner.js index 18d0260..90319d1 100644 --- a/server/auto-import-runtime/command-runner.js +++ b/server/auto-import-runtime/command-runner.js @@ -61,6 +61,7 @@ function createAutoImportCommandRunner({ stdout = '', stderr = '', exitCode = null, + exitSignal = null, timedOut = false, outputTruncated = false, } = {}, @@ -71,6 +72,7 @@ function createAutoImportCommandRunner({ error.stdout = stdout; error.stderr = stderr; error.exitCode = exitCode; + error.exitSignal = exitSignal; error.timedOut = timedOut; error.outputTruncated = outputTruncated; return error; @@ -214,32 +216,51 @@ function createAutoImportCommandRunner({ let timeoutId = null; let outputFinalized = false; + const 
settle = (handler, value) => { + if (finished) { + return false; + } + finished = true; + if (timeoutId) { + clearTimeout(timeoutId); + } + handler(value); + return true; + }; + + const rejectFromCallbackError = (error) => { + const callbackError = error instanceof Error ? error : new Error(String(error)); + const rejected = settle(reject, callbackError); + if (rejected) { + terminateChildProcess(child); + } + return false; + }; + const emitStderrChunk = (chunkText) => { - if (streamStderr && onStderr && chunkText.trim()) { + if (finished) { + return false; + } + if (!streamStderr || !onStderr || !chunkText.trim()) { + return true; + } + try { onStderr(chunkText); + return true; + } catch (error) { + return rejectFromCallbackError(error); } }; const finalizeOutput = () => { if (outputFinalized) { - return; + return true; } outputFinalized = true; stdout = flushCapturedOutput(stdoutCapture); stderr = flushCapturedOutput(stderrCapture); outputTruncated = stdoutCapture.truncated || stderrCapture.truncated; - emitStderrChunk(stderrStreamDecoder.end()); - }; - - const settle = (handler, value) => { - if (finished) { - return; - } - finished = true; - if (timeoutId) { - clearTimeout(timeoutId); - } - handler(value); + return emitStderrChunk(stderrStreamDecoder.end()); }; if (signalOnClose) { @@ -255,16 +276,24 @@ function createAutoImportCommandRunner({ } child.stdout.on('data', (chunk) => { + if (finished) { + return; + } appendCapturedOutput(stdoutCapture, chunk, capturedOutputMaxBytes); }); child.stderr.on('data', (chunk) => { + if (finished) { + return; + } appendCapturedOutput(stderrCapture, chunk, capturedOutputMaxBytes); emitStderrChunk(stderrStreamDecoder.write(chunk)); }); child.on('error', (error) => { - finalizeOutput(); + if (!finalizeOutput()) { + return; + } settle( reject, createCommandError(error.message || `Could not start ${commandLabel}.`, { @@ -276,11 +305,13 @@ function createAutoImportCommandRunner({ }), ); }); - child.on('close', (code) => { + 
child.on('close', (code, signal) => { if (finished) { return; } - finalizeOutput(); + if (!finalizeOutput()) { + return; + } if (timedOut) { settle( reject, @@ -289,6 +320,8 @@ function createAutoImportCommandRunner({ args, stdout, stderr, + exitCode: code, + exitSignal: signal ?? null, timedOut: true, outputTruncated, }), @@ -309,6 +342,7 @@ function createAutoImportCommandRunner({ stdout, stderr, exitCode: code, + exitSignal: signal ?? null, outputTruncated, }, ), diff --git a/tests/unit/server-helpers-command-runner.test.ts b/tests/unit/server-helpers-command-runner.test.ts new file mode 100644 index 0000000..23289ce --- /dev/null +++ b/tests/unit/server-helpers-command-runner.test.ts @@ -0,0 +1,345 @@ +import { afterEach, describe, expect, it, vi } from 'vitest' +import { + EventEmitter, + FakeChildProcess, + createSpawnSequence, + resetServerHelperTestState, + runCommandWithSpawn, +} from './server-helpers.shared' +import { createRuntimeWithSpawn } from './server-helpers-runner-test-utils' + +afterEach(() => { + resetServerHelperTestState() +}) + +describe('server helper utilities: command runner', () => { + it('uses stdout as the toktrack command error message when stderr is empty', async () => { + const spawnImpl = createSpawnSequence([{ code: 1, stdout: 'stdout failure\n' }]) + const runtime = createRuntimeWithSpawn(spawnImpl) + + await expect( + runtime.runToktrack( + { + command: 'fake-runner', + prefixArgs: ['--prefix'], + env: { PATH: '/fake-bin' }, + }, + ['--version'], + ), + ).rejects.toMatchObject({ + message: 'stdout failure', + stdout: 'stdout failure\n', + stderr: '', + exitCode: 1, + }) + expect(spawnImpl).toHaveBeenCalledWith( + 'fake-runner', + ['--prefix', '--version'], + expect.objectContaining({ + stdio: ['ignore', 'pipe', 'pipe'], + env: { PATH: '/fake-bin' }, + }), + ) + }) + + it('streams stderr and lets callers terminate a running command on close', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => 
child) + const stderrLines: string[] = [] + let closeCommand: (() => void) | null = null + + const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { + streamStderr: true, + onStderr: (line) => stderrLines.push(line), + signalOnClose: (close) => { + closeCommand = close + }, + spawnImpl, + }) + + child.stderr.emit('data', Buffer.from('runner warning\n')) + expect(stderrLines).toEqual(['runner warning\n']) + + closeCommand?.() + + await expect(outcomePromise).rejects.toMatchObject({ + message: 'runner warning', + stderr: 'runner warning\n', + exitCode: 143, + }) + }) + + it('rejects and terminates the command when a streamed stderr callback throws', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => child) + const callbackError = new Error('stderr listener failed') + const onStderr = vi.fn(() => { + throw callbackError + }) + const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { + streamStderr: true, + onStderr, + spawnImpl, + }) + + child.stderr.emit('data', Buffer.from('runner warning\n')) + child.stderr.emit('data', Buffer.from('ignored warning\n')) + + await expect(outcomePromise).rejects.toBe(callbackError) + expect(onStderr).toHaveBeenCalledTimes(1) + expect(child.exitCode).toBe(143) + }) + + it('rejects in-band when a streamed stderr callback throws during decoder flush', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => child) + const callbackError = new Error('stderr flush listener failed') + const onStderr = vi.fn(() => { + throw callbackError + }) + const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { + streamStderr: true, + onStderr, + spawnImpl, + }) + const symbol = Buffer.from('€') + + child.stderr.emit('data', symbol.subarray(0, 1)) + child.exitCode = 1 + child.emit('close', 1) + + await expect(outcomePromise).rejects.toBe(callbackError) + expect(onStderr).toHaveBeenCalledWith('�') + }) + + it('captures 
stdout split across UTF-8 chunk boundaries', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => child) + const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { + spawnImpl, + }) + const output = Buffer.from('a€b') + + child.stdout.emit('data', output.subarray(0, 2)) + child.stdout.emit('data', output.subarray(2)) + child.exitCode = 1 + child.emit('close', 1) + + await expect(outcomePromise).rejects.toMatchObject({ + message: 'a€b', + stdout: 'a€b', + exitCode: 1, + }) + }) + + it('streams stderr split across UTF-8 chunk boundaries without replacement characters', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => child) + const stderrLines: string[] = [] + const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { + streamStderr: true, + onStderr: (line) => stderrLines.push(line), + spawnImpl, + }) + const prefix = Buffer.from('runner ') + const symbol = Buffer.from('€') + const suffix = Buffer.from(' warning\n') + + child.stderr.emit('data', Buffer.concat([prefix, symbol.subarray(0, 1)])) + child.stderr.emit('data', Buffer.concat([symbol.subarray(1), suffix])) + child.exitCode = 1 + child.emit('close', 1) + + await expect(outcomePromise).rejects.toMatchObject({ + message: 'runner € warning', + stderr: 'runner € warning\n', + exitCode: 1, + }) + expect(stderrLines.join('')).toBe('runner € warning\n') + expect(stderrLines.join('')).not.toContain('\uFFFD') + }) + + it('bounds captured output while preserving streamed stderr chunks', async () => { + const spawnImpl = createSpawnSequence([ + { + code: 1, + stdout: 'abcdef', + stderr: 'runner warning\n', + }, + ]) + const stderrLines: string[] = [] + + await expect( + runCommandWithSpawn('fake-runner', ['daily', '--json'], { + maxOutputBytes: 4, + streamStderr: true, + onStderr: (line) => stderrLines.push(line), + spawnImpl, + }), + ).rejects.toMatchObject({ + stdout: 'abcd', + stderr: 'runn', + 
outputTruncated: true, + exitCode: 1, + }) + expect(stderrLines).toEqual(['runner warning\n']) + }) + + it('truncates captured UTF-8 output on character boundaries', async () => { + const spawnImpl = createSpawnSequence([ + { + code: 1, + stdout: 'a€b', + }, + ]) + + await expect( + runCommandWithSpawn('fake-runner', ['daily', '--json'], { + maxOutputBytes: 3, + spawnImpl, + }), + ).rejects.toMatchObject({ + stdout: 'a', + outputTruncated: true, + exitCode: 1, + }) + }) + + it('wraps spawn errors with command diagnostics', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => child) + const outcomePromise = runCommandWithSpawn('missing-runner', ['--version'], { + spawnImpl, + }) + + child.emit('error', new Error('spawn denied')) + + await expect(outcomePromise).rejects.toMatchObject({ + message: 'spawn denied', + command: 'missing-runner', + args: ['--version'], + stdout: '', + stderr: '', + }) + }) + + it('waits for timed-out toktrack commands to exit before rejecting', async () => { + vi.useFakeTimers() + + class FakeChild extends EventEmitter { + stdout = new EventEmitter() + stderr = new EventEmitter() + exitCode: number | null = null + + kill(signal: string) { + if (signal !== 'SIGTERM') { + this.exitCode = 137 + this.emit('close', 137) + return + } + + setTimeout(() => { + this.exitCode = 143 + this.emit('close', 143) + }, 150) + } + } + + const child = new FakeChild() + const spawnImpl = vi.fn(() => child) as unknown as ReturnType + const runtime = createRuntimeWithSpawn(spawnImpl) + let settled = false + + try { + const outcomePromise = runtime + .runToktrack( + { + command: 'fake-runner', + prefixArgs: [], + env: {}, + }, + ['daily', '--json'], + { timeoutMs: 50 }, + ) + .then((value) => ({ ok: true as const, value })) + .catch((error) => ({ ok: false as const, error })) + .finally(() => { + settled = true + }) + + await vi.advanceTimersByTimeAsync(50) + expect(settled).toBe(false) + + await vi.advanceTimersByTimeAsync(150) + 
await expect(outcomePromise).resolves.toMatchObject({ + ok: false, + error: { + message: expect.stringContaining('Command timed out'), + exitCode: 143, + timedOut: true, + }, + }) + } finally { + vi.useRealTimers() + } + }) + + it('uses a 5s process termination grace fallback when none is configured', async () => { + vi.useFakeTimers() + + class FakeChild extends EventEmitter { + stdout = new EventEmitter() + stderr = new EventEmitter() + exitCode: number | null = null + signals: string[] = [] + + kill(signal: string) { + this.signals.push(signal) + if (signal === 'SIGKILL') { + this.exitCode = 137 + this.emit('close', 137) + } + } + } + + const child = new FakeChild() + const spawnImpl = vi.fn(() => child) as unknown as ReturnType + const runtime = createRuntimeWithSpawn(spawnImpl, { + processTerminationGraceMs: undefined, + }) + + try { + const outcomePromise = runtime + .runToktrack( + { + command: 'fake-runner', + prefixArgs: [], + env: {}, + }, + ['daily', '--json'], + { timeoutMs: 50 }, + ) + .then((value) => ({ ok: true as const, value })) + .catch((error) => ({ ok: false as const, error })) + + await vi.advanceTimersByTimeAsync(50) + expect(child.signals).toEqual(['SIGTERM']) + + await vi.advanceTimersByTimeAsync(4999) + expect(child.signals).toEqual(['SIGTERM']) + + await vi.advanceTimersByTimeAsync(1) + expect(child.signals).toEqual(['SIGTERM', 'SIGKILL']) + await expect(outcomePromise).resolves.toMatchObject({ + ok: false, + error: { + message: expect.stringContaining('Command timed out'), + exitCode: 137, + timedOut: true, + }, + }) + } finally { + vi.useRealTimers() + } + }) +}) diff --git a/tests/unit/server-helpers-latest-version.test.ts b/tests/unit/server-helpers-latest-version.test.ts new file mode 100644 index 0000000..00f42f5 --- /dev/null +++ b/tests/unit/server-helpers-latest-version.test.ts @@ -0,0 +1,115 @@ +import { afterEach, describe, expect, it, vi } from 'vitest' +import { + TOKTRACK_VERSION, + createSpawnSequence, + 
resetServerHelperTestState, +} from './server-helpers.shared' +import { createRuntimeWithSpawn } from './server-helpers-runner-test-utils' + +afterEach(() => { + resetServerHelperTestState() +}) + +describe('server helper utilities: toktrack latest-version lookup', () => { + it('returns a structured warning when the latest toktrack version lookup times out', async () => { + vi.useFakeTimers() + const runtime = createRuntimeWithSpawn(createSpawnSequence([{ hang: true }]), { + latestLookupTimeoutMs: 50, + }) + + try { + const statusPromise = runtime.lookupLatestToktrackVersion() + await vi.advanceTimersByTimeAsync(50) + + await expect(statusPromise).resolves.toMatchObject({ + configuredVersion: TOKTRACK_VERSION, + latestVersion: null, + isLatest: null, + lookupStatus: 'timeout', + message: expect.stringContaining('Command timed out'), + }) + } finally { + vi.useRealTimers() + } + }) + + it('returns a structured warning when the latest toktrack version lookup is malformed', async () => { + const runtime = createRuntimeWithSpawn(createSpawnSequence([{ stdout: 'not-a-version\n' }])) + + await expect(runtime.lookupLatestToktrackVersion()).resolves.toMatchObject({ + configuredVersion: TOKTRACK_VERSION, + latestVersion: null, + isLatest: null, + lookupStatus: 'malformed-output', + message: 'npm returned an invalid toktrack version: not-a-version', + }) + }) + + it('reuses a cached successful latest-version lookup until the TTL expires', async () => { + const spawnImpl = createSpawnSequence([ + { stdout: `${TOKTRACK_VERSION}\n` }, + { stdout: '9.9.9\n' }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl) + const nowSpy = vi.spyOn(Date, 'now') + + try { + nowSpy.mockReturnValue(1_000_000) + const firstStatus = await runtime.lookupLatestToktrackVersion() + nowSpy.mockReturnValue(1_000_000) + const secondStatus = await runtime.lookupLatestToktrackVersion() + nowSpy.mockReturnValue(1_000_000 + 5 * 60 * 1000 + 1) + const thirdStatus = await 
runtime.lookupLatestToktrackVersion() + + expect(firstStatus).toMatchObject({ + configuredVersion: TOKTRACK_VERSION, + latestVersion: TOKTRACK_VERSION, + isLatest: true, + lookupStatus: 'ok', + }) + expect(secondStatus).toEqual(firstStatus) + expect(thirdStatus).toMatchObject({ + latestVersion: '9.9.9', + isLatest: false, + lookupStatus: 'ok', + }) + expect(spawnImpl).toHaveBeenCalledTimes(2) + } finally { + nowSpy.mockRestore() + } + }) + + it('reuses a cached failed latest-version lookup until the failure TTL expires', async () => { + const spawnImpl = createSpawnSequence([ + { code: 1, stderr: 'lookup failed\n' }, + { code: 1, stderr: 'lookup still failed\n' }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl) + const nowSpy = vi.spyOn(Date, 'now') + + try { + nowSpy.mockReturnValue(2_000_000) + const firstStatus = await runtime.lookupLatestToktrackVersion() + nowSpy.mockReturnValue(2_000_000) + const secondStatus = await runtime.lookupLatestToktrackVersion() + nowSpy.mockReturnValue(2_000_000 + 60 * 1000 + 1) + const thirdStatus = await runtime.lookupLatestToktrackVersion() + + expect(firstStatus).toMatchObject({ + configuredVersion: TOKTRACK_VERSION, + latestVersion: null, + isLatest: null, + lookupStatus: 'failed', + message: 'lookup failed', + }) + expect(secondStatus).toEqual(firstStatus) + expect(thirdStatus).toMatchObject({ + lookupStatus: 'failed', + message: 'lookup still failed', + }) + expect(spawnImpl).toHaveBeenCalledTimes(2) + } finally { + nowSpy.mockRestore() + } + }) +}) diff --git a/tests/unit/server-helpers-runner-core.test.ts b/tests/unit/server-helpers-runner-core.test.ts deleted file mode 100644 index 784b63d..0000000 --- a/tests/unit/server-helpers-runner-core.test.ts +++ /dev/null @@ -1,666 +0,0 @@ -import { afterEach, describe, expect, it, vi } from 'vitest' -import { - EventEmitter, - FakeChildProcess, - TOKTRACK_VERSION, - createAutoImportRuntime, - createSpawnSequence, - getExecutableName, - getLocalToktrackDisplayCommand, - 
getToktrackLatestLookupTimeoutMs, - getToktrackRunnerTimeouts, - parseToktrackVersionOutput, - resetServerHelperTestState, - runCommandWithSpawn, - toAutoImportRunnerResolutionError, -} from './server-helpers.shared' - -function createRuntimeWithSpawn( - spawnImpl: ReturnType, - options: { - latestLookupTimeoutMs?: number - localBinExists?: boolean - localBinOverride?: string - localProbeTimeoutMs?: number - localVersionCheckTimeoutMs?: number - processTerminationGraceMs?: number | undefined - } = {}, -) { - const localBin = options.localBinOverride ?? '/missing/toktrack' - const processTerminationGraceMs = Object.prototype.hasOwnProperty.call( - options, - 'processTerminationGraceMs', - ) - ? options.processTerminationGraceMs - : 1000 - - return createAutoImportRuntime({ - fs: { - existsSync: (filePath: string) => Boolean(options.localBinExists && filePath === localBin), - }, - processObject: { - env: options.localBinOverride ? { TTDASH_TOKTRACK_LOCAL_BIN: options.localBinOverride } : {}, - }, - spawnCrossPlatform: spawnImpl, - normalizeIncomingData: (input: unknown) => input, - withSettingsAndDataMutationLock: async (operation: () => Promise) => operation(), - writeData: () => undefined, - updateDataLoadState: async () => undefined, - toktrackPackageName: 'toktrack', - toktrackPackageSpec: `toktrack@${TOKTRACK_VERSION}`, - toktrackVersion: TOKTRACK_VERSION, - toktrackLocalBin: '/missing/toktrack', - npxCacheDir: '/tmp/ttdash-test-npx-cache', - isWindows: false, - processTerminationGraceMs, - toktrackLocalRunnerProbeTimeoutMs: options.localProbeTimeoutMs ?? 7000, - toktrackLocalRunnerVersionCheckTimeoutMs: options.localVersionCheckTimeoutMs ?? 7000, - toktrackLocalRunnerImportTimeoutMs: 60000, - toktrackPackageRunnerProbeTimeoutMs: 45000, - toktrackPackageRunnerVersionCheckTimeoutMs: 45000, - toktrackPackageRunnerImportTimeoutMs: 60000, - toktrackLatestLookupTimeoutMs: options.latestLookupTimeoutMs ?? 
15000, - toktrackLatestCacheSuccessTtlMs: 5 * 60 * 1000, - toktrackLatestCacheFailureTtlMs: 60 * 1000, - probeLog: (message: string) => console.warn(message), - }) -} - -afterEach(() => { - resetServerHelperTestState() -}) - -describe('server helper utilities: toktrack runner core behavior', () => { - it('maps executable names correctly across platforms', () => { - expect(getExecutableName('npm', true)).toBe('npm.cmd') - expect(getExecutableName('bun', true)).toBe('bun.exe') - expect(getExecutableName('bunx', true)).toBe('bun.exe') - expect(getExecutableName('npx', true)).toBe('npx.cmd') - expect(getExecutableName('toktrack', true)).toBe('toktrack') - expect(getExecutableName('npm', false)).toBe('npm') - expect(getExecutableName('bun', false)).toBe('bun') - expect(getExecutableName('bunx', false)).toBe('bunx') - expect(getExecutableName('npx', false)).toBe('npx') - }) - - it('renders the local toktrack command example correctly across platforms', () => { - expect(getLocalToktrackDisplayCommand(false)).toBe('node_modules/.bin/toktrack daily --json') - expect(getLocalToktrackDisplayCommand(true)).toBe( - 'node_modules\\.bin\\toktrack.cmd daily --json', - ) - }) - - it('honors a configured local toktrack binary override for display and execution', async () => { - const spawnImpl = createSpawnSequence([{ stdout: `toktrack ${TOKTRACK_VERSION}` }]) - const runtime = createRuntimeWithSpawn(spawnImpl, { - localBinExists: true, - localBinOverride: '/custom/bin/toktrack', - }) - - expect(runtime.getLocalToktrackDisplayCommand()).toBe('/custom/bin/toktrack daily --json') - - await expect(runtime.resolveToktrackRunner()).resolves.toMatchObject({ - command: '/custom/bin/toktrack', - displayCommand: '/custom/bin/toktrack daily --json', - method: 'local', - }) - expect(spawnImpl).toHaveBeenCalledWith( - '/custom/bin/toktrack', - ['--version'], - expect.objectContaining({ - env: { TTDASH_TOKTRACK_LOCAL_BIN: '/custom/bin/toktrack' }, - }), - ) - }) - - it('parses toktrack version 
banners down to the raw version', () => { - expect(parseToktrackVersionOutput(`toktrack ${TOKTRACK_VERSION}`)).toBe(TOKTRACK_VERSION) - expect(parseToktrackVersionOutput(`${TOKTRACK_VERSION}\n`)).toBe(TOKTRACK_VERSION) - }) - - it('uses longer warmup timeouts for package runners than for local toktrack', () => { - const localTimeouts = getToktrackRunnerTimeouts({ method: 'local' }) - const bunxTimeouts = getToktrackRunnerTimeouts({ method: 'bunx' }) - const npxTimeouts = getToktrackRunnerTimeouts({ method: 'npm' }) - - expect(localTimeouts).toEqual({ - probeMs: 7000, - versionCheckMs: 7000, - importMs: 60000, - }) - expect(bunxTimeouts).toEqual({ - probeMs: 45000, - versionCheckMs: 45000, - importMs: 60000, - }) - expect(npxTimeouts).toEqual(bunxTimeouts) - }) - - it('uses a less aggressive timeout for latest-version registry lookups', () => { - expect(getToktrackLatestLookupTimeoutMs()).toBe(15000) - }) - - it('resolves a compatible local toktrack runner without probing package fallbacks', async () => { - const spawnImpl = createSpawnSequence([{ stdout: `toktrack ${TOKTRACK_VERSION}\n` }]) - const runtime = createRuntimeWithSpawn(spawnImpl, { localBinExists: true }) - - await expect(runtime.resolveToktrackRunner()).resolves.toMatchObject({ - command: '/missing/toktrack', - method: 'local', - label: 'local toktrack', - }) - expect(spawnImpl).toHaveBeenCalledTimes(1) - expect(spawnImpl).toHaveBeenCalledWith( - '/missing/toktrack', - ['--version'], - expect.objectContaining({ - stdio: ['ignore', 'pipe', 'pipe'], - }), - ) - }) - - it('uses the local version-check timeout before probing package fallbacks', async () => { - vi.useFakeTimers() - const spawnImpl = createSpawnSequence([ - { hang: true }, - { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, - ]) - const runtime = createRuntimeWithSpawn(spawnImpl, { - localBinExists: true, - localProbeTimeoutMs: 7000, - localVersionCheckTimeoutMs: 50, - }) - - try { - const resolutionPromise = runtime.resolveToktrackRunner() - 
await vi.advanceTimersByTimeAsync(50) - - expect(spawnImpl).toHaveBeenCalledTimes(2) - await expect(resolutionPromise).resolves.toMatchObject({ - command: 'bunx', - method: 'bunx', - }) - } finally { - vi.useRealTimers() - } - }) - - it('falls back to npm when the local runner version mismatches and bunx probing fails', async () => { - const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => undefined) - const spawnImpl = createSpawnSequence([ - { stdout: 'toktrack 0.0.0\n' }, - { code: 1, stderr: 'bunx failed\n' }, - { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, - ]) - const runtime = createRuntimeWithSpawn(spawnImpl, { localBinExists: true }) - - try { - await expect(runtime.resolveToktrackRunner()).resolves.toMatchObject({ - command: 'npx', - method: 'npm', - label: 'npm exec', - }) - expect(spawnImpl).toHaveBeenCalledTimes(3) - expect(warnSpy).toHaveBeenCalledWith(expect.stringContaining('Failed to probe bunx')) - } finally { - warnSpy.mockRestore() - } - }) - - it('returns a structured warning when the latest toktrack version lookup times out', async () => { - vi.useFakeTimers() - const runtime = createRuntimeWithSpawn(createSpawnSequence([{ hang: true }]), { - latestLookupTimeoutMs: 50, - }) - - try { - const statusPromise = runtime.lookupLatestToktrackVersion() - await vi.advanceTimersByTimeAsync(50) - - await expect(statusPromise).resolves.toMatchObject({ - configuredVersion: TOKTRACK_VERSION, - latestVersion: null, - isLatest: null, - lookupStatus: 'timeout', - message: expect.stringContaining('Command timed out'), - }) - } finally { - vi.useRealTimers() - } - }) - - it('returns a structured warning when the latest toktrack version lookup is malformed', async () => { - const runtime = createRuntimeWithSpawn(createSpawnSequence([{ stdout: 'not-a-version\n' }])) - - await expect(runtime.lookupLatestToktrackVersion()).resolves.toMatchObject({ - configuredVersion: TOKTRACK_VERSION, - latestVersion: null, - isLatest: null, - lookupStatus: 
'malformed-output', - message: 'npm returned an invalid toktrack version: not-a-version', - }) - }) - - it('reuses a cached successful latest-version lookup until the TTL expires', async () => { - const spawnImpl = createSpawnSequence([ - { stdout: `${TOKTRACK_VERSION}\n` }, - { stdout: '9.9.9\n' }, - ]) - const runtime = createRuntimeWithSpawn(spawnImpl) - const nowSpy = vi.spyOn(Date, 'now') - - try { - nowSpy.mockReturnValue(1_000_000) - const firstStatus = await runtime.lookupLatestToktrackVersion() - nowSpy.mockReturnValue(1_000_000) - const secondStatus = await runtime.lookupLatestToktrackVersion() - nowSpy.mockReturnValue(1_000_000 + 5 * 60 * 1000 + 1) - const thirdStatus = await runtime.lookupLatestToktrackVersion() - - expect(firstStatus).toMatchObject({ - configuredVersion: TOKTRACK_VERSION, - latestVersion: TOKTRACK_VERSION, - isLatest: true, - lookupStatus: 'ok', - }) - expect(secondStatus).toEqual(firstStatus) - expect(thirdStatus).toMatchObject({ - latestVersion: '9.9.9', - isLatest: false, - lookupStatus: 'ok', - }) - expect(spawnImpl).toHaveBeenCalledTimes(2) - } finally { - nowSpy.mockRestore() - } - }) - - it('reuses a cached failed latest-version lookup until the failure TTL expires', async () => { - const spawnImpl = createSpawnSequence([ - { code: 1, stderr: 'lookup failed\n' }, - { code: 1, stderr: 'lookup still failed\n' }, - ]) - const runtime = createRuntimeWithSpawn(spawnImpl) - const nowSpy = vi.spyOn(Date, 'now') - - try { - nowSpy.mockReturnValue(2_000_000) - const firstStatus = await runtime.lookupLatestToktrackVersion() - nowSpy.mockReturnValue(2_000_000) - const secondStatus = await runtime.lookupLatestToktrackVersion() - nowSpy.mockReturnValue(2_000_000 + 60 * 1000 + 1) - const thirdStatus = await runtime.lookupLatestToktrackVersion() - - expect(firstStatus).toMatchObject({ - configuredVersion: TOKTRACK_VERSION, - latestVersion: null, - isLatest: null, - lookupStatus: 'failed', - message: 'lookup failed', - }) - 
expect(secondStatus).toEqual(firstStatus) - expect(thirdStatus).toMatchObject({ - lookupStatus: 'failed', - message: 'lookup still failed', - }) - expect(spawnImpl).toHaveBeenCalledTimes(2) - } finally { - nowSpy.mockRestore() - } - }) - - it('uses stdout as the toktrack command error message when stderr is empty', async () => { - const spawnImpl = createSpawnSequence([{ code: 1, stdout: 'stdout failure\n' }]) - const runtime = createRuntimeWithSpawn(spawnImpl) - - await expect( - runtime.runToktrack( - { - command: 'fake-runner', - prefixArgs: ['--prefix'], - env: { PATH: '/fake-bin' }, - }, - ['--version'], - ), - ).rejects.toMatchObject({ - message: 'stdout failure', - stdout: 'stdout failure\n', - stderr: '', - exitCode: 1, - }) - expect(spawnImpl).toHaveBeenCalledWith( - 'fake-runner', - ['--prefix', '--version'], - expect.objectContaining({ - stdio: ['ignore', 'pipe', 'pipe'], - env: { PATH: '/fake-bin' }, - }), - ) - }) - - it('streams stderr and lets callers terminate a running command on close', async () => { - const child = new FakeChildProcess() - const spawnImpl = vi.fn(() => child) - const stderrLines: string[] = [] - let closeCommand: (() => void) | null = null - - const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { - streamStderr: true, - onStderr: (line) => stderrLines.push(line), - signalOnClose: (close) => { - closeCommand = close - }, - spawnImpl, - }) - - child.stderr.emit('data', Buffer.from('runner warning\n')) - expect(stderrLines).toEqual(['runner warning\n']) - - closeCommand?.() - - await expect(outcomePromise).rejects.toMatchObject({ - message: 'runner warning', - stderr: 'runner warning\n', - exitCode: 143, - }) - }) - - it('captures stdout split across UTF-8 chunk boundaries', async () => { - const child = new FakeChildProcess() - const spawnImpl = vi.fn(() => child) - const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { - spawnImpl, - }) - const output = Buffer.from('a€b') - - 
child.stdout.emit('data', output.subarray(0, 2)) - child.stdout.emit('data', output.subarray(2)) - child.exitCode = 1 - child.emit('close', 1) - - await expect(outcomePromise).rejects.toMatchObject({ - message: 'a€b', - stdout: 'a€b', - exitCode: 1, - }) - }) - - it('streams stderr split across UTF-8 chunk boundaries without replacement characters', async () => { - const child = new FakeChildProcess() - const spawnImpl = vi.fn(() => child) - const stderrLines: string[] = [] - const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { - streamStderr: true, - onStderr: (line) => stderrLines.push(line), - spawnImpl, - }) - const prefix = Buffer.from('runner ') - const symbol = Buffer.from('€') - const suffix = Buffer.from(' warning\n') - - child.stderr.emit('data', Buffer.concat([prefix, symbol.subarray(0, 1)])) - child.stderr.emit('data', Buffer.concat([symbol.subarray(1), suffix])) - child.exitCode = 1 - child.emit('close', 1) - - await expect(outcomePromise).rejects.toMatchObject({ - message: 'runner € warning', - stderr: 'runner € warning\n', - exitCode: 1, - }) - expect(stderrLines.join('')).toBe('runner € warning\n') - expect(stderrLines.join('')).not.toContain('\uFFFD') - }) - - it('bounds captured output while preserving streamed stderr chunks', async () => { - const spawnImpl = createSpawnSequence([ - { - code: 1, - stdout: 'abcdef', - stderr: 'runner warning\n', - }, - ]) - const stderrLines: string[] = [] - - await expect( - runCommandWithSpawn('fake-runner', ['daily', '--json'], { - maxOutputBytes: 4, - streamStderr: true, - onStderr: (line) => stderrLines.push(line), - spawnImpl, - }), - ).rejects.toMatchObject({ - stdout: 'abcd', - stderr: 'runn', - outputTruncated: true, - exitCode: 1, - }) - expect(stderrLines).toEqual(['runner warning\n']) - }) - - it('truncates captured UTF-8 output on character boundaries', async () => { - const spawnImpl = createSpawnSequence([ - { - code: 1, - stdout: 'a€b', - }, - ]) - - await expect( - 
runCommandWithSpawn('fake-runner', ['daily', '--json'], { - maxOutputBytes: 3, - spawnImpl, - }), - ).rejects.toMatchObject({ - stdout: 'a', - outputTruncated: true, - exitCode: 1, - }) - }) - - it('wraps spawn errors with command diagnostics', async () => { - const child = new FakeChildProcess() - const spawnImpl = vi.fn(() => child) - const outcomePromise = runCommandWithSpawn('missing-runner', ['--version'], { - spawnImpl, - }) - - child.emit('error', new Error('spawn denied')) - - await expect(outcomePromise).rejects.toMatchObject({ - message: 'spawn denied', - command: 'missing-runner', - args: ['--version'], - stdout: '', - stderr: '', - }) - }) - - it('waits for timed-out toktrack commands to exit before rejecting', async () => { - vi.useFakeTimers() - - class FakeChild extends EventEmitter { - stdout = new EventEmitter() - stderr = new EventEmitter() - exitCode: number | null = null - - kill(signal: string) { - if (signal !== 'SIGTERM') { - this.exitCode = 137 - this.emit('close', 137) - return - } - - setTimeout(() => { - this.exitCode = 143 - this.emit('close', 143) - }, 150) - } - } - - const child = new FakeChild() - const spawnImpl = vi.fn(() => child) as unknown as ReturnType - const runtime = createRuntimeWithSpawn(spawnImpl) - let settled = false - - try { - const outcomePromise = runtime - .runToktrack( - { - command: 'fake-runner', - prefixArgs: [], - env: {}, - }, - ['daily', '--json'], - { timeoutMs: 50 }, - ) - .then((value) => ({ ok: true as const, value })) - .catch((error) => ({ ok: false as const, error })) - .finally(() => { - settled = true - }) - - await vi.advanceTimersByTimeAsync(50) - expect(settled).toBe(false) - - await vi.advanceTimersByTimeAsync(150) - await expect(outcomePromise).resolves.toMatchObject({ - ok: false, - error: { - message: expect.stringContaining('Command timed out'), - timedOut: true, - }, - }) - } finally { - vi.useRealTimers() - } - }) - - it('uses a 5s process termination grace fallback when none is 
configured', async () => { - vi.useFakeTimers() - - class FakeChild extends EventEmitter { - stdout = new EventEmitter() - stderr = new EventEmitter() - exitCode: number | null = null - signals: string[] = [] - - kill(signal: string) { - this.signals.push(signal) - if (signal === 'SIGKILL') { - this.exitCode = 137 - this.emit('close', 137) - } - } - } - - const child = new FakeChild() - const spawnImpl = vi.fn(() => child) as unknown as ReturnType - const runtime = createRuntimeWithSpawn(spawnImpl, { - processTerminationGraceMs: undefined, - }) - - try { - const outcomePromise = runtime - .runToktrack( - { - command: 'fake-runner', - prefixArgs: [], - env: {}, - }, - ['daily', '--json'], - { timeoutMs: 50 }, - ) - .then((value) => ({ ok: true as const, value })) - .catch((error) => ({ ok: false as const, error })) - - await vi.advanceTimersByTimeAsync(50) - expect(child.signals).toEqual(['SIGTERM']) - - await vi.advanceTimersByTimeAsync(4999) - expect(child.signals).toEqual(['SIGTERM']) - - await vi.advanceTimersByTimeAsync(1) - expect(child.signals).toEqual(['SIGTERM', 'SIGKILL']) - await expect(outcomePromise).resolves.toMatchObject({ - ok: false, - error: { - message: expect.stringContaining('Command timed out'), - timedOut: true, - }, - }) - } finally { - vi.useRealTimers() - } - }) - - it('prioritizes local version mismatches over generic no-runner feedback', () => { - const error = toAutoImportRunnerResolutionError({ - localVersionMismatch: { - detectedVersion: '9.9.9', - expectedVersion: TOKTRACK_VERSION, - }, - localFailure: null, - runnerFailures: [{ label: 'bunx', message: 'missing', timedOut: false }], - }) - - expect(error.messageKey).toBe('localToktrackVersionMismatch') - expect(error.message).toContain('9.9.9') - expect(error.message).toContain(TOKTRACK_VERSION) - }) - - it('prioritizes local startup failures over package runner feedback', () => { - const error = toAutoImportRunnerResolutionError({ - localVersionMismatch: null, - localFailure: 
'permission denied', - runnerFailures: [ - { label: 'bunx', message: 'missing', timedOut: false }, - { label: 'npm exec', message: 'missing', timedOut: false }, - ], - }) - - expect(error.messageKey).toBe('localToktrackFailed') - expect(error.message).toContain('permission denied') - }) - - it('reports package runner failures when no compatible fallback succeeds', () => { - const error = toAutoImportRunnerResolutionError({ - localVersionMismatch: null, - localFailure: null, - runnerFailures: [ - { label: 'bunx', message: 'failed to start', timedOut: false }, - { label: 'npm exec', message: 'permission denied', timedOut: false }, - ], - }) - - expect(error.messageKey).toBe('packageRunnerFailed') - expect(error.message).toContain('bunx: failed to start') - expect(error.message).toContain('npm exec: permission denied') - }) - - it('surfaces a dedicated warmup-timeout message when only package runners time out', () => { - const error = toAutoImportRunnerResolutionError({ - localVersionMismatch: null, - localFailure: null, - runnerFailures: [ - { label: 'bunx', message: 'Command timed out', timedOut: true }, - { label: 'npm exec', message: 'Command timed out', timedOut: true }, - ], - }) - - expect(error.messageKey).toBe('packageRunnerWarmupTimedOut') - expect(error.message).toContain('bunx / npm exec') - expect(error.message).toContain('45s') - }) - - it('reports a no-runner error when no resolution diagnostics are available', () => { - const error = toAutoImportRunnerResolutionError({ - localVersionMismatch: null, - localFailure: null, - runnerFailures: [], - }) - - expect(error.messageKey).toBe('noRunnerFound') - expect(error.message).toBe('No local toktrack, Bun, or npm exec installation found.') - }) -}) diff --git a/tests/unit/server-helpers-runner-errors.test.ts b/tests/unit/server-helpers-runner-errors.test.ts new file mode 100644 index 0000000..d0b3643 --- /dev/null +++ b/tests/unit/server-helpers-runner-errors.test.ts @@ -0,0 +1,82 @@ +import { afterEach, 
describe, expect, it } from 'vitest' +import { + TOKTRACK_VERSION, + resetServerHelperTestState, + toAutoImportRunnerResolutionError, +} from './server-helpers.shared' + +afterEach(() => { + resetServerHelperTestState() +}) + +describe('server helper utilities: toktrack runner resolution errors', () => { + it('prioritizes local version mismatches over generic no-runner feedback', () => { + const error = toAutoImportRunnerResolutionError({ + localVersionMismatch: { + detectedVersion: '9.9.9', + expectedVersion: TOKTRACK_VERSION, + }, + localFailure: null, + runnerFailures: [{ label: 'bunx', message: 'missing', timedOut: false }], + }) + + expect(error.messageKey).toBe('localToktrackVersionMismatch') + expect(error.message).toContain('9.9.9') + expect(error.message).toContain(TOKTRACK_VERSION) + }) + + it('prioritizes local startup failures over package runner feedback', () => { + const error = toAutoImportRunnerResolutionError({ + localVersionMismatch: null, + localFailure: 'permission denied', + runnerFailures: [ + { label: 'bunx', message: 'missing', timedOut: false }, + { label: 'npm exec', message: 'missing', timedOut: false }, + ], + }) + + expect(error.messageKey).toBe('localToktrackFailed') + expect(error.message).toContain('permission denied') + }) + + it('reports package runner failures when no compatible fallback succeeds', () => { + const error = toAutoImportRunnerResolutionError({ + localVersionMismatch: null, + localFailure: null, + runnerFailures: [ + { label: 'bunx', message: 'failed to start', timedOut: false }, + { label: 'npm exec', message: 'permission denied', timedOut: false }, + ], + }) + + expect(error.messageKey).toBe('packageRunnerFailed') + expect(error.message).toContain('bunx: failed to start') + expect(error.message).toContain('npm exec: permission denied') + }) + + it('surfaces a dedicated warmup-timeout message when only package runners time out', () => { + const error = toAutoImportRunnerResolutionError({ + localVersionMismatch: null, 
+ localFailure: null, + runnerFailures: [ + { label: 'bunx', message: 'Command timed out', timedOut: true }, + { label: 'npm exec', message: 'Command timed out', timedOut: true }, + ], + }) + + expect(error.messageKey).toBe('packageRunnerWarmupTimedOut') + expect(error.message).toContain('bunx / npm exec') + expect(error.message).toContain('45s') + }) + + it('reports a no-runner error when no resolution diagnostics are available', () => { + const error = toAutoImportRunnerResolutionError({ + localVersionMismatch: null, + localFailure: null, + runnerFailures: [], + }) + + expect(error.messageKey).toBe('noRunnerFound') + expect(error.message).toBe('No local toktrack, Bun, or npm exec installation found.') + }) +}) diff --git a/tests/unit/server-helpers-runner-resolution.test.ts b/tests/unit/server-helpers-runner-resolution.test.ts new file mode 100644 index 0000000..159d588 --- /dev/null +++ b/tests/unit/server-helpers-runner-resolution.test.ts @@ -0,0 +1,103 @@ +import { afterEach, describe, expect, it, vi } from 'vitest' +import { + TOKTRACK_VERSION, + createSpawnSequence, + resetServerHelperTestState, +} from './server-helpers.shared' +import { createRuntimeWithSpawn } from './server-helpers-runner-test-utils' + +afterEach(() => { + resetServerHelperTestState() +}) + +describe('server helper utilities: toktrack runner resolution', () => { + it('honors a configured local toktrack binary override for display and execution', async () => { + const spawnImpl = createSpawnSequence([{ stdout: `toktrack ${TOKTRACK_VERSION}` }]) + const runtime = createRuntimeWithSpawn(spawnImpl, { + localBinExists: true, + localBinOverride: '/custom/bin/toktrack', + }) + + expect(runtime.getLocalToktrackDisplayCommand()).toBe('/custom/bin/toktrack daily --json') + + await expect(runtime.resolveToktrackRunner()).resolves.toMatchObject({ + command: '/custom/bin/toktrack', + displayCommand: '/custom/bin/toktrack daily --json', + method: 'local', + }) + expect(spawnImpl).toHaveBeenCalledWith( 
+ '/custom/bin/toktrack', + ['--version'], + expect.objectContaining({ + env: { TTDASH_TOKTRACK_LOCAL_BIN: '/custom/bin/toktrack' }, + }), + ) + }) + + it('resolves a compatible local toktrack runner without probing package fallbacks', async () => { + const spawnImpl = createSpawnSequence([{ stdout: `toktrack ${TOKTRACK_VERSION}\n` }]) + const runtime = createRuntimeWithSpawn(spawnImpl, { localBinExists: true }) + + await expect(runtime.resolveToktrackRunner()).resolves.toMatchObject({ + command: '/missing/toktrack', + method: 'local', + label: 'local toktrack', + }) + expect(spawnImpl).toHaveBeenCalledTimes(1) + expect(spawnImpl).toHaveBeenCalledWith( + '/missing/toktrack', + ['--version'], + expect.objectContaining({ + stdio: ['ignore', 'pipe', 'pipe'], + }), + ) + }) + + it('uses the local version-check timeout before probing package fallbacks', async () => { + vi.useFakeTimers() + const spawnImpl = createSpawnSequence([ + { hang: true }, + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl, { + localBinExists: true, + localProbeTimeoutMs: 7000, + localVersionCheckTimeoutMs: 50, + }) + + try { + const resolutionPromise = runtime.resolveToktrackRunner() + await vi.advanceTimersByTimeAsync(50) + + expect(spawnImpl).toHaveBeenCalledTimes(2) + await expect(resolutionPromise).resolves.toMatchObject({ + command: 'bunx', + method: 'bunx', + }) + } finally { + vi.useRealTimers() + } + }) + + it('falls back to npm when the local runner version mismatches and bunx probing fails', async () => { + const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => undefined) + const spawnImpl = createSpawnSequence([ + { stdout: 'toktrack 0.0.0\n' }, + { code: 1, stderr: 'bunx failed\n' }, + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl, { localBinExists: true }) + + try { + await expect(runtime.resolveToktrackRunner()).resolves.toMatchObject({ + command: 'npx', + 
method: 'npm', + label: 'npm exec', + }) + expect(spawnImpl).toHaveBeenCalledTimes(3) + expect(warnSpy).toHaveBeenCalledWith(expect.stringContaining('Failed to probe bunx')) + } finally { + warnSpy.mockRestore() + } + }) +}) diff --git a/tests/unit/server-helpers-runner-test-utils.ts b/tests/unit/server-helpers-runner-test-utils.ts new file mode 100644 index 0000000..260f2dc --- /dev/null +++ b/tests/unit/server-helpers-runner-test-utils.ts @@ -0,0 +1,53 @@ +import { TOKTRACK_VERSION, createAutoImportRuntime } from './server-helpers.shared' +import type { createSpawnSequence } from './server-helpers.shared' + +export function createRuntimeWithSpawn( + spawnImpl: ReturnType, + options: { + latestLookupTimeoutMs?: number + localBinExists?: boolean + localBinOverride?: string + localProbeTimeoutMs?: number + localVersionCheckTimeoutMs?: number + processTerminationGraceMs?: number | undefined + } = {}, +) { + const localBin = options.localBinOverride ?? '/missing/toktrack' + const processTerminationGraceMs = Object.prototype.hasOwnProperty.call( + options, + 'processTerminationGraceMs', + ) + ? options.processTerminationGraceMs + : 1000 + + return createAutoImportRuntime({ + fs: { + existsSync: (filePath: string) => Boolean(options.localBinExists && filePath === localBin), + }, + processObject: { + env: options.localBinOverride ? { TTDASH_TOKTRACK_LOCAL_BIN: options.localBinOverride } : {}, + }, + spawnCrossPlatform: spawnImpl, + normalizeIncomingData: (input: unknown) => input, + withSettingsAndDataMutationLock: async (operation: () => Promise) => operation(), + writeData: () => undefined, + updateDataLoadState: async () => undefined, + toktrackPackageName: 'toktrack', + toktrackPackageSpec: `toktrack@${TOKTRACK_VERSION}`, + toktrackVersion: TOKTRACK_VERSION, + toktrackLocalBin: localBin, + npxCacheDir: '/tmp/ttdash-test-npx-cache', + isWindows: false, + processTerminationGraceMs, + toktrackLocalRunnerProbeTimeoutMs: options.localProbeTimeoutMs ?? 
7000, + toktrackLocalRunnerVersionCheckTimeoutMs: options.localVersionCheckTimeoutMs ?? 7000, + toktrackLocalRunnerImportTimeoutMs: 60000, + toktrackPackageRunnerProbeTimeoutMs: 45000, + toktrackPackageRunnerVersionCheckTimeoutMs: 45000, + toktrackPackageRunnerImportTimeoutMs: 60000, + toktrackLatestLookupTimeoutMs: options.latestLookupTimeoutMs ?? 15000, + toktrackLatestCacheSuccessTtlMs: 5 * 60 * 1000, + toktrackLatestCacheFailureTtlMs: 60 * 1000, + probeLog: (message: string) => console.warn(message), + }) +} diff --git a/tests/unit/server-helpers-runner-utils.test.ts b/tests/unit/server-helpers-runner-utils.test.ts new file mode 100644 index 0000000..e15d86e --- /dev/null +++ b/tests/unit/server-helpers-runner-utils.test.ts @@ -0,0 +1,62 @@ +import { afterEach, describe, expect, it } from 'vitest' +import { + TOKTRACK_VERSION, + getExecutableName, + getLocalToktrackDisplayCommand, + getToktrackLatestLookupTimeoutMs, + getToktrackRunnerTimeouts, + parseToktrackVersionOutput, + resetServerHelperTestState, +} from './server-helpers.shared' + +afterEach(() => { + resetServerHelperTestState() +}) + +describe('server helper utilities: toktrack runner utils', () => { + it('maps executable names correctly across platforms', () => { + expect(getExecutableName('npm', true)).toBe('npm.cmd') + expect(getExecutableName('bun', true)).toBe('bun.exe') + expect(getExecutableName('bunx', true)).toBe('bun.exe') + expect(getExecutableName('npx', true)).toBe('npx.cmd') + expect(getExecutableName('toktrack', true)).toBe('toktrack') + expect(getExecutableName('npm', false)).toBe('npm') + expect(getExecutableName('bun', false)).toBe('bun') + expect(getExecutableName('bunx', false)).toBe('bunx') + expect(getExecutableName('npx', false)).toBe('npx') + }) + + it('renders the local toktrack command example correctly across platforms', () => { + expect(getLocalToktrackDisplayCommand(false)).toBe('node_modules/.bin/toktrack daily --json') + expect(getLocalToktrackDisplayCommand(true)).toBe( 
+ 'node_modules\\.bin\\toktrack.cmd daily --json', + ) + }) + + it('parses toktrack version banners down to the raw version', () => { + expect(parseToktrackVersionOutput(`toktrack ${TOKTRACK_VERSION}`)).toBe(TOKTRACK_VERSION) + expect(parseToktrackVersionOutput(`${TOKTRACK_VERSION}\n`)).toBe(TOKTRACK_VERSION) + }) + + it('uses longer warmup timeouts for package runners than for local toktrack', () => { + const localTimeouts = getToktrackRunnerTimeouts({ method: 'local' }) + const bunxTimeouts = getToktrackRunnerTimeouts({ method: 'bunx' }) + const npxTimeouts = getToktrackRunnerTimeouts({ method: 'npm' }) + + expect(localTimeouts).toEqual({ + probeMs: 7000, + versionCheckMs: 7000, + importMs: 60000, + }) + expect(bunxTimeouts).toEqual({ + probeMs: 45000, + versionCheckMs: 45000, + importMs: 60000, + }) + expect(npxTimeouts).toEqual(bunxTimeouts) + }) + + it('uses a less aggressive timeout for latest-version registry lookups', () => { + expect(getToktrackLatestLookupTimeoutMs()).toBe(15000) + }) +})