diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fc596d0..e4aac8d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,9 +14,9 @@ concurrency: cancel-in-progress: true jobs: - test: + static: runs-on: ubuntu-latest - timeout-minutes: 20 + timeout-minutes: 12 steps: - name: Check out repository @@ -31,33 +31,162 @@ jobs: - name: Install dependencies run: npm ci --ignore-scripts - - name: Check formatting - run: npm run format:check + - name: Run static checks + run: npm run test:static - - name: Run ESLint - run: npm run lint + vitest: + name: Vitest (${{ matrix.project }}) + runs-on: ubuntu-latest + timeout-minutes: 15 + strategy: + fail-fast: false + matrix: + project: + - architecture + - unit + - frontend + - integration + - integration-background - - name: Run docstring lint - run: npm run lint:docstrings + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up Node.js + uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0 + with: + node-version: 24 + cache: npm - - name: Run dependency graph checks - run: npm run check:deps + - name: Install dependencies + run: npm ci --ignore-scripts - - name: Run TypeScript checks - run: ./node_modules/.bin/tsc --noEmit + - name: Run Vitest project + run: npm run test:vitest:${{ matrix.project }} - - name: Run architecture tests - run: npm run test:architecture + - name: Check Vitest timing budget + run: | + node scripts/report-test-timings.js \ + test-results/vitest-${{ matrix.project }}.junit.xml \ + --max-suite-seconds=20 \ + --max-test-seconds=12 + + - name: Upload Vitest report + if: always() + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 + with: + name: test-reports-vitest-${{ matrix.project }} + if-no-files-found: ignore + path: test-results/vitest-${{ matrix.project }}.junit.xml + + coverage: + runs-on: ubuntu-latest + timeout-minutes: 20 + 
+ steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up Node.js + uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0 + with: + node-version: 24 + cache: npm + + - name: Install dependencies + run: npm ci --ignore-scripts - name: Run unit and integration tests with coverage - run: npm run test:unit:coverage + run: npm run test:vitest:coverage + + - name: Upload coverage reports + if: always() + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 + with: + name: coverage-reports + if-no-files-found: ignore + path: | + coverage/ + test-results/vitest-coverage.junit.xml + + build: + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up Node.js + uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0 + with: + node-version: 24 + cache: npm + + - name: Install dependencies + run: npm ci --ignore-scripts - name: Build production bundle run: npm run build:app + - name: Upload production bundle + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 + with: + name: production-dist + if-no-files-found: error + path: dist/ + + package-smoke: + runs-on: ubuntu-latest + timeout-minutes: 10 + needs: build + + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up Node.js + uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0 + with: + node-version: 24 + cache: npm + + - name: Install dependencies + run: npm ci --ignore-scripts + + - name: Download production bundle + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 + with: + name: production-dist + path: dist + - name: Verify packed npm artifact run: npm run verify:package + e2e: + runs-on: 
ubuntu-latest + timeout-minutes: 20 + needs: build + + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up Node.js + uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0 + with: + node-version: 24 + cache: npm + + - name: Install dependencies + run: npm ci --ignore-scripts + + - name: Download production bundle + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 + with: + name: production-dist + path: dist + - name: Install Playwright browser run: npx playwright install --with-deps chromium @@ -68,10 +197,9 @@ jobs: if: always() uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: - name: test-reports + name: test-reports-e2e if-no-files-found: ignore path: | - coverage/ playwright-report/ test-results/ @@ -97,3 +225,44 @@ jobs: - name: Build production bundle run: npm run build:app + + ci-required: + name: CI Required + runs-on: ubuntu-latest + timeout-minutes: 5 + needs: + - static + - vitest + - coverage + - build + - package-smoke + - e2e + - windows-smoke + if: ${{ always() }} + + steps: + - name: Verify required CI jobs + run: | + failed=0 + + check_result() { + local job_name="$1" + local result="$2" + + if [[ "${result}" = "success" ]]; then + echo "${job_name}: ${result}" + else + echo "::error::${job_name}: ${result}" + failed=1 + fi + } + + check_result "static" "${{ needs.static.result }}" + check_result "vitest" "${{ needs.vitest.result }}" + check_result "coverage" "${{ needs.coverage.result }}" + check_result "build" "${{ needs.build.result }}" + check_result "package-smoke" "${{ needs['package-smoke'].result }}" + check_result "e2e" "${{ needs.e2e.result }}" + check_result "windows-smoke" "${{ needs['windows-smoke'].result }}" + + exit "${failed}" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 40d7e29..39afa8b 100644 --- a/.github/workflows/release.yml 
+++ b/.github/workflows/release.yml @@ -116,6 +116,7 @@ jobs: --workflow ci.yml \ --branch main \ --sha "${MAIN_SHA}" \ + --required-job "CI Required" \ --retries 90 \ --retry-delay-ms 10000 diff --git a/.gitignore b/.gitignore index 1fc5dc2..a9c1b0e 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ node_modules/ dist/ data.json .DS_Store +.cache/ .playwright-mcp/ .tmp-playwright/ .tmp-smoke-*/ @@ -21,7 +22,6 @@ coverage/ requirements/ docs/security/ docs/review/* -!docs/review/*.md docs/application-stack-reference.md /activity-*.png /cache-hit-rate-*.png diff --git a/AGENTS.md b/AGENTS.md index 73e6764..5bc8e91 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -24,7 +24,7 @@ Frontend code is TypeScript + React. Follow the existing style: 2-space indentat ## Testing Guidelines -Automated tests are part of the repo now. Before opening a PR, run `npm run verify:full`. If you only need the main local gate without Playwright, run `npm run verify`. If local port `3015` is already in use, run Playwright with `PLAYWRIGHT_TEST_PORT=3016 npm run test:e2e`. Use `npm run test:timings` to inspect the slowest Vitest suites and cases after larger test changes, but do not run it in parallel with another Vitest command that writes the same JUnit report. Architecture and dependency boundaries are guarded by `npm run test:architecture` and `npm run check:deps`; when you add or move modules, keep [`docs/architecture.md`](docs/architecture.md) aligned with the actual structure. +Automated tests are part of the repo now. Before opening a PR, run `npm run verify:full`; on a local machine with enough CPU, `PLAYWRIGHT_TEST_PORT=3016 npm run verify:full:parallel` is the faster non-coverage fast path across the main test surfaces. If you only need the main local gate without Playwright, run `npm run verify`. If local port `3015` is already in use, run Playwright with `PLAYWRIGHT_TEST_PORT=3016 npm run test:e2e`. 
Use `npm run test:timings` to inspect the slowest Vitest suites and cases after larger test changes, but do not run it in parallel with another Vitest command that writes the same JUnit report. Architecture and dependency boundaries are guarded by `npm run test:architecture` and `npm run check:deps`; when you add or move modules, keep [`docs/architecture.md`](docs/architecture.md) aligned with the actual structure. When adding tests: @@ -37,6 +37,7 @@ When adding tests: - use real subprocesses only when shell/PATH/CLI or cross-process behavior is the thing being tested - split files once they mix unrelated concerns like content, navigation, localization, motion, or keyboard behavior - keep `waitFor(...)` for real eventual consistency only, not as the default assertion pattern +- keep Playwright fixtures and shared helpers in `tests/e2e/fixtures.ts`; place specs in `tests/e2e` spec files and reset state with `resetAppState(...)` or `prepareDashboard(...)` - when improving coverage, prefer critical branch-heavy runtime modules like `src/lib/api.ts`, `src/hooks/use-usage-data.ts`, and `src/hooks/use-dashboard-controller.ts` over adding another broad dashboard catch-all test Continue to manually verify the main flows affected by the change: dashboard load, auto-import, JSON upload, filtering, and export actions. 
diff --git a/CHANGELOG.md b/CHANGELOG.md index 503a876..9d6fb9b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## [6.3.0] - 2026-05-02 + +### Added + +- **Paralleles Verification-Gate für lokale und CI-nahe Prüfläufe** — ein neuer Parallelmodus bündelt die wichtigsten Test- und Build-Oberflächen mit eigener Portsteuerung, damit breite Validierungen schneller und reproduzierbarer laufen +- **Test-Timing-Budgets und Benchmark-Transparenz** — neue Timing-Budgets, Projekt-Benchmarks und dokumentierte Gate-Optionen machen langsame oder driftende Tests besser sichtbar +- **Gezielte Branch-Coverage für Runtime-, Router- und Exportpfade** — zusätzliche Unit-, Frontend- und Integrationstests decken Settings, CSV-Export, Runner-Auflösung, Background-Runtime, Auto-Import-Router und process-adjacent Serverpfade gegen Regressionen ab + +### Improved + +- **Klarere Ownership in Runtime und Dashboard-Struktur** — Auto-Import-, Daten-, HTTP-Router- und Dashboard-Section-Verantwortlichkeiten wurden weiter in fokussierte Services und Contracts aufgeteilt +- **Stabilere parallele Testausführung** — Playwright-Specs isolieren ihren Zustand robuster, Vitest-/Frontend-Parallelismus ist gezielter abgestimmt, Toktrack-Cache-Tests nutzen Fake-Spawn-Pfade, und Chart-Legend-Tests bleiben leichtergewichtig +- **Bessere Diagnosequalität in Test- und Verify-Gates** — parallele Gate-Fehler, Runner-Auflösungsfehler und Scope-Fehler liefern klarere Hinweise, während Test-Gates und Review-Baselines aktualisiert und dokumentiert wurden + +### Fixed + +- **Flaky Cross-Test-State in Browser- und Parallelgates** — Playwright-Smoke-Scope, Spec-State-Isolation und parallele Gate-Optionsfehler sind gegen versteckte Zustandskopplung und schwer lesbare Fehlschläge abgesichert +- **Unvollständige Fehlerpfadabdeckung in Server-Runtime und Auto-Import** — Background-Concurrency, Router-Auto-Import-Fehler, process-adjacent Runtime-Kanten und Runner-Resolution-Branches sind jetzt 
gezielt getestet +- **Shared-Declaration- und Architekturdrift** — gemeinsame Deklarationsverträge werden explizit geschützt, damit geteilte Typ- und Boundary-Erwartungen nicht unbemerkt auseinanderlaufen + +### Commits + +- Enthält alle Branch-Commits seit `v6.2.9`: `f5cf2b6`, `104d663`, `aec28b2`, `892d551`, `c4e3049`, `475aee4`, `5a701b7`, `0834a07`, `ca29012`, `04aa7a6`, `be994fd`, `708c3c7`, `7e60008`, `efb9cee`, `eab08c5`, `f2b3400`, `7555c0e`, `1657822`, `b7fbe80`, `67db759`, `44c073d`, `c4882ae`, `2932697`, `944a049`, `e9d54fa`, `117cfeb`, `b5cac6a`, `3a4285d`, `cb31d35`, `ab49513`, `c4abb2b`, `5f69ade`, `acb22ab` + ## [6.2.9] - 2026-04-28 ### Improved diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7525e01..97b8d42 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -41,19 +41,26 @@ For feature requests, explain the user problem first. Suggestions that only desc Make sure the change is small, focused, and aligned with the existing product direction. -Run the main local checks: +Run the full local gate before opening a PR: ```bash -npm run verify -npm run test:e2e +npm run verify:full ``` -`npm run verify` covers formatting, ESLint, `tsc --noEmit`, unit tests, the production bundle, and packaged-artifact verification. If you want the same coverage gate used in release preparation, also run: +On a local machine with enough CPU, the staged parallel gate gives faster feedback across the same +main test surfaces without the coverage-instrumented pass: ```bash -npm run test:unit:coverage +PLAYWRIGHT_TEST_PORT=3016 npm run verify:full:parallel ``` +Do not use the parallel fast path as a replacement for the final coverage gate when a change affects +coverage thresholds; keep `npm run verify:full` or `npm run test:vitest:coverage` in that validation. + +`npm run verify` remains the faster non-browser gate for inner-loop work. 
It covers formatting, +ESLint, `tsc --noEmit`, Vitest without coverage instrumentation, the production bundle, and +packaged-artifact verification. + If you only need the production bundle without the lint/format gate, use: ```bash @@ -66,7 +73,7 @@ If local port `3015` is already occupied, run Playwright on another isolated por PLAYWRIGHT_TEST_PORT=3016 npm run test:e2e ``` -The Playwright suite uses an isolated local app directory under `.tmp-playwright/` and should not reuse your normal local dashboard data. `npm run verify:package` builds the real tarball and verifies that the packaged CLI can start outside the repo checkout. +The Playwright suite starts an isolated local app per worker under `.tmp-playwright/workers/` and should not reuse your normal local dashboard data. `npm run verify:package` builds the real tarball and verifies that the packaged CLI can start outside the repo checkout. Then manually verify the main user flows touched by your change: diff --git a/README.md b/README.md index b647618..a17014a 100644 --- a/README.md +++ b/README.md @@ -98,8 +98,8 @@ Then either: The auto-import path prefers: 1. local `toktrack` -2. `bunx toktrack@2.5.0` -3. `npx --yes toktrack@2.5.0` +2. `bunx toktrack@latest` +3. 
`npx --yes toktrack@latest` ## Common Commands @@ -334,25 +334,35 @@ npm run build npm run build:app ``` -Run automated checks: +Run the main local gate without Playwright: ```bash npm run verify ``` -For the full release-style local gate without re-running unit tests or rebuilding twice, use: +For the full serial CI/release-style local gate, including coverage thresholds and Playwright, use: ```bash npm run verify:full ``` +On a local machine with enough CPU, the staged parallel fast path gives quicker feedback across the +same main test surfaces without the coverage-instrumented pass: + +```bash +PLAYWRIGHT_TEST_PORT=3016 npm run verify:full:parallel +``` + +Keep `npm run verify:full` or `npm run test:vitest:coverage` in the final validation path whenever +coverage thresholds matter. + If you want to run the steps individually, use: ```bash npm run check:deps npm run test:architecture npm run test:unit:coverage -npm run test:e2e +npm run test:e2e:ci ``` To inspect the slowest suites and test cases after a Vitest run: @@ -361,7 +371,7 @@ To inspect the slowest suites and test cases after a Vitest run: npm run test:timings ``` -The Playwright suite uses its own isolated local app directory. If port `3015` is already occupied locally, run it on another isolated port: +The Playwright suite starts an isolated local app per worker under `.tmp-playwright/workers/`. 
If the base port `3015` is already occupied locally, run it from another base port: ```bash PLAYWRIGHT_TEST_PORT=3016 npm run test:e2e diff --git a/RELEASING.md b/RELEASING.md index 8b3bb04..99c942e 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -45,6 +45,15 @@ Optional local confidence check before starting the workflow: npm run verify:full ``` +For faster local behavior-test feedback before the final serial gate, use the staged parallel path: + +```bash +PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run verify:full:parallel +``` + +It does not replace `npm run verify:full` for release confidence because the serial gate evaluates +the coverage-instrumented pass. + If port `3015` is already occupied locally, keep the same one-pass gate and only override the Playwright port: ```bash diff --git a/bun.lock b/bun.lock index 55d5f4b..7d75ec6 100644 --- a/bun.lock +++ b/bun.lock @@ -9,7 +9,7 @@ "i18next": "^26.0.6", "react-i18next": "^17.0.4", "react-is": "^19.2.5", - "toktrack": "2.5.0", + "toktrack": "2.6.0", }, "devDependencies": { "@eslint/js": "^9.39.4", @@ -49,7 +49,7 @@ "jsdom": "^29.0.2", "lucide-react": "^1.8.0", "prettier": "^3.8.3", - "prettier-plugin-tailwindcss": "^0.7.2", + "prettier-plugin-tailwindcss": "^0.8.0", "react": "^19.2.5", "react-dom": "^19.2.5", "recharts": "^3.8.1", @@ -1063,7 +1063,7 @@ "prettier": ["prettier@3.8.3", "", { "bin": { "prettier": "bin/prettier.cjs" } }, "sha512-7igPTM53cGHMW8xWuVTydi2KO233VFiTNyF5hLJqpilHfmn8C8gPf+PS7dUT64YcXFbiMGZxS9pCSxL/Dxm/Jw=="], - "prettier-plugin-tailwindcss": ["prettier-plugin-tailwindcss@0.7.2", "", { "peerDependencies": { "@ianvs/prettier-plugin-sort-imports": "*", "@prettier/plugin-hermes": "*", "@prettier/plugin-oxc": "*", "@prettier/plugin-pug": "*", "@shopify/prettier-plugin-liquid": "*", "@trivago/prettier-plugin-sort-imports": "*", "@zackad/prettier-plugin-twig": "*", "prettier": "^3.0", "prettier-plugin-astro": "*", "prettier-plugin-css-order": "*", 
"prettier-plugin-jsdoc": "*", "prettier-plugin-marko": "*", "prettier-plugin-multiline-arrays": "*", "prettier-plugin-organize-attributes": "*", "prettier-plugin-organize-imports": "*", "prettier-plugin-sort-imports": "*", "prettier-plugin-svelte": "*" }, "optionalPeers": ["@ianvs/prettier-plugin-sort-imports", "@prettier/plugin-hermes", "@prettier/plugin-oxc", "@prettier/plugin-pug", "@shopify/prettier-plugin-liquid", "@trivago/prettier-plugin-sort-imports", "@zackad/prettier-plugin-twig", "prettier-plugin-astro", "prettier-plugin-css-order", "prettier-plugin-jsdoc", "prettier-plugin-marko", "prettier-plugin-multiline-arrays", "prettier-plugin-organize-attributes", "prettier-plugin-organize-imports", "prettier-plugin-sort-imports", "prettier-plugin-svelte"] }, "sha512-LkphyK3Fw+q2HdMOoiEHWf93fNtYJwfamoKPl7UwtjFQdei/iIBoX11G6j706FzN3ymX9mPVi97qIY8328vdnA=="], + "prettier-plugin-tailwindcss": ["prettier-plugin-tailwindcss@0.8.0", "", { "peerDependencies": { "@ianvs/prettier-plugin-sort-imports": "*", "@prettier/plugin-hermes": "*", "@prettier/plugin-oxc": "*", "@prettier/plugin-pug": "*", "@shopify/prettier-plugin-liquid": "*", "@trivago/prettier-plugin-sort-imports": "*", "@zackad/prettier-plugin-twig": "*", "prettier": "^3.0", "prettier-plugin-astro": "*", "prettier-plugin-css-order": "*", "prettier-plugin-jsdoc": "*", "prettier-plugin-marko": "*", "prettier-plugin-multiline-arrays": "*", "prettier-plugin-organize-attributes": "*", "prettier-plugin-organize-imports": "*", "prettier-plugin-sort-imports": "*", "prettier-plugin-svelte": "*" }, "optionalPeers": ["@ianvs/prettier-plugin-sort-imports", "@prettier/plugin-hermes", "@prettier/plugin-oxc", "@prettier/plugin-pug", "@shopify/prettier-plugin-liquid", "@trivago/prettier-plugin-sort-imports", "@zackad/prettier-plugin-twig", "prettier-plugin-astro", "prettier-plugin-css-order", "prettier-plugin-jsdoc", "prettier-plugin-marko", "prettier-plugin-multiline-arrays", "prettier-plugin-organize-attributes", 
"prettier-plugin-organize-imports", "prettier-plugin-sort-imports", "prettier-plugin-svelte"] }, "sha512-V8ITGH87yuBDF6JpEZTOVlUz/saAwqb8f3HRgUj8Lh+tGCcrmorhsLpYqzygwFwK0PE2Ib6Mv3M7T/uE2tZV1g=="], "pretty-format": ["pretty-format@27.5.1", "", { "dependencies": { "ansi-regex": "^5.0.1", "ansi-styles": "^5.0.0", "react-is": "^17.0.1" } }, "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ=="], @@ -1255,7 +1255,7 @@ "to-valid-identifier": ["to-valid-identifier@1.0.0", "", { "dependencies": { "@sindresorhus/base62": "^1.0.0", "reserved-identifiers": "^1.0.0" } }, "sha512-41wJyvKep3yT2tyPqX/4blcfybknGB4D+oETKLs7Q76UiPqRpUJK3hr1nxelyYO0PHKVzJwlu0aCeEAsGI6rpw=="], - "toktrack": ["toktrack@2.5.0", "", { "os": [ "linux", "win32", "darwin", ], "cpu": [ "x64", "arm64", ], "bin": { "toktrack": "bin/run.js" } }, "sha512-WJYj86vF6uQ2NA3eBQZ7bzN9Idrn12i3u2vEqT7FkjuTAz6zaRXX1/Z1JjIyurc7OR8xfzE78PiONhCTl0T3jw=="], + "toktrack": ["toktrack@2.6.0", "", { "os": [ "linux", "win32", "darwin", ], "cpu": [ "x64", "arm64", ], "bin": { "toktrack": "bin/run.js" } }, "sha512-Gq6Q7hsMc8i3Ld4gG7pCgG63D4QADPDjquWiQ8lRsmio1+WXSK+7E85MLNAiHthb1Qdwv2Tsk9uVa90YPVbAxQ=="], "tough-cookie": ["tough-cookie@6.0.1", "", { "dependencies": { "tldts": "^7.0.5" } }, "sha512-LktZQb3IeoUWB9lqR5EWTHgW/VTITCXg4D21M+lvybRVdylLrRMnqaIONLVb5mav8vM19m44HIcGq4qASeu2Qw=="], diff --git a/docs/architecture.md b/docs/architecture.md index c0886c5..c25a12e 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -52,15 +52,28 @@ The server runtime is intentionally split so `server.js` stays an executable shi - owns local mutable runtime state services such as runtime snapshots, singleton runtime leases, and expiring async caches - keeps startup flags, auto-import leases, and toktrack version lookup cache state scoped to one composed app runtime - `server/data-runtime.js` - - owns app-path resolution, persisted usage/settings IO, migration, and file-mutation locks + - is 
the facade for app-path resolution, persisted usage/settings IO, migration, import merging, and file-mutation locks - consumes the shared settings contract instead of defining local settings defaults or normalizers + - composes focused services from `server/data-runtime/**`: + - `app-paths.js` exposes `resolveDataRuntimeAppPaths` for platform and env override path selection + - `file-io.js` exposes `createDataRuntimeFileIo` for secure directory setup, atomic JSON writes, file modes, and missing-file handling + - `file-locks.js` exposes `createDataRuntimeFileLocks` for in-process and cross-process mutation locks around data/settings files + - `import-merge.js` exposes `createDataRuntimeImportMerge` for backup payload extraction, usage-day equivalence, and non-destructive merge summaries + - wiring model: `server/data-runtime.js` imports these four submodules, injects filesystem/path/runtime configuration, and returns one public data-runtime facade to `server/app-runtime.js` - `server/cli.js` - owns CLI alias normalization, help text, positional command parsing, and port validation - `server/background-runtime.js` - owns background instance registry, start/stop flows, and registry locking - `server/auto-import-runtime.js` - - owns toktrack runner resolution, subprocess execution, version lookup, and auto-import execution + - is the facade for toktrack runner resolution, subprocess execution, version lookup, singleton leasing, and auto-import execution - uses the injected runtime-state services for singleton import leasing and latest-version cache isolation + - composes focused services from `server/auto-import-runtime/**`: + - `messages.js` exposes structured progress/error events and server-side fallback message formatting + - `command-runner.js` exposes cross-platform command spawning, timeout handling, process termination, and command diagnostics + - `toktrack-runners.js` exposes local/bunx/npx runner construction, probing, timeout selection, version parsing, and 
runner-resolution errors + - `latest-version.js` exposes the cached npm registry lookup for the latest toktrack version + - `import-executor.js` exposes singleton lease acquisition, toktrack JSON import orchestration, persistence updates, and startup auto-load logging + - wiring model: `server/auto-import-runtime.js` imports these five submodules, injects process/filesystem/cache/runtime configuration, and returns one public auto-import facade to `server/app-runtime.js` - `server/http-router.js` - owns API routing, SSE wiring, and static asset dispatch with injected runtime dependencies - must acquire auto-import work through `server/auto-import-runtime.js` instead of keeping route-local import flags @@ -81,7 +94,7 @@ The server runtime is intentionally split so `server.js` stays an executable shi - Local auth session state - `server/startup-runtime.js` writes the current local session metadata to a restrictive `session-auth.json` file in the user config dir through injected data-runtime IO - `server/background-runtime.js` stores per-instance auth headers and bootstrap URLs in the restrictive background registry so `ttdash stop` and no-open background starts stay usable -- `server/http-utils.js`, `server/runtime.js`, `server/process-utils.js`, `server/report/**` +- `server/http-utils.js`, `server/runtime.js`, `server/runtime-formatters.js`, `server/process-utils.js`, `server/report/**` - shared support modules used by the composed runtimes ## Shared Settings Contract @@ -92,6 +105,9 @@ Persisted settings are a shared contract across the frontend bootstrap path and - owns app-level settings defaults, provider-limit normalization, and timestamp/load-source coercion - consumes `shared/dashboard-preferences.js` for dashboard-specific filter/section defaults and normalization - is the only production module that should define persisted settings defaults or normalization rules +- `shared/*.d.ts` + - must expose the same value exports as their sibling CommonJS modules + - 
are guarded by `tests/architecture/shared-declaration-contract.test.ts` so runtime exports cannot drift from TypeScript consumers silently - `src/lib/app-settings.ts` - is a typed frontend adapter over `shared/app-settings.js` - may keep DOM-only behavior such as `applyTheme`, but must not recompute settings defaults from local helpers @@ -156,6 +172,9 @@ Dashboard-specific presets, static section metadata, and preset date semantics a - `src/components/dashboard/DashboardSections.tsx` - consumes a single `DashboardSectionsViewModel` - should keep section ownership grouped by section bundle instead of reintroducing broad prop lists +- `src/components/dashboard/sections/dashboard-section-renderer-types.d.ts` + - owns the type-only renderer contract shared by section family renderers + - must stay declaration-only because it is intentionally erased from the runtime dependency graph - `src/components/layout/FilterBar.tsx` - owns the public filter bar shell and composes private layout filter groups for status, time presets, date range, and provider/model chips - `src/components/layout/FilterBar*.tsx` @@ -200,10 +219,12 @@ Reason: the repo has stable layer boundaries, but it is not structured around st - dependency graph validation: `npm run check:deps` - dependency graph visualization: `npm run deps:graph` +- static quality gate: `npm run test:static` +- all Vitest projects without coverage: `npm run test:vitest` - architecture tests only: `npm run test:architecture` - main release-style local gate: `npm run verify:full` -Both `ci.yml` and `release.yml` run `check:deps` and `test:architecture` explicitly so dependency and architecture violations show up as separate CI failures instead of being hidden inside a larger local gate. +`ci.yml` runs static checks, architecture tests, Vitest projects, coverage, build, package smoke, and Playwright as separate DAG jobs so independent failures are visible without waiting for one serial full-gate job. 
`release.yml` still runs `check:deps` and `test:architecture` explicitly during the release path so dependency and architecture violations are not hidden inside a larger local gate. ## Contributor Rules diff --git a/docs/review/README.md b/docs/review/README.md deleted file mode 100644 index c838caa..0000000 --- a/docs/review/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# TTDash Ultra-Deep Review - -## Kurzfazit - -Der Codebase-Stand ist insgesamt solide: die wichtigsten Qualitaetsgates laufen gruens, die Testlandschaft ist deutlich ueber Durchschnitt, und mehrere fruehere Security-Luecken sind inzwischen sauber abgesichert. Die groessten aktuellen Risiken liegen nicht in akuten Produktionsfehlern, sondern in Struktur, Wartbarkeit, Testsignalen und der wachsenden Dichte der Dashboard-Oberflaeche. - -## Durchgefuehrte Evidenzarbeit - -- Statische Review von `src/**`, `server.js`, `server/**`, `shared/**`, `tests/**`, `docs/architecture.md`, `docs/testing.md`, `vite.config.ts`, `vitest.config.ts`, `.dependency-cruiser.cjs` -- Ausgefuehrte Gates: - - `npm run check` -> bestanden - - `npm run test:unit` -> bestanden (`99` Dateien, `369` Tests bestanden, `1` uebersprungen) - - `npm run test:unit:coverage` -> bestanden - - `npm run test:timings` -> bestanden - - `npm run build:app` -> bestanden -- Architektur-Spezialfall: - - `npm run test:architecture` schlug einmal fehl, weil `tests/architecture/frontend-layers.test.ts` im Gesamtlauf in den `5000ms` Timeout lief - - isolierter Re-Run derselben Datei bestand, aber der langsamste Fall lag bei ca. `4950ms` - -## Wichtigste Querschnittsbefunde - -1. Die groessten Wartbarkeitsrisiken sitzen inzwischen weniger im Server-Entrypoint und staerker in wenigen Frontend-Mega-Modulen: `use-dashboard-controller.ts`, `SettingsModal.tsx`, `DashboardSections.tsx`, `FilterBar.tsx`. -2. 
Die Architektur-Grenzen sind auf Repo-Ebene gut abgesichert, aber die Anwendungslogik ist intern noch zu stark zentralisiert und ueber breite Props- und Return-Surfaces gekoppelt. -3. Die Security-Hardening-Basis ist fuer den Default-Loopback-Betrieb gut; lokale Read-APIs sind per per-start Session-Token geschuetzt, `TTDASH_ALLOW_REMOTE=1` bleibt ein separater token-gesicherter Betriebsmodus, und die Style-CSP kommt ohne `unsafe-inline` aus. -4. Die Testbasis ist breit, aber die gemeldete Coverage unterschaetzt nicht nur Luecken, sondern blendet ganze produktive Runtime-Bereiche aus. -5. Die Dashboard-Oberflaeche ist funktional stark und accessibility-bewusst, wirkt aber an mehreren Stellen ueberladen und pflegt zu viele Interaktionsmuster in zu wenigen Komponenten. - -## Wichtige Messpunkte - -- Urspruengliche Coverage-Summary aus `npm run test:unit:coverage`: - - Statements `76.27%` - - Branches `65.71%` - - Functions `76.43%` - - Lines `78.61%` -- Aktueller Stand zu `test-review.md / H-02`: - - `npm run test:unit:coverage` zaehlt inzwischen `src/**/*.{ts,tsx}`, `server.js`, `server/**/*.js`, `shared/**/*.js` und `usage-normalizer.js` - - Die neue Produkt-Runtime-Baseline liegt bei Statements `72.85%`, Branches `63.01%`, Functions `74.97%`, Lines `73.88%` - - Server-Entrypoints, Child-Process-Pfade und lazy Dashboard-Sektionen bleiben dadurch sichtbar, auch wenn sie nicht alle direkt im Vitest-Hauptprozess hohe Line-Coverage erzeugen -- Langsamste Test-Suites aus `npm run test:timings`: - - `tests/integration/server-background.test.ts` -> `5.785s` - - `tests/integration/server-auto-import.test.ts` -> `4.521s` - - `tests/unit/server-helpers-runner-process.test.ts` -> `2.778s` - - `tests/frontend/settings-modal-language.test.tsx` -> `2.443s` - - `tests/frontend/drill-down-modal-motion.test.tsx` -> `2.013s` -- Build-Signal aus `npm run build:app`: - - `charts-vendor` -> `422.02 kB` raw / `119.59 kB` gzip - - `index` -> `212.32 kB` raw / `57.55 kB` gzip - - 
`react-vendor` -> `200.38 kB` raw / `64.43 kB` gzip - - `motion-vendor` -> `125.85 kB` raw / `41.08 kB` gzip - - `i18n` -> `119.68 kB` raw / `36.70 kB` gzip - -## Berichte - -- [code-review.md](./code-review.md) -- [architecture-review.md](./architecture-review.md) -- [security-review.md](./security-review.md) -- [performance-review.md](./performance-review.md) -- [dashboard-review.md](./dashboard-review.md) -- [server-review.md](./server-review.md) -- [test-review.md](./test-review.md) -- [fixed-findings.md](./fixed-findings.md) - -## Bewertungslogik - -- `Hoch`: strukturelles oder betriebliches Risiko mit klarer Folgewirkung auf Aenderungssicherheit, Security oder Teamtempo -- `Mittel`: echtes Problem mit begrenztem oder lokalem Blast Radius -- `Niedrig`: saubere Verbesserung mit niedrigerem Risiko, aber gutem Langzeitnutzen diff --git a/docs/review/architecture-review.md b/docs/review/architecture-review.md deleted file mode 100644 index 4d47d92..0000000 --- a/docs/review/architecture-review.md +++ /dev/null @@ -1,70 +0,0 @@ -# Architecture Review - -## Kurzfazit - -Die Repo-weiten Guardrails sind gut gedacht und weitgehend wirksam. Das groesste Architekturproblem ist nicht fehlende Regelung, sondern zu viel funktionale Konzentration in wenigen Modulen, waehrend andere Teile bereits sauber extrahiert wurden. - -## Was bereits gut ist - -- `dependency-cruiser`, `eslint-plugin-boundaries` und `archunit` decken unterschiedliche Strukturfragen sinnvoll ab -- `shared/dashboard-domain.js` wird bereits sowohl server- als auch frontendseitig genutzt -- Die Runtime-Trennung `src` vs `server` vs `shared` ist dokumentiert und im Check-Setup sichtbar verankert - -## Findings - -### H-01 - `server.js` ist die dominante Architektur-Engstelle - -**Referenzen:** `server.js` insgesamt, besonders `75-92`, `444-542`, `1747-1774`, `2208-2451`, `2482-2975` - -`server.js` umfasst `2992` Zeilen und rund `125` Funktionsdefinitionen. 
Darin liegen gleichzeitig: - -- CLI-Argumente -- Port- und Runtime-Setup -- Datei- und Prozess-Locks -- Background-Registry -- Settings- und Usage-Persistenz -- HTTP-Guards und Routing -- Auto-Import samt Runner-Erkennung -- PDF-Reporting -- Static Serving -- Shutdown-Pfade - -Das erzeugt hohe Aenderungskopplung. Ein Bugfix an Import- oder Background-Logik verlaengert unmittelbar die Review-Flaeche fuer HTTP-, CLI- und Persistenzcode. - -**Empfehlung:** das Runtime-Modul weiter schneiden, z. B. `server/settings-runtime`, `server/data-runtime`, `server/background-runtime`, `server/auto-import-runtime`, `server/http-router`. - -### H-02 - Der Settings-Vertrag ist client- und serverseitig dupliziert - -**Referenzen:** `server.js:75-92`, `server.js:1403-1485`, `src/lib/app-settings.ts:20-91`, `src/lib/dashboard-preferences.ts:124-220` - -Defaults und Normalisierung fuer Settings leben mehrfach: - -- Server: `DEFAULT_SETTINGS`, `normalizeProviderLimits`, `normalizeDefaultFilters`, `normalizeSectionVisibility`, `normalizeSectionOrder`, `normalizeSettings` -- Frontend: `DEFAULT_APP_SETTINGS`, `normalizeAppSettings`, `normalizeDashboardDefaultFilters`, `normalizeDashboardSectionVisibility`, `normalizeDashboardSectionOrder` - -Die Logik ist semantisch aehnlich, aber nicht aus einer gemeinsamen Quelle abgeleitet. Das ist ein Drift-Risiko: ein neuer Settings-Key, ein neues Default oder eine veraenderte Normalisierungsregel kann unbemerkt asymmetrisch werden. - -**Empfehlung:** einen gemeinsamen Settings-Schema- und Normalisierungs-Layer nach `shared/**` ziehen. - -### M-01 - Die Dashboard-Orchestrierung haengt an sehr breiten Props- und Return-Surfaces - -**Referenzen:** `src/hooks/use-dashboard-controller.ts:665-760`, `src/components/dashboard/DashboardSections.tsx:179-219`, `src/components/Dashboard.tsx:301-456` - -Der Controller liefert `95` Rueckgabefelder. `DashboardSectionsProps` umfasst `35` Inputs. 
`Dashboard.tsx` agiert dadurch eher als Durchreicher denn als klarer Kompositionspunkt. - -Das ist architektonisch teuer: - -- grosse Merge-Flaechen -- hohe Aenderungskosten fuer kleine Features -- breite Re-Render- und Testflaechen -- schwache lokale Ownership der Subsysteme - -**Empfehlung:** Sections ueber bewusstere View-Model-Bundles versorgen, z. B. `filtersViewModel`, `forecastViewModel`, `tableViewModel`, `actions`. - -### M-02 - Die gute Shared-Domain-Extraktion wurde noch nicht bis zu den Settings und UI-Regeln durchgezogen - -**Referenzen:** `shared/dashboard-domain.js`, `src/lib/data-transforms.ts:8-16`, `src/lib/calculations.ts:6-16` - -Bei Metrics und Transformationsregeln ist die Richtung bereits richtig: Frontend und Server ziehen gemeinsame Logik aus `shared/dashboard-domain.js`. Bei Settings, Presets und Teilen der UI-Regeln fehlt dieselbe Konsequenz noch. - -**Empfehlung:** dieselbe Shared-Strategie selektiv auf Settings-Schema, Preset-Definitionen und andere rein fachliche Regeln ausweiten. diff --git a/docs/review/code-review.md b/docs/review/code-review.md deleted file mode 100644 index 18a36a9..0000000 --- a/docs/review/code-review.md +++ /dev/null @@ -1,54 +0,0 @@ -# Code Review - -## Kurzfazit - -Die Codequalitaet ist im Mittel gut: Typsicherheit, Namensgebung, Testtiefe und defensive API-Fehlerbehandlung sind ueberwiegend stark. Die Hauptschwaechen sind zentrale God-Objects, Dopplungen in der UI-Logik und etwas liegengebliebener Dead Code. 
- -## Was bereits gut ist - -- Die produktiven Grenzschichten sind bewusst organisiert (`src`, `server`, `shared`) -- Gemeinsame Domainlogik fuer Datenfilterung und Kennzahlen wurde bereits nach `shared/dashboard-domain.js` gezogen -- Fehlertexte und i18n-Pfade sind grossenteils konsistent -- Upload-, Import- und Export-Flows haben klare Fehlerrueckmeldungen und sichtbare Busy-States - -## Findings - -### H-01 - `use-dashboard-controller.ts` ist ein God-Hook mit sehr breiter API - -**Referenzen:** `src/hooks/use-dashboard-controller.ts:102-760`, besonders `69-89`, `393-657`, `665-760` - -Der Hook vereint Bootstrap, React Query Orchestrierung, Theme-Anwendung, i18n-Synchronisierung, Toasts, Datei-Import/Export, PDF-Download, Scroll-Navigation, Error-State-Bildung und UI-Dialogsteuerung in einem Modul mit `763` Zeilen. Der finale Return-Block exportiert `95` Felder. - -Das ist der groesste Clean-Code-Befund im Frontend: die Datei hat mehrere Gruende, sich gleichzeitig zu aendern, und entkoppelt weder Browser-I/O noch Produktlogik sauber. Die Tests sind deshalb gezwungen, grosse Zustandsflaechen zu kennen, statt kleine Einheiten gezielt zu pruefen. - -**Empfehlung:** in kleinere Hooks oder Controller-Slices zerlegen, z. B. Bootstrap/Load-State, Settings-Orchestrierung, Data-Transfer, Report/Download, UI-Only Actions. Browser-I/O wie Blob-Downloads aus dem Hook in kleine Helper oder View-Layer verschieben. - -### M-01 - Preset-Logik ist doppelt implementiert - -**Referenzen:** `src/components/layout/FilterBar.tsx:71-117`, `src/hooks/use-dashboard-filters.ts:17-45` - -`resolveActivePreset(...)` und `resolvePresetRange(...)` codieren dieselben Datumspresets (`7d`, `30d`, `month`, `year`, `all`) getrennt. Eine Aenderung an den Preset-Regeln kann deshalb dazu fuehren, dass der angewendete Filter korrekt ist, die aktive UI-Markierung aber etwas anderes zeigt. 
- -Das ist ein klassischer Konsistenz- und Duplikationsbefund: fachliche Regeln sollten fuer Anwenden und Anzeigen aus derselben Quelle kommen. - -**Empfehlung:** Preset-Definitionen in ein gemeinsames Modul ueberfuehren und sowohl Hook als auch FilterBar nur noch daraus ableiten lassen. - -### M-02 - `SettingsModal.tsx` mischt zu viele Verantwortlichkeiten - -**Referenzen:** `src/components/features/settings/SettingsModal.tsx:1-1026`, besonders `53-83`, `219-356` - -Die Settings-Komponente vereint Sprachwahl, Motion-Einstellungen, Provider-Limits, Default-Filter, Section-Visibility und -Order, Backup-Import/Export sowie einen live geladenen Toktrack-Versionsstatus in einer einzigen Datei mit `1026` Zeilen. - -Das ist nicht nur ein Architekturthema, sondern auch ein Code-Consistency-Problem: je groesser die Datei wird, desto wahrscheinlicher werden inkonsistente Patterns fuer State, Reset, Busy-Zustaende und Fehlerbehandlung. - -**Empfehlung:** in klar getrennte Subviews oder Dialog-Sektionen zerlegen, z. B. `General`, `Defaults`, `Sections`, `Data`, `Versions`. - -### N-01 - Es gibt ungenutzte Hooks mit `0%` Coverage - -**Referenzen:** `src/hooks/use-theme.ts:1-21`, `src/hooks/use-provider-limits.ts:1-17` - -Beide Hooks sind im Repo vorhanden, aber weder produktiv importiert noch testseitig abgedeckt. Gleichzeitig melden die Coverage-Daten fuer beide `0%`. - -Das ist kleiner als die God-Hook-Befunde, aber trotzdem wichtig: unbenutzter Code erzeugt Pflegekosten, verlaengert Suchraeume bei Refactors und verwirrt Guardrails, wenn er trotz `no-orphans-src` Regel nicht als Problem auftaucht. - -**Empfehlung:** entfernen oder bewusst wieder integrieren und dann gezielt testen. 
diff --git a/docs/review/dashboard-review.md b/docs/review/dashboard-review.md deleted file mode 100644 index 414eaba..0000000 --- a/docs/review/dashboard-review.md +++ /dev/null @@ -1,65 +0,0 @@ -# Dashboard Review - -> Historical note: Findings M-01, M-02, N-01, and N-02 are resolved historical -> review items. See `docs/review/fixed-findings.md` for the implementation -> status and guardrails. - -## Kurzfazit - -Das Dashboard ist funktional stark und sichtbar mit Fokus auf Accessibility, Internationalisierung und lehrreiche Analytik gebaut. Die Kehrseite ist eine hohe Oberflaechen- und Interaktionsdichte, die sowohl Nutzer als auch Maintainer belastet. - -## Was bereits gut ist - -- Leere-, Fehler- und Erfolgszustaende sind bewusst modelliert -- Filter, Motion und Dialoge sind durch viele gezielte Frontend-Tests abgesichert -- Deutsche und englische Terminologie werden aktiv gepflegt -- Forecast-, Drilldown- und Tabellenoberflaechen sind keine blossen "nice to have", sondern tief integriert - -## Findings - -### M-01 - Der Settings-Dialog ist fuer eine einzelne Oberflaeche ueberladen - -**Referenzen:** `src/components/features/settings/SettingsModal.tsx:53-83`, `219-356`, gesamte Datei mit `1026` Zeilen - -Nutzer bekommen in einem einzigen Modal gleichzeitig: - -- Sprachumschaltung -- Motion-Einstellungen -- Default-Filter -- Provider-Limits -- Sichtbarkeit und Reihenfolge der Sektionen -- Settings- und Daten-Import/Export -- Versionsstatus von Toktrack -- Data-Status und Load-Quelle - -Das ist fuer Power-User noch beherrschbar, fuer neue oder seltene Nutzer aber schnell ein "scroll and search" Workflow statt einer gefuehrten Konfiguration. - -**Empfehlung:** in klar getrennte Bereiche oder Tabs schneiden und "day-to-day" Einstellungen von "advanced / admin" Funktionen trennen. 
- -### M-02 - Die Filterleiste kombiniert zu viele Interaktionsmuster in einer Komponente - -**Referenzen:** `src/components/layout/FilterBar.tsx` insgesamt, besonders `27-47`, `71-117`, `119-317` - -Die FilterBar mischt ViewMode-Wechsel, Monatsauswahl, Provider- und Modellchips, aktive Preset-Erkennung und einen komplett eigenen Date-Picker inklusive Focus- und Keyboard-Management. Das zeigt hohe Ambition, macht die Komponente aber schwer ueberschaubar und schwer aenderbar. - -Die Accessibility-Tests belegen, dass der aktuelle Zustand funktioniert. Gleichzeitig zeigt die Menge an Focus- und Overlay-Logik, wie fragil dieser Bereich werden kann. - -**Empfehlung:** Date-Picker und Chip-Filter in klar getrennte, kleinere Subkomponenten auslagern. - -### N-01 - Die Action-Landschaft ist reich, aber verstreut - -**Referenzen:** `src/components/Dashboard.tsx:219-456` - -Upload, Auto-Import, Export, Settings, Help, Report, Filter, Command Palette und section-based Navigation sind ueber Header, Empty State, Settings, Dialoge und Command Palette verteilt. Das erhoeht die Entdeckbarkeit, aber auch den kognitiven Overhead. - -Vor allem fuer seltene Aufgaben wie Backups, Reset oder Toktrack-Versionsstatus ist nicht immer klar, ob sie "daily use" oder "advanced maintenance" sind. - -**Empfehlung:** primaere Alltagsaktionen deutlicher von Wartungs- und Diagnoseaktionen trennen. - -### N-02 - Die Dashboard-Struktur ist intern deutlich breiter als von aussen sichtbar - -**Referenzen:** `src/components/dashboard/DashboardSections.tsx:179-219`, `src/components/Dashboard.tsx:301-390` - -`DashboardSectionsProps` umfasst `35` Inputs. Das ist weniger ein direkter UX-Bug als ein Hinweis darauf, dass das Dashboard intern bereits sehr viele Konzepte gleichzeitig aufspannt. Solche Breite endet oft spaeter als inkonsistente UX. - -**Empfehlung:** pro Dashboard-Sektion bewusstere View-Model-Grenzen definieren, damit Fach- und UI-Entscheidungen wieder lokaler werden. 
diff --git a/docs/review/fixed-findings.md b/docs/review/fixed-findings.md deleted file mode 100644 index 7803c94..0000000 --- a/docs/review/fixed-findings.md +++ /dev/null @@ -1,586 +0,0 @@ -# Fixed Findings - -## 2026-04-27 - -### test-review.md / H-01 - -- Status: fixed -- Scope: simple architecture rules no longer depend on repeated ArchUnit project scans. `tests/architecture/source-graph.ts` now owns one cached `src/**` file scan, TypeScript-based import/export parsing, alias resolution for `@/...`, relative import resolution, extension resolution, and `index` module resolution. The frontend layer, unused-hook, hook-naming, and shared-UI-placement tests use this shared graph while the feature-slice diagram remains on ArchUnit where its diagram model adds value. -- Guardrails: `tests/architecture/frontend-layers.test.ts` still blocks hooks from importing components, lib core from importing hooks/components, lib React modules from reaching back into hooks/components, and type modules from depending on components/hooks/lib. `tests/architecture/unused-hooks.test.ts`, `hook-naming.test.ts`, and `shared-ui-placement.test.ts` now reuse the same source graph so future test-layer changes do not reintroduce separate slow scans. `tests/architecture/source-graph.test.ts` covers static imports, re-exports, side-effect imports, and dynamic imports with options. -- Follow-up quality fixes during implementation: - - Dynamic `import(...)` calls are parsed alongside static imports and re-exports, so lazy edges stay visible to the architecture guardrails instead of only top-level declarations being checked. - - The formerly near-timeout `hooks must not depend on components` rule dropped from the historical `~4950ms` flake boundary to well below `100ms` in the targeted architecture run, without increasing global or local timeouts. - - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. 
-- Validation: - - `npm run test:architecture -- --reporter=verbose` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run check:deps` - - `git diff --check` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 1 minor status-line suggestion already satisfied by the current `- Status: fixed` text, round 3: 1 dynamic-import options issue fixed, final round: 0 issues - -### test-review.md / H-02 - -- Status: fixed -- Scope: Vitest coverage now reports against the product runtime instead of the former narrow frontend slice. `vitest.config.ts` includes `src/**/*.{ts,tsx}`, `server.js`, `server/**/*.js`, `shared/**/*.js`, and `usage-normalizer.js`, while excluding only TypeScript declarations, test files, and locale JSON assets from the configured runtime denominator. -- Guardrails: `tests/unit/vitest-coverage-config.test.ts` locks the coverage includes, excludes, and broad-denominator global thresholds. The thresholds ratchet the new signal at Statements `70`, Branches `60`, Functions `70`, and Lines `70`, below the measured broad baseline so CI fails on real regressions without making the first honest denominator brittle. -- Follow-up quality fixes during implementation: - - The current `npm run test:unit:coverage` baseline is Statements `72.85%`, Branches `63.01%`, Functions `74.97%`, Lines `73.88%` across the broader runtime scope. - - Server entrypoints, local server modules, shared runtime contracts, App/main entry files, and lazy dashboard sections are now visible in the coverage report instead of being hidden outside the denominator. - - `docs/testing.md` documents the broader denominator and clarifies that subprocess-spawned CLI/server paths remain behaviorally covered by integration, background, and Playwright tests even when V8 main-process line attribution stays lower. 
- - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. -- Validation: - - `npx vitest run --project unit tests/unit/vitest-coverage-config.test.ts --reporter=verbose` - - `npm run test:unit:coverage` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run check:deps` - - `npx vitest run --project architecture --reporter=verbose` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues - -### test-review.md / M-01 - -- Status: fixed -- Scope: dead hook visibility now has an explicit architecture guardrail. The two originally cited unused hook files, `src/hooks/use-theme.ts` and `src/hooks/use-provider-limits.ts`, had already been removed during the earlier `code-review.md / N-01` cleanup; this phase strengthens the test-review guardrail so the same class of dead runtime hook cannot silently return. -- Guardrails: `tests/architecture/unused-hooks.test.ts` now treats a hook as live only when it is reachable from `src/main.tsx`, rather than merely imported by any production file. A focused synthetic regression covers a dead hook cluster where one unused hook imports another, so future dead-code islands cannot satisfy the guardrail by importing each other. -- Follow-up quality fixes during implementation: - - `docs/architecture.md` and `docs/testing.md` now describe the hook rule as app-entrypoint reachability, matching the enforced behavior. - - `dependency-cruiser` remains responsible for dependency boundaries and cycle/orphan visibility, while `npm run test:architecture` owns the precise unused-hook signal that the original finding needed. - - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. 
-- Validation: - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `git diff --check` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues - -### test-review.md / M-02 - -- Status: fixed -- Scope: the highest-cost and hang-prone test paths were split, made deterministic, and given bounded cleanup without changing production behavior. The auto-import singleton integration test now coordinates the fake Toktrack runner through start/release sentinel files instead of a fixed 2-second delay, and the previous background catch-all suite was split into focused background prefix, selection, concurrency, registry, and startup CLI files. -- Guardrails: the `integration-background` Vitest project now allows file-level scheduling with a capped `maxWorkers: 2`, so independent background process tests can overlap without turning the test run into an unbounded process fan-out. `docs/testing.md` now documents deterministic subprocess coordination and small focused background files as the expected pattern. -- Follow-up quality fixes during implementation: - - The final `npm run test:timings` run now reports the auto-import singleton test at `1.371s` instead of the historical `3.326s`, and `tests/integration/server-auto-import.test.ts` at `2.669s` instead of the historical `4.521s`. - - The old `tests/integration/server-background.test.ts` monolith no longer dominates as one `5.785s` suite; its formerly mixed cases now appear as focused files, with the slowest background slice at `3.208s` in the final coverage/timing run. 
- - Test hangs were addressed at the harness level: server readiness/shutdown probes now have abort timeouts, CLI subprocess helpers have bounded timeouts and SIGKILL fallback, failed standalone server startup cleans up the spawned process, background cleanup force-stops leftover registry PIDs owned by the test root, and shared integration servers now wait for process shutdown in `afterAll`. - - `tests/unit/server-helpers-runner-process.test.ts` remains intentionally subprocess-backed where shell, `PATH`, timeout, and runner fallback behavior are the thing under test. - - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. -- Validation: - - `npx vitest run --project integration tests/integration/server-auto-import.test.ts tests/integration/server-startup-cli.test.ts --project integration-background tests/integration/server-background.test.ts tests/integration/server-background-selection.test.ts tests/integration/server-background-concurrency.test.ts tests/integration/server-background-registry.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-auto-import.test.ts tests/integration/server-startup-cli.test.ts tests/integration/server-local-auth.test.ts --project integration-background tests/integration/server-background.test.ts tests/integration/server-background-selection.test.ts tests/integration/server-background-concurrency.test.ts tests/integration/server-background-registry.test.ts --reporter=verbose` - - `npx vitest run --project integration-background tests/integration/server-background-registry.test.ts --reporter=verbose` - - `npx vitest run --project integration-background tests/integration/server-background-selection.test.ts --reporter=verbose` - - `tsc --noEmit` - - `npx vitest run --project integration --project integration-background --reporter=verbose` - - `npm run verify:full` -> final run passed, including Playwright `15 passed` - - `npm run test:timings` -> 
completed without hanging after the cleanup hardening - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> multiple rounds: 0 issues initially, 4 minor test-harness/test-clarity issues fixed, latest round 0 issues - -### test-review.md / M-03 - -- Status: fixed -- Scope: the Playwright dashboard monolith was split by user journey without changing dashboard runtime behavior. The former `tests/e2e/dashboard.spec.ts` coverage now lives in focused load/upload, forecast/filter, settings/backups, and reporting specs, while command-palette coverage remains separate. -- Guardrails: shared E2E auth, state reset, usage seeding, file upload, mocked auto-import/report, download-recording, and dashboard test-hook access now live in `tests/e2e/helpers.ts`. `docs/testing.md` documents journey-based Playwright files so future browser coverage does not grow back into a catch-all suite. -- Follow-up quality fixes during implementation: - - The CSP/browser-error smoke coverage moved to `tests/e2e/dashboard-load-upload.spec.ts`, replacing stale references to the removed dashboard monolith. - - The report-language test now resets both usage and settings before switching locale, making it independent from earlier Playwright file order. - - The largest dashboard-specific E2E files are now focused around settings/backups and command-palette behavior instead of one mixed dashboard file. - - The recurring silent coverage/timing hang was traced to Vitest runs that emitted only the JUnit reporter in this non-interactive gate. `test:unit:coverage` and `test:timings` now keep the JUnit artifact and also emit the `dot` reporter, so long coverage runs show progress and finish cleanly instead of depending on a silent reporter path. - - Dashboard UI, content, animation, runtime API behavior, and production code remain unchanged. 
-- Validation: - - `npx vitest run --project unit tests/unit/vitest-coverage-config.test.ts --reporter=verbose` -> passed, including the guardrail for explicit `dot` plus `junit` reporters on coverage-heavy scripts. - - `npm run test:unit:coverage` -> passed with `135` files, `496` tests passed, `1` skipped, and no silent hang after the reporter fix. - - `npm run test:timings` -> passed and printed timing diagnostics; the slowest suites remained existing server integration subprocess paths, while the split E2E work is outside this Vitest-only timing gate. - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` -> passed with `15` Playwright tests after the journey split. - - `npm run verify:full` -> final run passed, including format, lint, docstring lint, dependency-cruiser, `tsc --noEmit`, architecture tests, coverage, package verification, production build, and Playwright `15 passed`. - - Targeted Playwright follow-ups passed for `tests/e2e/dashboard-load-upload.spec.ts` and `tests/e2e/dashboard-settings-backups.spec.ts` after CodeRabbit requested locale-resilient label assertions. - - `git diff --check` -> passed after this validation update. - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> rounds 1 and 3 reported minor test/doc clarity issues that were fixed; rounds 2, 4, and 5 reported 0 issues; round 6 reported this missing validation detail and was fixed by this entry; the follow-up review after the validation update reported 0 issues. - -## 2026-04-26 - -### server-review.md / H-01 - -- Status: fixed -- Scope: `server.js` was reduced to dependency/runtime wiring during this phase, then further reduced by `server-review.md / M-01` to an executable CLI/Bin shim. 
CLI parsing/help moved to `server/cli.js`, startup summaries/browser opening/local auth-session metadata moved to `server/startup-runtime.js`, shared process helpers moved to `server/process-utils.js`, and HTTP server lifecycle, CLI routing, startup sequencing, client errors, and shutdown cleanup moved to `server/server-lifecycle.js`. -- Guardrails: `tests/architecture/server-entrypoint-contract.test.ts` blocks local helper function definitions, `__test__` exports, and direct `http.createServer(...)` calls from returning to `server.js`. `tests/unit/server-cli.test.ts`, `tests/unit/startup-runtime.test.ts`, and `tests/unit/server-lifecycle.test.ts` cover the extracted behavior directly. Existing server helper tests now instantiate `server/data-runtime.js` and `server/auto-import-runtime.js` directly instead of importing `server.js`. -- Follow-up quality fixes during implementation: - - The productive `server.js.__test__` helper surface was removed as part of the Entrypoint split; tests now target the owning runtime modules. - - The cross-process file-lock test now loads `server/data-runtime.js` directly in its child process, so it still validates real lock behavior without loading the CLI entrypoint. - - The startup data summary now pluralizes `1 day` versus `N days` correctly without changing the existing cost or token formatting. - - Background-child shutdown now logs unregister failures, suppresses unhandled promise rejections, exits through a finally-style path, and prevents duplicate shutdown completion when graceful close and forced timeout race. - - CLI help now documents the supported `-bg` legacy background alias alongside `-b` and `--background`, so displayed usage matches parser behavior. - - Startup behavior remains intentionally unchanged: auth bootstrap URL output, remote warnings, browser opening, background registration, auto-load logging, package startup, and API routing keep the same runtime contracts. 
-- Validation: - - `npx vitest run --project unit tests/unit/server-cli.test.ts tests/unit/startup-runtime.test.ts tests/unit/server-lifecycle.test.ts tests/unit/server-helpers-network.test.ts tests/unit/server-helpers-runner-core.test.ts tests/unit/server-helpers-runner-process.test.ts tests/unit/server-helpers-file-locks.test.ts --reporter=verbose` - - `npx vitest run --project architecture tests/architecture/server-entrypoint-contract.test.ts --reporter=verbose` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> rounds 1-2: 0 issues; round 3: 2 minor issues fixed; round 4: 1 minor issue fixed - -### server-review.md / M-01 - -- Status: fixed -- Scope: `server.js` is now only the executable CLI/Bin shim. Runtime composition moved to `server/app-runtime.js`, which builds the injected server lifecycle and keeps the background process entrypoint pointed at package-root `server.js`. The undocumented `require('./server.js').bootstrapCli/runCli` import surface was removed; in-process test/server starts now use `createAppRuntime(...)` explicitly. -- Guardrails: `tests/architecture/server-entrypoint-contract.test.ts` now blocks `module.exports`, `exports.*`, local helper functions, `__test__`, direct `http.createServer(...)`, and any `server.js` require target other than `./server/app-runtime`. Playwright's `scripts/start-test-server.js` uses the explicit app-runtime composer so E2E startup still exercises the same server lifecycle without importing the executable shim as a helper module. -- Follow-up quality fixes during implementation: - - Environment-derived CLI/server configuration now lives inside `createAppRuntime(...)`, so test harnesses can set isolated storage, host, and port environment variables before composing the runtime. 
- - `server/app-runtime.js` passes an explicit root `server.js` path to the background runtime, preserving foreground/background CLI behavior after the composition move. - - `docs/architecture.md` documents the new split between the executable shim and the app-runtime composition root. - - Dashboard UI, content, animation, API route behavior, local auth, remote auth, background CLI, packaging, and E2E startup behavior remain unchanged. -- Validation: - - `node -c server.js` - - `node -c server/app-runtime.js` - - `npx vitest run --project architecture tests/architecture/server-entrypoint-contract.test.ts --reporter=verbose` - - `npx vitest run --project unit tests/unit/server-lifecycle.test.ts tests/unit/server-cli.test.ts tests/unit/startup-runtime.test.ts --project integration tests/integration/server-local-auth.test.ts tests/integration/server-remote-auth.test.ts --reporter=verbose` - - `npx vitest run --project integration-background tests/integration/server-background.test.ts --reporter=verbose` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> multiple rounds: 0 issues - -### server-review.md / M-02 - -- Status: fixed -- Scope: mutable server runtime state is now encapsulated in per-runtime services. `server/runtime-state.js` owns the runtime snapshot, startup auto-load flag, singleton runtime lease, and expiring async cache primitives; `server/app-runtime.js`, `server/server-lifecycle.js`, and `server/startup-runtime.js` use the runtime-state service instead of ad hoc mutable flags; `server/auto-import-runtime.js` owns the Auto-Import lease and Toktrack latest-version cache through those services. -- Guardrails: `tests/unit/runtime-state.test.ts` covers runtime snapshots, startup flag isolation, singleton lease behavior, in-flight lookup deduplication, and TTL/reset behavior. 
`tests/architecture/server-runtime-state-contract.test.ts` blocks route-local `autoImportStreamRunning` state and the old free Auto-Import/Toktrack cache variables from returning. -- Follow-up quality fixes during implementation: - - `server/http-router.js` no longer owns an Auto-Import stream flag. It acquires a lease before sending SSE headers, so concurrent starts still return the existing HTTP `409` response without turning into streamed errors. - - `server/auto-import-runtime.js` remains the owner of Auto-Import execution, but the singleton state is now an explicit lease with idempotent release for normal completion, failures, and aborted streams. - - Toktrack latest-version lookups still share one in-flight request and keep separate success/failure TTL behavior, but the cache/promise state is hidden behind `createExpiringAsyncCache(...)`. - - Dashboard UI, content, animation, API paths, response shapes, local auth, remote auth, background startup, and E2E startup behavior remain unchanged. 
-- Validation: - - `node -c server/runtime-state.js` - - `node -c server/app-runtime.js` - - `node -c server/auto-import-runtime.js` - - `node -c server/http-router.js` - - `npx vitest run --project unit tests/unit/runtime-state.test.ts tests/unit/server-lifecycle.test.ts tests/unit/startup-runtime.test.ts tests/unit/server-helpers-runner-process.test.ts --project architecture tests/architecture/server-entrypoint-contract.test.ts tests/architecture/server-runtime-state-contract.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-auto-import.test.ts tests/integration/server-api-routing-runtime.test.ts tests/integration/server-local-auth.test.ts tests/integration/server-remote-auth.test.ts --project integration-background tests/integration/server-background.test.ts --reporter=verbose` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> multiple rounds: 0 issues - -### server-review.md / N-01 - -- Status: fixed -- Scope: Host, Origin, Sec-Fetch-Site, and JSON content-type request policy moved from the broader HTTP utility module into `server/http-request-guards.js`. `server/http-utils.js` remains the compatible facade for router consumers and still owns request body parsing, JSON responses, buffer responses, and API-prefix resolution. -- Guardrails: `tests/unit/http-request-guards.test.ts` covers loopback, wildcard, non-loopback, IPv6, origin, cross-site, and content-type behavior directly. `tests/unit/http-utils.test.ts` now focuses on the HTTP facade and body/response behavior. `tests/architecture/server-http-boundaries.test.ts` keeps request guard policy out of the router and out of generic HTTP utility internals. -- Follow-up quality fixes during implementation: - - The request guard code now tolerates missing `req.headers` defensively while preserving the same rejection path for malformed or missing Host/Origin input. 
- - Wildcard binds now apply the intended socket-local host check before exact bind-host matching, so `Host: 0.0.0.0` is not treated as a trusted client-facing host. - - Body-size and response-header behavior gained focused unit coverage, so the split did not trade broad utility tests for narrower policy-only tests. - - Dashboard UI, content, animation, API paths, response shapes, local auth, remote auth, background startup, and E2E startup behavior remain unchanged. -- Validation: - - `node -c server/http-request-guards.js` - - `node -c server/http-utils.js` - - `npx vitest run --project unit tests/unit/http-request-guards.test.ts tests/unit/http-utils.test.ts --project architecture tests/architecture/server-http-boundaries.test.ts tests/architecture/server-entrypoint-contract.test.ts tests/architecture/server-runtime-state-contract.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-api-guards.test.ts tests/integration/server-remote-auth.test.ts --reporter=verbose` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> multiple rounds: 0 issues - -## 2026-04-25 - -### security-review.md / H-01 - -- Status: fixed -- Scope: explicit non-loopback binding now requires `TTDASH_REMOTE_TOKEN` in addition to `TTDASH_ALLOW_REMOTE=1`. Remote API requests are authenticated centrally before route handling through Bearer auth, `X-TTDash-Remote-Token`, or an HttpOnly same-site cookie; the later `security-review.md / M-01` fix extends the same auth boundary to default loopback sessions. -- Guardrails: `tests/unit/remote-auth.test.ts` covers remote-mode configuration, accepted credential forms, generic rejection paths, timing-safe length-independent comparison behavior, and the browser bootstrap redirect/cookie contract. 
`tests/integration/server-remote-auth.test.ts` covers failed startup without a token, protected remote API reads, cookie bootstrap, and preserved Origin mutation guards. Existing server guard tests continue to cover host validation, malformed paths, oversized payloads, and cross-site rejection. -- Follow-up quality fixes during implementation: - - Remote authentication lives in `server/remote-auth.js` as a focused server boundary, while `server/http-router.js` only applies the injected gate before API routing and static bootstrap handling. - - Remote browser access is supported without frontend changes by visiting `?ttdash_token=` once; the token is moved to an HttpOnly cookie and removed from the redirected URL. - - Background runtime identity checks now send the remote Bearer header when the parent process is running in authenticated remote mode, so remote background management keeps working. - - Startup help and remote warnings now mention the additional token requirement without logging the token value. 
-- Validation: - - `npx vitest run --project unit tests/unit/remote-auth.test.ts tests/unit/http-utils.test.ts tests/unit/background-runtime.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-remote-auth.test.ts tests/integration/server-api-guards.test.ts --reporter=verbose` - - `npx vitest run --project integration-background tests/integration/server-background.test.ts --reporter=verbose` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues, round 4: 0 issues - -### security-review.md / M-01 - -- Status: fixed -- Scope: default loopback servers now generate a per-start local session token and require authentication for every API endpoint, including `/api/usage`, `/api/settings`, `/api/runtime`, and `/api/toktrack/version-status`. Normal dashboard startup stays frictionless because the auto-open URL carries the one-time bootstrap token, which is exchanged for an HttpOnly/SameSite cookie and stripped from the redirected URL. -- Guardrails: `tests/unit/remote-auth.test.ts` covers default local session auth, generated local tokens, explicit test opt-out, shared credential parsing, bootstrap redirects, and generic rejection paths. `tests/integration/server-local-auth.test.ts` covers protected loopback reads, Bearer and cookie access, preserved mutation Origin guards, and restrictive auth-session file permissions. Existing remote, background, persistence, recovery, routing, and E2E tests were updated to authenticate through the same boundary. -- Follow-up quality fixes during implementation: - - The existing `server/remote-auth.js` boundary now owns both local session auth and remote auth so API route handlers stay unaware of the source of the credential. 
- - `server.js` writes the local session metadata to restrictive user config state and prints a `Local Auth URL` only when browser auto-open is disabled. - - Background instance registry entries now carry per-instance auth headers and bootstrap URLs so `ttdash stop`, registry pruning, and no-open background starts remain usable. - - Playwright and integration test helpers bootstrap through the same local session file instead of bypassing the production auth path. -- Residual risk: - - This protects the local HTTP API from unauthenticated loopback access, browser/DNS-rebinding-style access, and other OS users. It does not fully isolate against malware already running as the same OS user, which can target user config files, terminal output, or browser state. -- Validation: - - `npx vitest run --project unit tests/unit/remote-auth.test.ts tests/unit/background-runtime.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-local-auth.test.ts tests/integration/server-api-guards.test.ts tests/integration/server-api-persistence.test.ts tests/integration/server-api-routing-runtime.test.ts tests/integration/server-api-recovery.test.ts tests/integration/server-remote-auth.test.ts --reporter=verbose` - - `npx vitest run --project integration-background tests/integration/server-background.test.ts --reporter=verbose` - - `PLAYWRIGHT_TEST_PORT=3020 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:package` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 2 minor documentation issues fixed, round 4: 0 issues - -### security-review.md / N-01 - -- Status: fixed -- Scope: the server CSP no longer allows `unsafe-inline` styles. 
Shared security headers now live in `server/security-headers.js`, HTML responses get a per-response CSP nonce plus a matching `ttdash-csp-nonce` meta tag, `style-src-elem` is limited to `self` and the nonce, and `style-src-attr 'none'` blocks literal inline style attributes. -- Guardrails: `tests/unit/security-headers.test.ts` covers CSP construction, nonce shape, nonce meta injection, HTML response preparation, and non-HTML header behavior. `tests/integration/server-api-guards.test.ts` checks the strict CSP on authenticated API responses. `tests/e2e/dashboard-load-upload.spec.ts` verifies that the loaded dashboard HTML carries the nonce-backed CSP and that the browser reports no CSP errors while the main dashboard journey runs. -- Follow-up quality fixes during implementation: - - CSP generation moved out of `server.js`, so future header changes have a focused unit-testable boundary. - - `server/http-router.js` now treats HTML static responses separately from other assets, allowing nonce-specific headers without weakening API, JSON, CSS, or JS asset responses. - - React/Recharts/Motion JS-driven style property updates remain allowed because the browser-enforced risk path for this finding is literal inline style attributes or inline style elements; preserving those runtime style properties avoids UI and animation regressions without reintroducing `unsafe-inline`. 
-- Validation: - - `npx vitest run --project unit tests/unit/security-headers.test.ts --reporter=verbose` - - `npx vitest run --project integration tests/integration/server-api-guards.test.ts --reporter=verbose` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 1 minor grammar issue in `docs/application-stack-reference.md` fixed, round 3: 0 issues - -### performance-review.md / H-01 - -- Status: fixed -- Scope: secondary cost-analysis charts now leave the initial dashboard bundle and load through the section warmup path. Dashboard sections also get an adaptive, deduplicated preload scheduler that starts visible lazy section chunks shortly after the first render and keeps IntersectionObserver preloading far enough ahead of the viewport to avoid visible lazy-loading gaps while scrolling. -- Guardrails: `tests/frontend/dashboard-motion.test.tsx` covers idle/fallback scheduling, deduplication, cancellation, early preloading, and inert hidden content. `tests/unit/dashboard-section-preloading.test.ts` covers visible-section queue ordering, hidden/request-data gating, and duplicate task removal. -- Follow-up quality fixes during implementation: - - Cost-analysis entry charts (`CostOverTime`, `CostByModel`) were moved from static dashboard imports to lazy chunks, reducing the built main `index` chunk from roughly `212 kB` raw / `57 kB` gzip to roughly `198 kB` raw / `53 kB` gzip. - - The warmup scheduler now uses a short idle timeout with bounded parallelism so deeper sections are warmed without waiting for late viewport proximity. - - Section placeholders for deeper analysis/table areas now better match final section heights, preventing scroll-command layout shift while lazy chunks complete above the target section. 
-- Validation: - - `npm run test:unit -- tests/frontend/dashboard-motion.test.tsx tests/unit/dashboard-section-preloading.test.ts` - - `npx playwright test tests/e2e/command-palette.spec.ts -g "executes analysis section navigation commands"` - - `npm run lint` - - `npm run test:architecture` - - `npm run check:deps` - - `tsc --noEmit` - - `npm run build:app` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues - -### performance-review.md / M-01 - -- Status: fixed -- Scope: dashboard filter changes now flow through a centralized `deriveDashboardFilterData(...)` pass that derives date/month filtering, provider/model filtering, available filter options, date range, and view-mode aggregation behind the stable `useDashboardFilters` contract. Computed dashboard summaries now share one normalized breakdown aggregation for model costs, provider metrics, and model options behind the stable `useComputedMetrics` contract. -- Guardrails: `tests/unit/dashboard-filter-data.test.ts` compares the new filter derivation with the previous staged semantics across representative filter combinations and empty states. `tests/unit/dashboard-aggregation.test.ts` locks normalized model/provider aggregation and day counting. `tests/frontend/use-computed-metrics.test.tsx` covers the public computed-metrics hook boundary. -- Follow-up quality fixes during implementation: - - `src/lib/dashboard-filter-data.ts` imports directly from the shared dashboard-domain contract instead of routing through broader frontend transform helpers, keeping the hook dependency graph smaller and the architecture layer test below its timeout budget. - - `src/lib/data-transforms.ts` now fills scalar chart arrays and model-name discovery in one sorted-data pass instead of separate `map(...)` and `flatMap(...)` passes. 
- - `computeModelCosts(...)` and `computeProviderMetrics(...)` now delegate to the shared breakdown summary, so standalone calculation callers and dashboard hook callers use the same aggregation path. - - After CodeRabbit review, computed `allModels` now unions `modelsUsed` and `modelBreakdowns`, so inconsistent imported data can no longer hide a breakdown-backed model from the model-over-time chart while existing modelsUsed-only behavior remains preserved. -- Validation: - - `npm run test:unit -- tests/unit/dashboard-filter-data.test.ts tests/unit/dashboard-aggregation.test.ts tests/frontend/use-computed-metrics.test.tsx tests/frontend/use-dashboard-filters.test.tsx tests/unit/analytics.test.ts tests/unit/data-transforms.test.ts tests/unit/code-rabbit-phase4.test.ts` - - `npm run test:unit -- tests/unit/dashboard-aggregation.test.ts tests/frontend/use-computed-metrics.test.tsx tests/unit/analytics.test.ts` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` -> completed after the CodeRabbit follow-up fix - - `npm run test:timings` -> completed after the CodeRabbit follow-up fix - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 1 minor issue fixed, round 3: 0 issues, round 4: 0 issues - -### performance-review.md / M-02 - -- Status: fixed -- Scope: the Settings dialog no longer starts `/api/toktrack/version-status` when it opens. The dashboard shell now schedules one cancellable, idle-friendly toktrack latest-version warmup per browser session through `src/lib/toktrack-version-status.ts`, and the settings version hook only formats the shared session snapshot for the Maintenance tab. -- Guardrails: `tests/unit/toktrack-version-status.test.ts` covers the pinned initial snapshot, concurrent warmup deduplication, cached failure behavior, scheduled fallback warmup, and cancellation. 
`tests/frontend/settings-modal-version-status.test.tsx` covers warmed status rendering without a dialog-open fetch, the no-fetch dialog-open path, and cached failure reuse across reopen. `tests/frontend/dashboard-filter-visibility.test.tsx` covers that the dashboard shell wires the session warmup scheduler. -- Follow-up quality fixes during implementation: - - `docs/architecture.md` now documents the session-wide toktrack version status cache as the owner of lookup state, while the settings modal hook is only the presentation adapter. - - Dashboard tests mock the warmup scheduler explicitly so future dashboard renders cannot accidentally perform real toktrack version network work during component tests. - - The server `/api/toktrack/version-status` contract and existing server-side success/failure TTL cache remain unchanged, so the fix changes when the UI asks for the status, not how the status is resolved. -- Validation: - - `npm run test:unit -- tests/unit/toktrack-version-status.test.ts tests/frontend/settings-modal-version-status.test.tsx tests/frontend/settings-modal-tabs.test.tsx tests/frontend/dashboard-filter-visibility.test.tsx tests/frontend/dashboard-error-state.test.tsx tests/unit/api.test.ts` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues - -### performance-review.md / N-01 - -- Status: fixed -- Scope: complex dashboard UI islands now separate calculation-heavy view data from rendering, accessibility, and motion. Drilldown details, heatmap grids, request-quality ratios, sortable table rows, recent-day benchmarks, and date-picker calendar navigation are derived through focused `src/lib/*-data.ts` helpers while the existing frontend functionality, visual structure, and animations stay unchanged. 
-- Guardrails: new unit suites cover drilldown aggregation/rankings/token segments, heatmap grid and keyboard target derivation, request-quality ratios/progress, table sort/row derivation, and date-picker calendar actions. Existing frontend suites still cover the DOM, ARIA, keyboard, and motion contracts for the touched components. -- Follow-up quality fixes during implementation: - - React component tests for heatmap and drilldown were narrowed to visible UI/A11y contracts after the derived branches moved to fast unit coverage, and over-broad timeout overrides were removed from sortable table tests. - - `docs/architecture.md` now documents the non-presentational data-derivation helpers as the boundary for complex dashboard UI islands. - - `npm run test:timings` shows the new helper suites stay out of the slowest-suite list; the remaining slow entries are existing integration, motion, settings, and broad table/UI interaction paths. -- Validation: - - `npx vitest run --project unit tests/unit/drill-down-data.test.ts tests/unit/heatmap-calendar-data.test.ts tests/unit/request-quality-data.test.ts tests/unit/sortable-table-data.test.ts tests/unit/filter-date-picker-data.test.ts tests/unit/recent-days-reveal.test.ts --reporter=verbose` - - `npx vitest run --project frontend tests/frontend/drill-down-modal-content.test.tsx tests/frontend/drill-down-modal-motion.test.tsx tests/frontend/heatmap-calendar-accessibility.test.tsx tests/frontend/request-quality.test.tsx tests/frontend/sortable-table-provider-model.test.tsx tests/frontend/sortable-table-recent-days.test.tsx tests/frontend/filter-bar-date-picker.test.tsx --reporter=verbose` - - `npm run format:check` - - `npm run lint` - - `tsc --noEmit` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files ...` -> round 1: 0 issues, round 2: 0 issues - -### dashboard-review.md / N-02 - -- Status: fixed -- 
Scope: `src/components/dashboard/DashboardSections.tsx` already consumes a single structured `DashboardSectionsViewModel`, and `src/types/dashboard-view-model.d.ts` keeps the section data split into named section bundles instead of flat dashboard props. This closes the original broad `DashboardSectionsProps` concern without changing visible dashboard functionality, content, UI, or animations. -- Guardrails: `tests/architecture/dashboard-sections-contract.test.ts` now locks the public `DashboardSections` prop contract to one `viewModel` prop and keeps `DashboardSectionsViewModel` split into the intended layout, analysis, table, comparison, and interaction bundles. -- Follow-up quality fixes during implementation: - - No production refactor was needed because the broader view-model boundary had already been introduced by `architecture-review.md / M-01`; this change adds a targeted regression guardrail so future section work does not reintroduce wide flat props. - - `docs/architecture.md` already documents the intended DashboardSections boundary, so no duplicate architecture text was added. 
-- Validation: - - `npm run test:architecture` - - `npm run test:unit -- tests/frontend/dashboard-filter-visibility.test.tsx` - - `tsc --noEmit` - - `npm run lint` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` -> attempted twice; both runs failed under external CPU pressure with intermittent 5s timeouts in existing unrelated frontend suites, while `npm run verify:full` had just completed the same coverage test set successfully; the new architecture guardrail itself stayed below 100ms in `npm run test:architecture` - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files docs/review/fixed-findings.md tests/architecture/dashboard-sections-contract.test.ts` -> 0 issues - -## 2026-04-24 - -### dashboard-review.md / N-01 - -- Status: fixed -- Scope: the dashboard action landscape now separates everyday data loading, export/use actions, and maintenance actions in the header, while the Command Palette mirrors the same intent split for load data, exports, and maintenance. Existing actions and command IDs remain available. -- Guardrails: `tests/frontend/header-links.test.tsx` covers localized header action groups and preserved button access, while `tests/frontend/command-palette-action-groups.test.tsx` covers localized Command Palette groups and stable command IDs. -- Follow-up quality fixes during implementation: - - Header action rendering is now owned by a private `HeaderActions` composition inside `Header.tsx`, keeping the global header shell separate from action information architecture. - - Command Palette action commands are grouped by intent instead of sharing one broad `Actions` bucket, without changing command handlers or `data-testid` contracts. 
-- Validation: - - `npm run test:unit -- tests/frontend/header-links.test.tsx tests/frontend/command-palette-action-groups.test.tsx` - - `npx playwright test tests/e2e/command-palette.spec.ts` - - `tsc --noEmit` - - `npm run lint` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md -f ...` -> only reported unrelated untracked `docs/application-stack-reference.md`; the file was not changed - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir src/components` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir tests/frontend` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir shared/locales` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir docs/review` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files docs/architecture.md` -> 0 issues - -### dashboard-review.md / M-02 - -- Status: fixed -- Scope: `src/components/layout/FilterBar.tsx` was reduced to a composition shell over private filter groups for status, time presets, date range, and provider/model chips. The visible dashboard filtering capabilities remain intact while the bar now shows lightly grouped Time, Date range, Providers, and Models areas. -- Guardrails: `tests/frontend/filter-bar-accessibility.test.tsx` covers localized filter groups, while the existing date-picker and preset/chip suites continue to lock keyboard focus, date clearing, preset highlighting, and chip `aria-pressed`/visual states. `.dependency-cruiser.cjs` now keeps the new FilterBar internals private to the FilterBar shell. -- Follow-up quality fixes during implementation: - - The custom date picker logic now lives in `FilterBarDateRange.tsx`, isolating its portal, overlay positioning, keyboard navigation, and focus restoration from the main filter shell. 
- - Provider and model chip rendering now lives in `FilterBarChipFilters.tsx`, keeping provider badge styling and model color state local to chip filters. - - Provider badge alpha variants now flow through `getProviderBadgeStyle(...)`, so included provider chips no longer parse or mutate CSS strings in the UI. - - Date-picker calendar labels now recompute from the active locale while the picker is open, and weekday cells use stable keys. -- Validation: - - `npm run test:unit -- tests/frontend/filter-bar-accessibility.test.tsx tests/frontend/filter-bar-date-picker.test.tsx tests/frontend/filter-bar-presets.test.tsx tests/unit/model-colors.test.ts` - - `tsc --noEmit` - - `npm run lint` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md -f ...` -> fixed relevant minor findings for weekday keys, provider badge alpha handling, locale-reactive calendar labels, and included-chip style collisions; remaining global uncommitted finding is isolated to unrelated untracked `docs/application-stack-reference.md`. - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir src/components/layout` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir src/lib` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir tests` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir shared` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --dir docs/review` -> 0 issues - - `coderabbit review --agent -t uncommitted -c AGENTS.md --files .dependency-cruiser.cjs` -> 0 issues - -### dashboard-review.md / M-01 - -- Status: fixed -- Scope: `src/components/features/settings/SettingsModal.tsx` was changed from one long settings surface into a tabbed workspace with Basics, Layout, Limits, and Maintenance areas. 
Existing settings capabilities remain available, but day-to-day preferences are separated from section layout, provider limits, and backup/version-maintenance actions. -- Guardrails: `tests/frontend/settings-modal-tabs.test.tsx` covers default tab state, section grouping, and keyboard navigation, while the existing focused settings suites now exercise their sections through the relevant tab. `docs/architecture.md` documents the tabbed settings shell and split motion/toktrack version ownership. -- Follow-up quality fixes during implementation: - - `SettingsModalSections.tsx` now separates the reduced-motion card from the toktrack version-status card so Maintenance can own diagnostic/version information without mixing it into daily dashboard behavior settings. - - `SettingsModal.tsx` now resets the active settings tab while the dialog is closed, so reopening the dialog always starts from Basics without a transient stale tab render. - - `tests/e2e/command-palette.spec.ts` now splits the formerly near-timeout section-navigation coverage into smaller scroll, dashboard-section, and analysis-section tests after the full gate exposed the old monolithic test as a reliability/performance risk. 
-- Validation: - - `npm run test:unit -- tests/frontend/settings-modal-tabs.test.tsx tests/frontend/settings-modal-language.test.tsx tests/frontend/settings-modal-version-status.test.tsx tests/frontend/settings-modal-defaults.test.tsx tests/frontend/settings-modal-sections.test.tsx tests/frontend/settings-modal-backups.test.tsx tests/frontend/settings-modal-provider-limits.test.tsx tests/frontend/settings-modal-draft-state.test.tsx` - - `npx playwright test tests/e2e/command-palette.spec.ts` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues, round 3: 0 issues - -### code-review.md / N-01 - -- Status: fixed -- Scope: removed the unused `src/hooks/use-theme.ts` and `src/hooks/use-provider-limits.ts` files. Their active responsibilities already live in the persisted settings flow: theme application goes through `applyTheme` and dashboard controller effects, while provider-limit synchronization goes through `useAppSettings` and `syncProviderLimits`. -- Guardrails: `tests/architecture/unused-hooks.test.ts` now fails when a production hook file under `src/hooks/` has no production import, and `docs/architecture.md` plus `docs/testing.md` document that unused hook helpers should be removed instead of retained as speculative code. -- Follow-up quality fixes during implementation: - - The guardrail covers the same dead-hook visibility gap that made the original `0%` coverage files easy to miss, so future unused hook files are caught by `npm run test:architecture`. 
-- Validation: - - `npm run test:architecture` - - `npm run check:deps` - - `npm run test:unit -- tests/frontend/react-query-hooks.test.tsx tests/frontend/dashboard-controller-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the CLI: `47 minutes and 10 seconds`) - -### code-review.md / M-01 - -- Status: fixed -- Scope: dashboard date preset behavior is centralized in the shared dashboard preferences contract. `src/hooks/use-dashboard-filters.ts` applies presets through `resolveDashboardPresetRange`, and `src/components/layout/FilterBar.tsx` derives the active UI state through `resolveDashboardActivePreset`; both flow through `src/lib/dashboard-preferences.ts` to `shared/dashboard-preferences.js`, so applying and displaying presets no longer duplicate the `7d`, `30d`, `month`, `year`, and `all` rules. -- Guardrails: `tests/unit/dashboard-preferences.test.ts` locks the shared/frontend preset contract, while `tests/frontend/use-dashboard-filters.test.tsx` and `tests/frontend/filter-bar-presets.test.tsx` cover applying presets in the hook and highlighting them in the filter bar. -- Follow-up quality fixes during implementation: - - No production or test changes were needed for this finding because the shared preset contract and focused regression coverage already existed; the fix was to document the concrete `code-review.md / M-01` reference as closed. 
-- Validation: - - `npm run test:unit -- tests/unit/dashboard-preferences.test.ts tests/frontend/use-dashboard-filters.test.tsx tests/frontend/filter-bar-presets.test.tsx` - - `npm run test:architecture` - - `npm run check:deps` - - `npm run verify:full` - - `npm run test:timings` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 2 minor documentation issues, fixed - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 2: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the CLI: `57 minutes and 23 seconds`) - -### code-review.md / M-02 - -- Status: fixed -- Scope: `src/components/features/settings/SettingsModal.tsx` was reduced from the former 1000+ line all-in-one dialog into a shell/composition root over extracted settings sections, a draft-state hook, a version-status hook, and modal-local helper logic. The visible settings areas now live behind `src/components/features/settings/SettingsModalSections.tsx`, while the draft/save/reset behavior moved into `use-settings-modal-draft.ts` and the toktrack lookup behavior moved into `use-settings-modal-version-status.ts`. -- Guardrails: `docs/architecture.md` now documents the settings modal shell, internal section bundle, and private helper hooks as a feature-internal composition boundary, and `.dependency-cruiser.cjs` now blocks unrelated frontend modules from importing `SettingsModalSections.tsx`, `use-settings-modal-draft.ts`, `use-settings-modal-version-status.ts`, or `settings-modal-helpers.ts` directly. -- Follow-up quality fixes during implementation: - - `src/components/features/settings/settings-modal-helpers.ts` now owns the extracted number parsing, selection normalization, provider-limit draft building/patching, and section reorder helpers so tests no longer import utility behavior through the UI shell. 
- - `use-settings-modal-draft.ts` now clones incoming section drafts and patches empty provider configs from `DEFAULT_PROVIDER_LIMIT_CONFIG`, preserving the previous provider-limit safety behavior after the refactor. - - `use-settings-modal-draft.ts` now initializes modal drafts once per open session and resets that guard on close, so external prop churn can no longer overwrite in-progress edits while the dialog remains open. - - `tests/unit/settings-modal-helpers.test.ts` now covers the extracted settings helpers directly, and `tests/unit/code-rabbit-phase1.test.ts` was narrowed back to the unrelated chart/auto-import helpers it actually owns. - - `tests/frontend/settings-modal-defaults.test.tsx`, `settings-modal-sections.test.tsx`, `settings-modal-backups.test.tsx`, `settings-modal-provider-limits.test.tsx`, and `settings-modal-draft-state.test.tsx` now split the modal coverage by responsibility instead of adding another broad catch-all dialog suite. -- Validation: - - `npm run test:unit -- tests/unit/settings-modal-helpers.test.ts tests/unit/code-rabbit-phase1.test.ts tests/frontend/settings-modal-language.test.tsx tests/frontend/settings-modal-version-status.test.tsx tests/frontend/settings-modal-defaults.test.tsx tests/frontend/settings-modal-sections.test.tsx tests/frontend/settings-modal-backups.test.tsx tests/frontend/settings-modal-provider-limits.test.tsx` - - `npm run check` - - `npm run test:architecture` - - `npm run test:timings` - - `npm_config_cache=/tmp/ttdash-npm-cache npm run verify:release` - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 1 minor issue, fixed (draft state no longer reinitializes while the modal stays open; `tests/frontend/settings-modal-draft-state.test.tsx` added) - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 2: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the 
CLI: `54 minutes and 32 seconds`) - -### code-review.md / H-01 - -- Status: fixed -- Scope: `src/hooks/use-dashboard-controller.ts` was reduced from the remaining god-hook into a composition root over focused internal controller slices. The heavy derived state, browser IO, dialog ownership, drill-down navigation, shell/load state, and imperative dashboard actions now live in `src/hooks/use-dashboard-controller-actions.ts`, `use-dashboard-controller-browser.ts`, `use-dashboard-controller-derived-state.ts`, `use-dashboard-controller-dialogs.ts`, `use-dashboard-controller-drill-down.ts`, `use-dashboard-controller-effects.ts`, and `use-dashboard-controller-shell-state.ts`, while the public controller contract stayed stable through `src/hooks/use-dashboard-controller.ts`. -- Guardrails: `docs/architecture.md` now documents the internal dashboard controller slices and the browser-IO helper as private implementation details behind the public controller hook, and `.dependency-cruiser.cjs` now blocks component-level fanout to the internal `use-dashboard-controller-*.ts` slices while keeping the intentional type-only controller contract out of the orphan warning path. -- Follow-up quality fixes during implementation: - - `tests/frontend/dashboard-controller-browser.test.tsx` now locks the extracted browser helper responsibilities for JSON downloads, section scrolling, and the test-only `openSettings` bridge. - - `tests/frontend/dashboard-controller-drill-down.test.tsx` now covers the extracted drill-down slice directly, including the edge case where the selected day disappears after filtering changes. - - `src/hooks/use-dashboard-controller-actions.ts` now uses the upload-specific fallback toast (`api.uploadFailed`) after a successful JSON parse when the backend rejects a usage upload without an `Error` instance, and `tests/frontend/dashboard-error-state.test.tsx` covers that regression explicitly. 
- - `npm run test:timings` showed no new performance hotspot in the touched dashboard/controller suites; the new slice tests stay sub-100ms and the existing dashboard controller/public-shell tests remained fast. -- Validation: - - `npm run check` - - `npm run test:architecture` - - `npm run test:timings` - - `npm run test:unit -- tests/frontend/dashboard-controller-browser.test.tsx tests/frontend/dashboard-controller-drill-down.test.tsx tests/frontend/dashboard-controller-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx tests/frontend/dashboard-filter-visibility.test.tsx tests/frontend/dashboard-error-state.test.tsx` - - `npm run test:unit -- tests/frontend/dashboard-error-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx tests/frontend/dashboard-controller-browser.test.tsx` - - `npm_config_cache=/tmp/ttdash-npm-cache npm run verify:release` - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 1 minor issue, fixed (`api.uploadFailed` fallback for backend upload rejection after successful JSON parse) - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 2: blocked by CodeRabbit rate limit (`Rate limit exceeded`, retry window reported by the CLI: `52 minutes and 50 seconds`) - -## 2026-04-23 - -### architecture-review.md / H-01 - -- Status: fixed -- Scope: `server.js` was reduced to the CLI/bootstrap and runtime composition root; subsystem logic moved into `server/data-runtime.js`, `server/background-runtime.js`, `server/auto-import-runtime.js`, and `server/http-router.js`. -- Guardrails: `docs/architecture.md` now documents the server split, `.dependency-cruiser.cjs` prevents runtime modules from coupling back to `server.js`, the router, or each other, and `tests/unit/background-runtime.test.ts` locks the background registry snapshot behavior that was tightened during the review cycle. 
-- Follow-up quality fixes during implementation: - - `server/background-runtime.js`: removed a snapshot TOCTOU re-read so background pruning now decides cleanup from one captured registry read. - - `src/components/layout/FilterBar.tsx`: added cleanup for queued focus-restoration callbacks; `tests/frontend/filter-bar-date-picker.test.tsx` now covers the unmount path that was causing the flaky date-picker teardown in Playwright. -- Validation: - - `npm run test:architecture` - - `npm run check:deps` - - `npm run test:unit -- tests/unit/server-helpers-network.test.ts tests/unit/server-helpers-runner-core.test.ts tests/unit/server-helpers-runner-process.test.ts tests/unit/server-helpers-file-locks.test.ts tests/integration/server-auto-import.test.ts tests/integration/server-background.test.ts tests/integration/server-api-guards.test.ts` - - `npm run verify:full` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 1 minor issue, round 2: 0 issues - -### architecture-review.md / H-02 - -- Status: fixed -- Scope: the duplicated client/server settings contract was consolidated into `shared/app-settings.js` with typed declarations in `shared/app-settings.d.ts`; frontend adapters in `src/lib/app-settings.ts`, `src/lib/dashboard-preferences.ts`, and `src/lib/provider-limits.ts` now consume that shared contract, and `server/data-runtime.js` now normalizes persisted settings through the same source. -- Guardrails: `docs/architecture.md` now documents `shared/app-settings.js` as the single production source for persisted settings defaults and normalization, `.dependency-cruiser.cjs` blocks bypassing that contract from `server.js`, `server/data-runtime.js`, and `src/lib/app-settings.ts`, and `vitest.config.ts` now includes `shared/app-settings.js` in coverage reporting. 
-- Follow-up quality fixes during implementation: - - `tests/unit/app-settings-contract.test.ts`: locks the shared/frontend contract alignment for defaults, fragment normalization, persisted settings normalization, and runtime-only flags. - - `tests/integration/server-api-imports.test.ts`: now asserts the normalized settings-import response instead of only the status code. - - `tests/integration/server-api-persistence.test.ts` and `tests/frontend/settings-modal-test-helpers.tsx`: now derive defaults from the shared settings contract instead of re-hardcoding them in tests. - - `tests/unit/background-runtime.test.ts`: cleaned up type-only imports to keep the repo lint/type gates green after the shared typings were added. -- Validation: - - `npm run check` - - `npm run test:architecture` - - `npm run test:unit -- tests/unit/app-settings-contract.test.ts tests/unit/api.test.ts tests/unit/dashboard-preferences.test.ts tests/integration/server-api-persistence.test.ts tests/integration/server-api-imports.test.ts` - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:unit:coverage` - - `npm run build:app` - - `npm run verify:package` - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues - -### architecture-review.md / M-01 - -- Status: fixed -- Scope: the dashboard orchestration was cut from a broad flat controller surface into focused view-model bundles in `src/hooks/use-dashboard-controller.ts`; `src/components/Dashboard.tsx` now consumes `header`, `filterBar`, `sections`, `settingsModal`, `dialogs`, `commandPalette`, `report`, and shell bundles instead of forwarding dozens of individual fields, and `src/components/dashboard/DashboardSections.tsx` now consumes one structured `DashboardSectionsViewModel`. 
-- Guardrails: `src/types/dashboard-view-model.d.ts` now owns the shared frontend-only dashboard view-model contracts, `docs/architecture.md` documents `Dashboard.tsx` as the controller composition root, and `.dependency-cruiser.cjs` now blocks component-subtree fanout to `src/hooks/use-dashboard-controller.ts`. -- Follow-up quality fixes during implementation: - - `src/components/layout/Header.tsx`, `src/components/layout/FilterBar.tsx`, `src/components/features/command-palette/CommandPalette.tsx`, and `src/components/features/settings/SettingsModal.tsx` now type their props from the shared dashboard view-model contracts instead of re-declaring local prop shapes. - - `tests/frontend/dashboard-controller-test-helpers.ts` now provides bundle-based controller and section factories, so dashboard composition tests no longer rebuild a flat mega-mock. - - `tests/frontend/dashboard-controller-actions.test.tsx` now covers drill-down navigation from the controller-owned dialog bundle, locking the logic that moved out of `Dashboard.tsx`. - - `tests/frontend/dashboard-filter-visibility.test.tsx` now also asserts that `DashboardSections` receives a structured `viewModel` bundle instead of flat section props. 
-- Validation: - - `npm run check` - - `npm run test:architecture` - - `npm run test:unit -- tests/frontend/dashboard-controller-state.test.tsx tests/frontend/dashboard-controller-actions.test.tsx tests/frontend/dashboard-filter-visibility.test.tsx` - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:unit:coverage` - - `npm run build:app` - - `npm run verify:package` - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues - -### architecture-review.md / M-02 - -- Status: fixed -- Scope: dashboard preset, section, and settings-adjacent UI rules now flow through `shared/dashboard-preferences.js` with declarations in `shared/dashboard-preferences.d.ts`; `shared/app-settings.js` consumes that shared contract, `src/lib/dashboard-preferences.ts` was reduced to a thin adapter, and the duplicated preset semantics were removed from `src/hooks/use-dashboard-filters.ts` and `src/components/layout/FilterBar.tsx`. -- Guardrails: `docs/architecture.md` now documents the shared dashboard contract separately from the app settings contract, `.dependency-cruiser.cjs` blocks production imports of `shared/dashboard-preferences.json` outside `shared/dashboard-preferences.js`, and `vitest.config.ts` now includes `shared/dashboard-preferences.js` in coverage. -- Follow-up quality fixes during implementation: - - `tests/unit/dashboard-preferences.test.ts` now locks the shared/frontend adapter alignment for config parsing, section metadata, preset-range resolution, and active-preset detection. - - `tests/frontend/use-dashboard-filters.test.tsx` now asserts preset application and reset behavior against the shared preset resolver instead of re-hardcoding date ranges. - - `tests/frontend/filter-bar-presets.test.tsx` now seeds active-preset UI states from the shared resolver while preserving the existing visible quick-select order. 
- - `src/types/dashboard-view-model.d.ts` and `src/hooks/use-dashboard-controller.ts` now type preset actions with `DashboardDatePreset` instead of broad strings. -- Validation: - - `npm run check` - - `npm run test:architecture` - - `npm run check:deps` - - `npm_config_cache=/tmp/ttdash-npm-cache npm run test:unit:coverage` - - `npm run build:app` - - `npm run verify:package` - - `PLAYWRIGHT_TEST_PORT=3016 npm_config_cache=/tmp/ttdash-npm-cache npm run test:e2e` - - `coderabbit review --agent -t uncommitted -c AGENTS.md` -> round 1: 0 issues, round 2: 0 issues diff --git a/docs/review/performance-review.md b/docs/review/performance-review.md deleted file mode 100644 index 4bc7c5e..0000000 --- a/docs/review/performance-review.md +++ /dev/null @@ -1,67 +0,0 @@ -# Performance Review - -## Kurzfazit - -Die Laufzeitperformance wirkt fuer einen lokalen Analytics-Dashboard-Use-Case aktuell brauchbar, aber der Code zeigt klare Skalierungsgrenzen: ein schweres Initial-Bundle, mehrere komplette Datenpasses pro Filterwechsel und einige bewusst teure Testpfade. - -## Was bereits gut ist - -- Sekundaere Oberflaechen wie `SettingsModal`, `DrillDownModal`, Forecast-Zoom und Help sind lazy geladen -- Viele teure Ableitungen sind bereits in `useMemo(...)` gekapselt -- Auto-Import und Background-Pfade sind zumindest gegen Parallelstarts abgesichert - -## Findings - -### H-01 - Das Initial-Bundle ist fuer ein lokales Dashboard weiterhin schwer - -**Evidenz:** `npm run build:app` - -Die Build-Ausgabe zeigt mehrere grosse Chunks: - -- `charts-vendor` -> `422.02 kB` raw / `119.59 kB` gzip -- `index` -> `212.32 kB` raw / `57.55 kB` gzip -- `react-vendor` -> `200.38 kB` raw / `64.43 kB` gzip -- `motion-vendor` -> `125.85 kB` raw / `41.08 kB` gzip -- `i18n` -> `119.68 kB` raw / `36.70 kB` gzip - -Die Lazy-Splits helfen fuer modale Nebenwege, aber das Kern-Dashboard bleibt sehr chart-lastig und schwer. 
Auf schwachen Geraeten oder in eingebetteten Browsern ist das der wahrscheinlichste Performance-Flaschenhals beim Kaltstart. - -**Empfehlung:** kritische First-View-Visualisierungen priorisieren und weniger wichtige Charts spaeter oder sektional nachladen. - -### M-01 - Ein Filterwechsel loest mehrere komplette Datenpasses aus - -**Referenzen:** `src/hooks/use-dashboard-filters.ts:64-114`, `src/hooks/use-dashboard-filters.ts:164-201`, `src/hooks/use-computed-metrics.ts:8-29`, `shared/dashboard-domain.js:261-520` - -Pro Aenderung an Filtern oder ViewMode wird die Datenmenge mehrfach komplett durchlaufen: - -- sortieren -- Datumsfilter -- Providerfilter -- Modellfilter -- Aggregation -- Metrics -- Provider- und Modellaggregation -- Chart-Transforms -- Unique-Model-Listen - -Fuer kleine Datensaetze ist das okay. Fuer groessere Historien wird es teuer, weil viele Schritte nicht dieselben Vorberechnungen teilen. - -**Empfehlung:** gemeinsame Vorberechnungen zentralisieren, vor allem fuer provider/model-Indexes und bereits normalisierte Breakdown-Maps. - -### M-02 - Der Settings-Dialog koppelt UI-Opening an eine externe Registry-Abfrage - -**Referenzen:** `src/components/features/settings/SettingsModal.tsx:241-271`, `server.js:2208-2247` - -Beim Oeffnen des Settings-Dialogs wird der Toktrack-Versionstatus angefragt, und der Server kann dafuer `npm view` mit Timeout starten. Das ist logisch nachvollziehbar, aber es koppelt einen lokalen UI-Dialog an eine externe Netzabhaengigkeit. - -Der bestehende Cache entschraerft das, beseitigt das Grundmuster aber nicht. Offline oder in restriktiven Netzen fuehrt das zu wiederholter Fehlermeldungsarbeit und vermeidbarer Laufzeitkomplexitaet. - -**Empfehlung:** Version-Check optional auf explizite Nutzeraktion legen oder im Hintergrund einmal pro Session vorwaermen. 
- -### N-01 - Die langsamsten Frontend-Tests deuten auf komplexe UI-Inseln hin - -**Evidenz:** `npm run test:timings` - -Die langsamsten Frontend-Suites haengen an `SettingsModal`, `DrillDownModal`, `HeatmapCalendar`, Sortierungstabellen und Filterzugriffen. Das ist nicht nur ein Testthema, sondern ein Signal fuer komplexe UI-Inseln mit hoher Interaktions- und Renderlast. - -**Empfehlung:** grosse Interaktionskomponenten weiter zerlegen und bei UI-Hotspots systematisch zwischen Renderlogik, Datenlogik und Accessibility-Glue unterscheiden. diff --git a/docs/review/security-review.md b/docs/review/security-review.md deleted file mode 100644 index db270ef..0000000 --- a/docs/review/security-review.md +++ /dev/null @@ -1,63 +0,0 @@ -# Security Review - -## Kurzfazit - -Der aktuelle Stand ist fuer den Default-Loopback-Betrieb deutlich staerker als es die aeltere Pen-Test-Doku vermuten laesst: Host-Checks, Origin-Pruefung, Payload-Grenzen, Null-Byte-Abwehr, token-basierte API-Auth, restriktive Datei-Permissions und eine CSP ohne `unsafe-inline` fuer Styles sind vorhanden und getestet. Die verbleibenden Risiken liegen vor allem bei kompromittierten Prozessen desselben OS-Users und bei veralteter Security-Dokumentation. - -## Was bereits gut ist - -- Nicht-loopback Bind braucht explizites Opt-in ueber `TTDASH_ALLOW_REMOTE=1` -- Nicht-loopback Bind braucht zusaetzlich `TTDASH_REMOTE_TOKEN` -- Loopback-API-Requests brauchen einen per Start generierten lokalen Session-Token -- Mutation-Requests werden ueber Host- und Origin-Validierung abgesichert -- Null-Byte-Pfade werden abgefangen, ohne den Server zu beenden -- Oversized Upload- und Report-Requests werden sauber mit `413` behandelt -- Persistierte Dateien und App-Directories werden mit restriktiven Rechten geschrieben -- Die CSP trennt Element- und Attribut-Styles, erlaubt Stylesheet-Elemente nur ueber `self` bzw. 
HTML-Nonce und blockiert Style-Attribute ueber `style-src-attr 'none'` -- Diese Schutzmechanismen sind nicht nur im Code sichtbar, sondern auch in Integrationstests verankert - -## Findings - -### H-01 - `TTDASH_ALLOW_REMOTE=1` bleibt ein Vollvertrauensmodus ohne Authentifizierung - -**Referenzen:** `server/runtime.js:10-21`, `server/http-utils.js:152-233`, `server.js:1608-1765`, `server.js:2482-2852` - -Sobald Remote-Binding explizit aktiviert wird, existiert weiterhin keine echte Authentifizierung. Die Autorisierung stuetzt sich dann nur noch auf Host- und Origin-Konsistenz. Das schuetzt gegen Browser-Cross-Site-Szenarien, aber nicht gegen einfache nicht-browserseitige Clients auf demselben Netz, die passende `Host`- und `Origin`-Header setzen. - -Das ist fuer den Default-Modus kein akuter Bug, aber ein klares Security-Design-Risiko fuer alle Nutzer, die das Tool absichtlich ins Netz haengen. - -**Empfehlung:** Remote-Bind nur mit Token-basierter Auth oder einem separaten abgesicherten Mode erlauben. - -**Aktueller Stand:** In `docs/review/fixed-findings.md` als `security-review.md / H-01` geschlossen. Remote-Bind verlangt `TTDASH_REMOTE_TOKEN`, und Remote-API-Requests laufen durch die zentrale Token-Auth. - -### M-01 - Lokale Read-Endpoints sind im gleichen Host-Kontext offen - -**Referenzen:** `server.js:2503-2588`, `server.js:2769-2777` - -`/api/usage`, `/api/settings`, `/api/runtime` und `/api/toktrack/version-status` waren fuer jeden Prozess erreichbar, der den Loopback-Server ansprechen konnte. Fuer eine lokale Single-User-App war das ein verstaendlicher Tradeoff, aber die Bedrohungsannahme war stark: "andere lokale Prozesse sind vertrauenswuerdig". - -**Aktueller Stand:** In `docs/review/fixed-findings.md` als `security-review.md / M-01` geschlossen. Der Loopback-Server generiert pro Start einen lokalen Session-Token, oeffnet bzw. 
dokumentiert eine einmalige Bootstrap-URL, setzt daraus ein HttpOnly/SameSite-Cookie und verlangt danach Auth fuer alle API-Endpoints. Background-Registry und Testserver speichern die notwendige Session-Metadaten nur in restriktiven User-Config-Dateien. - -**Restrisiko:** Ein kompromittierter Prozess mit denselben OS-User-Rechten kann weiterhin Terminalausgaben, User-Config-Dateien oder Browserprofile angreifen. Vollstaendige Isolation gegen diesen Angreifertyp wuerde einen OS-naeheren Transport wie Unix Domain Sockets, Named Pipes mit ACLs oder eine native Desktop-Shell erfordern. - -### N-01 - Die CSP erlaubt weiterhin Inline-Styles - -**Referenzen:** `server.js:49-56` - -Die gesetzte CSP enthielt `style-src 'self' 'unsafe-inline'`. Das war kein unmittelbarer Exploit-Nachweis, vergroesserte aber die Angriffsoberflaeche, falls spaeter ungewollte Style-Injektionen oder unsaubere HTML-Renderpfade dazukommen. - -**Empfehlung:** mittelfristig auf style hashes oder reine Stylesheet-basierte Ausgabe umstellen. - -**Aktueller Stand:** In `docs/review/fixed-findings.md` als `security-review.md / N-01` geschlossen. Die CSP enthaelt kein `unsafe-inline` mehr fuer Styles, HTML-Antworten erhalten pro Response eine Nonce und ein passendes `ttdash-csp-nonce` Meta-Tag, `style-src-elem` erlaubt nur `self` bzw. die Nonce, und `style-src-attr 'none'` blockiert echte Inline-Style-Attribute. - -**Restrisiko:** JavaScript-gesteuerte Style-Properties bleiben fuer React, Recharts und Motion erlaubt. Das ist bewusst, weil diese Updates nicht der blockierte HTML-Style-Attributpfad sind und die bestehende Dashboard-Optik sowie Animationen ohne Security-Gewinn sonst umfangreich ersetzt werden muessten. 
- -### N-02 - Die vorhandene Pen-Test-Doku ist fachlich nicht mehr auf dem aktuellen Stand - -**Referenzen:** `docs/security/pentest-2026-04-17.md:3-11`, `tests/integration/server-api-guards.test.ts:7-110`, `server.js:2818-2866` - -Die alte Doku nennt `%00`-DoS, fehlende Host-Pruefung und fehlende Origin-Pruefung als aktuelle Hauptbefunde. Der aktuelle Code und die aktuellen Guards-Tests zeigen aber, dass diese Punkte inzwischen abgesichert sind. - -Das ist kein Runtime-Bug, aber ein Security-Governance-Problem: veraltete Security-Dokumente fuehren zu falschem Vertrauen oder falscher Alarmierung. - -**Empfehlung:** Security-Dokumente versionieren oder mit einem klaren "historisch / superseded" Hinweis versehen. diff --git a/docs/review/server-review.md b/docs/review/server-review.md deleted file mode 100644 index 451a9e2..0000000 --- a/docs/review/server-review.md +++ /dev/null @@ -1,55 +0,0 @@ -# Server Review - -## Kurzfazit - -Der Server ist funktional robust fuer eine lokale Single-Binary-Node-Runtime. Die fruehere Entrypoint-Konzentration wurde deutlich reduziert: `server.js` ist heute nur noch der ausfuehrbare CLI/Bin-Shim, waehrend `server/app-runtime.js` die Runtime-Komposition uebernimmt und CLI, Startup-Shell, HTTP-Lifecycle, Auth, Router, Persistenz, Auto-Import und Background-Betrieb in fokussierten Runtime-Modulen liegen. 
- -## Was bereits gut ist - -- Host-, Origin- und Payload-Grenzen sind aktiv und getestet -- Persistenz nutzt atomische Schreibpfade und Cross-Process-Locks -- Background-Instanzen, Logfiles und Dateirechte sind nicht nur "best effort", sondern explizit mitgedacht -- Reporting, Auto-Import und Background-Betrieb haben klare Fehler- und Timeout-Strategien -- `server.js` exportiert keine Test- oder Runtime-Helper-API mehr und bleibt durch eine Architektur-Guardrail auf den ausfuehrbaren Shim begrenzt - -## Findings - -### H-01 - Zu viele Server-Subsysteme leben im Entrypoint - -**Referenzen:** `server.js` insgesamt, besonders `322-542`, `655-940`, `1665-1765`, `1784-2451`, `2482-2975` - -Das Entrypoint-Modul traegt Persistenz, File Locks, Background-Registry, CLI, Auto-Import, Runner-Diagnostik, HTTP-Router, Static Serving und Shutdown in einem Kontext. Diese Kopplung verlangsamt Reviews und macht lokale Refactors teuer. - -**Empfehlung:** innere Runtime-Helfer in eigene Module verschieben und `server.js` auf Komposition reduzieren. - -**Aktueller Stand:** In `docs/review/fixed-findings.md` als `server-review.md / H-01` geschlossen. CLI-Parsing, Startup-Ausgabe, Browser-Open, lokale Auth-Session-Metadaten und HTTP-Lifecycle/Shutdown sind aus dem Entrypoint herausgezogen. Nach `server-review.md / M-01` umfasst `server.js` nur noch den `require.main`-Startpfad und delegiert die Runtime-Komposition an `server/app-runtime.js`. - -### M-01 - Der produktive Entrypoint exportiert einen breiten `__test__`-API-Schatten - -**Referenzen:** `server.js:2935-2962` - -Fuer Tests werden viele interne Helfer direkt aus `server.js` exportiert. Das ist pragmatisch, macht das Produktionsmodul aber implizit zu einer halb-oeffentlichen Utility-Sammlung. Mit wachsender Codebasis entsteht daraus schnell eine "nicht offiziell oeffentliche, aber faktisch stabile" API. - -**Empfehlung:** Testziele aus `server.js` in importierbare Runtime-Module verschieben und dort direkt testen. 
- -**Aktueller Stand:** In `docs/review/fixed-findings.md` als `server-review.md / M-01` geschlossen. Die Server-helper-Tests importieren die Runtime-Module direkt, die Playwright-Testserver-Komposition nutzt `server/app-runtime.js`, und `server.js` exportiert weder `__test__` noch andere produktive Runtime-Helper. - -### M-02 - Globale Runtime-Flags und Caches erschweren lokale Isolation - -**Referenzen:** `server.js:93-101`, `1759-1774`, `2208-2247`, `2271-2451` - -Zustaende wie `startupAutoLoadCompleted`, `runtimePort`, `runtimeUrl`, `autoImportRunning`, `autoImportStreamRunning`, `latestToktrackVersionCache` und `latestToktrackVersionLookupPromise` sind zentral und mutable. Fuer die aktuelle App ist das noch beherrschbar, aber es koppelt Nebenwirkungen ueber mehrere Funktionsbereiche. - -**Empfehlung:** zumindest die Toktrack- und Auto-Import-Laufzeit in dedizierte Service-Objekte kapseln. - -**Aktueller Stand:** In `docs/review/fixed-findings.md` als `server-review.md / M-02` geschlossen. Runtime-Snapshot, Startup-Auto-Load-Status, Auto-Import-Lease und Toktrack-Version-Cache liegen jetzt in pro Runtime instanziierten Service-Objekten; der HTTP-Router besitzt kein eigenes Auto-Import-Stream-Flag mehr. - -### N-01 - Die Serverbasis ist strukturell staerker als es ihre Dateiform vermuten laesst - -**Referenzen:** `server/runtime.js`, `server/http-utils.js`, `tests/integration/server-api-guards.test.ts`, `tests/integration/server-background.test.ts` - -Positiv auffaellig ist, dass wesentliche Sicherheits- und Runtime-Helfer bereits aus `server.js` herausgezogen wurden. Diese Richtung ist richtig und sollte konsequent weitergefuehrt werden. - -**Empfehlung:** `server/runtime.js` und `server/http-utils.js` als Muster fuer weitere Extraktionen verwenden. - -**Aktueller Stand:** In `docs/review/fixed-findings.md` als `server-review.md / N-01` geschlossen. 
Die Host-/Origin-/Content-Type-Request-Policy liegt jetzt in `server/http-request-guards.js`, waehrend `server/http-utils.js` als kompatible Fassade fuer Body-, Response- und Router-Helfer erhalten bleibt. diff --git a/docs/review/test-review.md b/docs/review/test-review.md deleted file mode 100644 index 7cd9a0a..0000000 --- a/docs/review/test-review.md +++ /dev/null @@ -1,94 +0,0 @@ -# Test Review - -## Kurzfazit - -Die Teststrategie ist stark: klare Layer, viele gezielte Frontend- und Integrationstests, gute Accessibility-Abdeckung und brauchbare Runtime-Regressionen. Die groessten aktuellen Testprobleme liegen bei Messbarkeit, Flakiness und Testdauer, nicht bei fehlender Ernsthaftigkeit. - -## Was bereits gut ist - -- Die vier Testlayer sind sauber dokumentiert und tatsaechlich im Repo sichtbar -- Server-Guards, Background-Betrieb und Auto-Import haben echte Integrationstests -- Frontend-Tests pruefen nicht nur Rendern, sondern Sprache, Motion und Accessibility -- Die Test-Timings werden aktiv ueber ein eigenes Script ausgewertet - -## Findings - -### H-01 - Die Architektur-Suite enthaelt einen echten Timeout-Fall an der Flake-Grenze - -**Status:** Behoben, siehe `docs/review/fixed-findings.md` -> `test-review.md / H-01`. - -**Referenzen:** `tests/architecture/frontend-layers.test.ts:3-12` - -`npm run test:architecture` fiel im Gesamtlauf aus, weil `hooks must not depend on components` den `5000ms` Timeout riss. Der isolierte Re-Run derselben Datei bestand, aber der langsamste Fall lag bei etwa `4950ms`. - -Das ist kein semantischer Architekturverstoss, aber ein instabiles Signal. Genau solche Grenzfaelle untergraben das Vertrauen in CI-Fehler. - -**Empfehlung:** Timeout anheben oder die Archunit-Pruefung leichter machen, bevor daraus wiederkehrende CI-Flakes werden. - -### H-02 - Die Coverage-Zahl bildet die produktive Runtime nur teilweise ab - -**Status:** Behoben, siehe `docs/review/fixed-findings.md` -> `test-review.md / H-02`. 
- -**Referenzen:** `vitest.config.ts:27-44` - -Die Coverage-Includes decken nur: - -- `src/hooks/**/*.ts` -- `src/lib/**/*.ts` -- `src/components/Dashboard.tsx` -- `usage-normalizer.js` - -Damit fehlen in der gemeldeten Quote unter anderem: - -- `server.js` -- `server/**` -- `shared/**` -- fast alle Komponenten ausser `Dashboard.tsx` - -Die gemeldeten `76.27 / 65.71 / 76.43 / 78.61` sind also technisch korrekt, aber strategisch irrefuehrend, wenn man sie als Produkt-Coverage liest. - -**Empfehlung:** separate Server-Coverage fuer Spawn-Pfade einfuehren und die Includes auf die realen Runtime-Schwerpunkte erweitern. - -### M-01 - Dead Code und Coverage-Luecken werden von den Guardrails nicht sichtbar gemacht - -**Status:** Behoben, siehe `docs/review/fixed-findings.md` -> `test-review.md / M-01`. - -**Referenzen:** `src/hooks/use-theme.ts:1-21`, `src/hooks/use-provider-limits.ts:1-17`, `.dependency-cruiser.cjs:12-21` - -Es gibt mindestens zwei Hooks ohne produktive Importe und mit `0%` Coverage. Gleichzeitig lief `dependency-cruiser` trotz `no-orphans-src` Regel ohne Hinweis durch. - -Das ist wichtig, weil tote oder veraltete Pfade so laenger unauffaellig im Repo bleiben. - -**Empfehlung:** Orphan-Detection schaerfer pruefen oder zusaetzlich ein dediziertes Dead-Code-Werkzeug in den Review-/CI-Prozess aufnehmen. - -### M-02 - Testdauer konzentriert sich auf wenige Hotspots - -**Status:** Behoben, siehe `docs/review/fixed-findings.md` -> `test-review.md / M-02`. 
- -**Evidenz:** `npm run test:timings` - -Langsamste Suites: - -- `tests/integration/server-background.test.ts` -> `5.785s` -- `tests/integration/server-auto-import.test.ts` -> `4.521s` -- `tests/unit/server-helpers-runner-process.test.ts` -> `2.778s` -- `tests/frontend/settings-modal-language.test.tsx` -> `2.443s` -- `tests/frontend/drill-down-modal-motion.test.tsx` -> `2.013s` - -Langsamster Einzeltest: - -- `rejects parallel auto-import starts before launching a second toktrack runner` -> `3.326s` - -Die Hotspots sind plausibel, aber sie zeigen klar, welche Testpfade kuenftig zuerst entkoppelt oder fokussiert werden sollten. - -**Empfehlung:** langsame Prozess- und UI-Pfade weiter isolieren, damit neue Regressionen dort nicht unverhaeltnismaessig teuer werden. - -### M-03 - Die E2E-Abdeckung ist funktional stark, aber in einer grossen Monolith-Datei konzentriert - -**Status:** Behoben, siehe `docs/review/fixed-findings.md` -> `test-review.md / M-03`. - -**Referenzen:** `tests/e2e/dashboard.spec.ts` insgesamt, `734` Zeilen, `7` Tests - -Die Playwright-Suite prueft wichtige Journeys, aber fast alles lebt in einer Datei. Das erschwert Navigation, Review und selektive Optimierung. Mit weiterem Wachstum wird daraus schnell ein langsamer Catch-all statt fokussierter Journeys. - -**Empfehlung:** nach Themen splitten, z. B. `load-and-upload`, `settings-and-backups`, `forecast-and-reporting`, `filters-and-navigation`. diff --git a/docs/testing.md b/docs/testing.md index 48295c8..37e271c 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -31,11 +31,15 @@ Architecture constraints are documented separately in [`docs/architecture.md`](. ## Frontend Defaults +- `vitest.setup.node.ts` is the minimal shared cleanup for Node-only projects. 
- `vitest.setup.frontend.ts` is the shared place for `jsdom` defaults such as: - i18n bootstrapping - `matchMedia` - `ResizeObserver` - default `IntersectionObserver` +- The `frontend` Vitest project owns the shared `30s` jsdom timeout because coverage instrumentation + can make a few otherwise-synchronous render tests exceed Vitest's default `5s` timeout under load. + Do not add per-test timeout overrides for routine frontend render tests. - Only override `IntersectionObserver` locally when the test explicitly verifies reveal or visibility behavior. - Only call `initI18n(...)` inside a test file when locale switching is itself part of the assertion. @@ -59,7 +63,9 @@ Architecture constraints are documented separately in [`docs/architecture.md`](. - motion/reveal behavior - For large server helper or integration suites, group tests by subsystem so Vitest can schedule them more efficiently. - Keep background-process integration files focused by behavior; the background Vitest project intentionally uses a small worker cap instead of one serial catch-all file or unbounded process fan-out. -- Keep Playwright files grouped by end-to-end journey, such as load/upload, forecast/filter interaction, settings/backups, reporting, and command palette behavior. Share authentication, server reset, seeding, and download helpers through `tests/e2e/helpers.ts` instead of creating new browser catch-all files. +- Keep Playwright files grouped by end-to-end journey, such as load/upload, forecast/filter interaction, settings/backups, reporting, and command palette behavior. Import `test` and `expect` from `tests/e2e/fixtures.ts` so each worker gets its own server, port, auth session, and runtime directory. Share authentication, server reset, seeding, and download helpers through `tests/e2e/helpers.ts` instead of creating new browser catch-all files. 
+- Every Playwright spec must reset state through `resetAppState(...)` or prepare an isolated dashboard through `prepareDashboard(...)` before it asserts app behavior. `tests/unit/playwright-config.test.ts` fails when a new spec skips that isolation contract. +- `tests/unit/playwright-config.test.ts` also guards the small E2E journey list, the shared fixture import, CI worker cap, and Playwright reporter paths. Update that contract intentionally when adding a new browser journey. ## Choosing the Right Layer @@ -96,9 +102,9 @@ For `tests/architecture`, prefer the shared source graph helper for simple file, ## Coverage Scope -`npm run test:unit:coverage` reports product-runtime coverage. The configured coverage scope intentionally includes frontend runtime modules, the local server runtime, shared runtime contracts, and `usage-normalizer.js` instead of only the historically high-signal frontend subset. +`npm run test:vitest:coverage` reports product-runtime coverage. `npm run test:unit:coverage` is kept as the underlying compatibility command. The configured coverage scope intentionally includes frontend runtime modules, the local server runtime, shared runtime contracts, and `usage-normalizer.js` instead of only the historically high-signal frontend subset. -The coverage and timing commands use explicit `dot` and `junit` Vitest reporters. Keep those reporters on both scripts so non-interactive gates emit compact progress and do not depend on silent reporter paths. +The coverage and timing commands use explicit `dot` and `junit` Vitest reporters. Keep those reporters on both scripts so non-interactive gates emit compact progress and do not depend on silent reporter paths. They intentionally write separate JUnit files, `test-results/vitest-coverage.junit.xml` and `test-results/vitest-timings.junit.xml`, so coverage and timing diagnostics do not contend for the same report path. 
The global thresholds are ratchets for that broader denominator: @@ -129,24 +135,81 @@ Prioritize targeted branch coverage in runtime-heavy modules before adding anoth ## Local Commands - Required pre-PR gate: run `npm run verify:full` before opening a PR to ensure all tests and checks pass. +- Faster non-coverage fast path on a local machine with enough CPU: `PLAYWRIGHT_TEST_PORT=3016 npm run verify:full:parallel` - Faster inner-loop gate: `npm run verify` +- Static gate only: `npm run test:static` +- All Vitest projects without coverage: `npm run test:vitest` - Architecture tests only: `npm run test:architecture` - Dependency graph gate: `npm run check:deps` -- Coverage-only unit/integration gate: `npm run test:unit:coverage` -- Playwright only: `PLAYWRIGHT_TEST_PORT=3016 npm run test:e2e` +- Coverage-only unit/integration gate: `npm run test:vitest:coverage` +- Per-project timing budget pass: `npm run test:timings:projects` +- Three-run timing benchmark: `npm run test:timings:benchmark` +- Playwright only, with a fresh app build: `PLAYWRIGHT_TEST_PORT=3016 npm run test:e2e:parallel` +- CI-style Playwright smoke: `npm run test:e2e:ci` +- Serial local mirror of the CI gate: `npm run verify:ci` +- Optional parallel local gate without Playwright: `npm run verify:parallel` ## Architecture Guardrails - Keep hook files under `src/hooks/` reachable from the frontend app entrypoint; `npm run test:architecture` fails on unused production hooks so dead hook helpers do not silently remain at `0%` coverage. - Timing diagnostics: `npm run test:timings` -`npm run test:timings` generates a fresh Vitest JUnit report and prints the slowest suites and tests. Use it after larger test additions or refactors to catch new hotspots early. +`npm run test:timings` generates a fresh Vitest JUnit report, prints the slowest suites and tests, and +lists warning-level timing budget entries. Use it after larger test additions or refactors to catch +new hotspots early. 
+ +`npm run test:timings:budget` runs the same Vitest project set with a separate JUnit output and fails +when a suite exceeds `20s` or an individual test exceeds `12s`. These hard limits are intentionally +above the current baseline so they catch pathological regressions without making local and CI runs +fragile under normal CPU variance. CI applies the same hard budget to each Vitest matrix job by +evaluating the JUnit report already produced by that job, so the guard does not add another full +Vitest pass. Do not run `test:timings` in parallel with another Vitest command that writes the same JUnit file. +`test:timings:budget` uses `test-results/vitest-timing-budget.junit.xml` so it can be run separately +from the diagnostic report when needed. + +`npm run test:timings:projects` runs each Vitest project separately, writes project-scoped JUnit files +such as `test-results/vitest-frontend.timing.junit.xml`, and checks the same `20s` suite / `12s` +test hard budget after each project. Use it when you need to identify whether a regression is in +unit, frontend, integration, or background tests without adding another monolithic timing report. + +`npm run test:timings:benchmark` repeats the project timing pass three times and prints median and +worst observed duration per project. Use those repeated measurements before changing worker counts +or moving tests between layers; a single run is not enough to separate real improvements from cache, +CPU, or I/O variance. Repeated runs write `timing-run-N` JUnit files so their reports never overwrite +the normal project, coverage, or diagnostic reports. + +The frontend Vitest project intentionally uses `maxWorkers: '80%'`. Local Phase-2 measurements showed +that the old `50%` setting left jsdom work under-parallelized, while `100%` saturated the machine and +made imports, setup, tests, and environment time much worse. Re-benchmark with +`npm run test:timings:benchmark -- --projects=frontend` before changing that worker cap. 
+ +`npm run verify:parallel` is an optional local fast path. It overlaps the static gate, API +integration tests, and `build:app`, then runs the high-contention suites in separate waves: unit, +frontend/jsdom, architecture, and background-process integration. That keeps the internally +parallel Vitest projects from over-subscribing the same cores and keeps real background servers away +from the jsdom/build storm. `verify:package` runs after those waves succeed. +`npm run verify:full:parallel` adds `test:e2e:ci` to the final wave. The canonical serial gates stay +unchanged; use the parallel gates for local feedback when the machine has enough CPU and the +per-project JUnit report paths must stay isolated. The parallel fast path intentionally avoids a +second coverage-instrumented Vitest pass, so pair it with `npm run test:vitest:coverage` or the +serial `npm run verify:full` when coverage thresholds are part of the validation. + +Use `node scripts/run-parallel-gate.js --dry-run --e2e` after changing gate scripts to inspect the +task waves and their declared report outputs before running the expensive full gate. The script +fails before spawning children if two tasks in the same wave declare the same output path, and a +successful run prints a per-task timing summary so local bottlenecks are visible without opening +JUnit reports. ## CI Notes - Keep workflow test paths aligned with the split test structure. +- The main CI workflow is intentionally a DAG: `static`, Vitest project matrix, `coverage`, and `build` can run independently; `package-smoke` and `e2e` depend only on the `production-dist` artifact from `build`. +- `CI Required` is the stable branch-protection check for `ci.yml`; it runs after every required CI job with `always()` and fails on failed, skipped, or cancelled dependencies. +- Keep CI report artifacts job-scoped so parallel jobs do not overwrite each other. 
Vitest project jobs upload `test-reports-vitest-`, coverage uploads `coverage-reports`, and Playwright uploads `test-reports-e2e`. +- Each Vitest matrix job evaluates its own JUnit report with `scripts/report-test-timings.js` and the + same `20s` suite / `12s` test hard budget used by `npm run test:timings:budget`. - Do not point CI jobs at deleted catch-all files such as old monolithic `server-helpers` or frontend regression suites. - Windows smoke coverage should stay focused on the small, platform-relevant helper suites rather than full subprocess-heavy integration runs unless the workflow explicitly targets Windows process behavior. diff --git a/package.json b/package.json index 134dcff..3f5734f 100644 --- a/package.json +++ b/package.json @@ -18,33 +18,49 @@ "dev": "vite", "build:app": "vite build", "build": "npm run format:check && npm run lint && npm run build:app", - "check": "npm run format:check && npm run lint && npm run lint:docstrings && npm run check:deps && tsc --noEmit", + "check": "npm run test:static", "check:deps": "dependency-cruiser --config .dependency-cruiser.cjs src shared server server.js usage-normalizer.js", - "format": "prettier . --write", - "format:check": "prettier . --check", - "lint": "eslint .", - "lint:docstrings": "eslint src server.js usage-normalizer.js shared --ext .ts,.tsx,.js,.d.ts", - "lint:fix": "eslint . --fix", + "format": "prettier . --write --cache --cache-location .cache/prettier/write --cache-strategy content", + "format:check": "prettier . --check --cache --cache-location .cache/prettier/check --cache-strategy content", + "lint": "eslint . --cache --cache-location .cache/eslint/full --cache-strategy content", + "lint:docstrings": "eslint src server.js usage-normalizer.js shared --ext .ts,.tsx,.js,.d.ts --cache --cache-location .cache/eslint/docstrings --cache-strategy content", + "lint:fix": "eslint . 
--fix --cache --cache-location .cache/eslint/full --cache-strategy content", "preview": "vite preview", "start": "node server.js", "start:test-server": "node scripts/start-test-server.js", - "test": "npm run test:architecture && npm run test:unit", - "test:architecture": "vitest run --project architecture", - "test:unit": "vitest run --project unit --project frontend --project integration --project integration-background", + "test": "npm run test:vitest", + "test:architecture": "vitest run --project architecture --reporter=default --reporter=junit --outputFile.junit=./test-results/vitest-architecture.junit.xml", + "test:static": "npm run format:check && npm run lint && npm run check:deps && npm run typecheck", + "test:unit": "vitest run --project unit --project frontend --project integration --project integration-background --reporter=default --reporter=junit --outputFile.junit=./test-results/vitest-main.junit.xml", "test:unit:watch": "vitest --project unit --project frontend --project integration --project integration-background", - "test:unit:coverage": "vitest run --coverage --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest.junit.xml", - "test:timings": "vitest run --coverage --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest.junit.xml && node scripts/report-test-timings.js", - "test:e2e": "npm run build:app && playwright test", - "test:e2e:ci": "playwright test --workers=1", + "test:vitest": "npm run test:architecture && npm run test:unit", + "test:vitest:architecture": "npm run test:architecture", + "test:vitest:coverage": "npm run test:unit:coverage", + "test:vitest:unit": "vitest run --project unit --reporter=default --reporter=junit --outputFile.junit=./test-results/vitest-unit.junit.xml", + "test:vitest:frontend": "vitest run --project 
frontend --reporter=default --reporter=junit --outputFile.junit=./test-results/vitest-frontend.junit.xml", + "test:vitest:integration": "vitest run --project integration --reporter=default --reporter=junit --outputFile.junit=./test-results/vitest-integration.junit.xml", + "test:vitest:integration-background": "vitest run --project integration-background --reporter=default --reporter=junit --outputFile.junit=./test-results/vitest-integration-background.junit.xml", + "test:unit:coverage": "vitest run --coverage --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-coverage.junit.xml", + "test:timings": "vitest run --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-timings.junit.xml && node scripts/report-test-timings.js ./test-results/vitest-timings.junit.xml", + "test:timings:budget": "vitest run --project unit --project frontend --project integration --project integration-background --reporter=dot --reporter=junit --outputFile.junit=./test-results/vitest-timing-budget.junit.xml && node scripts/report-test-timings.js ./test-results/vitest-timing-budget.junit.xml --max-suite-seconds=20 --max-test-seconds=12", + "test:timings:projects": "node scripts/run-vitest-project-timings.js", + "test:timings:benchmark": "node scripts/run-vitest-project-timings.js --repeat=3", + "test:e2e": "npm run test:e2e:parallel", + "test:e2e:parallel": "npm run build:app && playwright test", + "test:e2e:ci": "playwright test --workers=2", "test:all": "npm run verify:full", + "typecheck": "tsc --noEmit --incremental --tsBuildInfoFile .cache/tsc/tsconfig.tsbuildinfo", "docs:screenshots": "node scripts/capture-readme-screenshots.js", "deps:graph": "dependency-cruiser --config .dependency-cruiser.cjs -T archi src shared server server.js usage-normalizer.js", "pack:dry-run": "npm 
pack --dry-run", - "verify": "npm run check && npm run test:architecture && npm run test:unit && npm run build:app && npm run verify:package", + "verify": "npm run test:static && npm run test:vitest && npm run build:app && npm run verify:package", "verify:package": "node scripts/verify-package.js", "verify:registry-install": "node scripts/verify-registry-install.js", - "verify:release": "npm run check && npm run test:architecture && npm run test:unit:coverage && npm run build:app && npm run verify:package", - "verify:full": "npm run verify:release && npm run test:e2e:ci", + "verify:release": "npm run test:static && npm run test:architecture && npm run test:vitest:coverage && npm run build:app && npm run verify:package", + "verify:ci": "npm run verify:release && npm run test:e2e:ci", + "verify:full": "npm run verify:ci", + "verify:parallel": "node scripts/run-parallel-gate.js", + "verify:full:parallel": "node scripts/run-parallel-gate.js --e2e", "prepare": "npm run build:app" }, "files": [ diff --git a/playwright.config.ts b/playwright.config.ts index a002ebe..37494c2 100644 --- a/playwright.config.ts +++ b/playwright.config.ts @@ -6,8 +6,11 @@ const baseURL = `http://${host}:${port}` export default defineConfig({ testDir: './tests/e2e', - fullyParallel: false, - workers: process.env.CI ? 1 : undefined, + fullyParallel: true, + // fullyParallel is safe because tests/e2e/fixtures.ts starts a separate app per + // worker. process.env.CI caps workers at 2 to avoid hosted-runner CPU + // contention; pass --workers=1 if CI flakiness needs to be debugged. + workers: process.env.CI ? 
2 : undefined, timeout: 30_000, reporter: [ ['list'], @@ -20,10 +23,4 @@ export default defineConfig({ screenshot: 'only-on-failure', video: 'retain-on-failure', }, - webServer: { - command: 'npm run start:test-server', - url: baseURL, - reuseExistingServer: false, - timeout: 120_000, - }, }) diff --git a/scripts/report-test-timings.js b/scripts/report-test-timings.js index 544fefb..92a5093 100644 --- a/scripts/report-test-timings.js +++ b/scripts/report-test-timings.js @@ -4,7 +4,9 @@ const fs = require('fs'); const path = require('path'); const ROOT = path.resolve(__dirname, '..'); -const DEFAULT_JUNIT_PATH = path.join(ROOT, 'test-results', 'vitest.junit.xml'); +const DEFAULT_JUNIT_PATH = path.join(ROOT, 'test-results', 'vitest-timings.junit.xml'); +const DEFAULT_WARN_SUITE_SECONDS = 2; +const DEFAULT_WARN_TEST_SECONDS = 0.5; function parseAttributes(tag) { const attributes = {}; @@ -40,6 +42,65 @@ function parseJUnit(xml) { return { suites, cases }; } +function parsePositiveNumber(value, optionName) { + const parsed = Number(value); + + if (!Number.isFinite(parsed) || parsed <= 0) { + throw new Error(`${optionName} must be a positive number.`); + } + + return parsed; +} + +function parseArgs(argv) { + const options = { + junitPath: DEFAULT_JUNIT_PATH, + maxSuiteSeconds: null, + maxTestSeconds: null, + warnSuiteSeconds: DEFAULT_WARN_SUITE_SECONDS, + warnTestSeconds: DEFAULT_WARN_TEST_SECONDS, + }; + let positionalPath = null; + + for (const arg of argv) { + if (arg.startsWith('--max-suite-seconds=')) { + options.maxSuiteSeconds = parsePositiveNumber( + arg.slice('--max-suite-seconds='.length), + '--max-suite-seconds', + ); + } else if (arg.startsWith('--max-test-seconds=')) { + options.maxTestSeconds = parsePositiveNumber( + arg.slice('--max-test-seconds='.length), + '--max-test-seconds', + ); + } else if (arg.startsWith('--warn-suite-seconds=')) { + options.warnSuiteSeconds = parsePositiveNumber( + arg.slice('--warn-suite-seconds='.length), + 
'--warn-suite-seconds', + ); + } else if (arg.startsWith('--warn-test-seconds=')) { + options.warnTestSeconds = parsePositiveNumber( + arg.slice('--warn-test-seconds='.length), + '--warn-test-seconds', + ); + } else if (arg === '--help' || arg === '-h') { + options.help = true; + } else if (arg.startsWith('--')) { + throw new Error(`Unknown option: ${arg}`); + } else if (!positionalPath) { + positionalPath = path.resolve(process.cwd(), arg); + } else { + throw new Error(`Unexpected argument: ${arg}`); + } + } + + if (positionalPath) { + options.junitPath = positionalPath; + } + + return options; +} + function formatSeconds(value) { return `${value.toFixed(3)}s`; } @@ -59,38 +120,123 @@ function printRanking(title, rows, formatRow) { process.stdout.write('\n'); } -function main() { - const junitPath = process.argv[2] - ? path.resolve(process.cwd(), process.argv[2]) - : DEFAULT_JUNIT_PATH; - - if (!fs.existsSync(junitPath)) { - process.stderr.write(`JUnit report not found: ${junitPath}\n`); - process.stderr.write('Run vitest with the junit reporter first.\n'); - process.exitCode = 1; - return; +function filterTimedRows(rows, threshold) { + if (threshold === null || threshold === undefined) { + return []; } - const xml = fs.readFileSync(junitPath, 'utf8'); - const { suites, cases } = parseJUnit(xml); + return rows + .filter((row) => Number.isFinite(row.time) && row.time > threshold) + .sort((left, right) => right.time - left.time); +} - const slowSuites = suites +function buildTimingReport(junit, options = {}) { + const slowSuites = junit.suites .filter((suite) => Number.isFinite(suite.time)) .sort((left, right) => right.time - left.time) .slice(0, 10); - const slowCases = cases + const slowCases = junit.cases .filter((testCase) => Number.isFinite(testCase.time)) .sort((left, right) => right.time - left.time) .slice(0, 15); - printRanking('Slowest suites', slowSuites, (suite) => { + return { + slowSuites, + slowCases, + suiteWarnings: filterTimedRows(junit.suites, 
options.warnSuiteSeconds), + testWarnings: filterTimedRows(junit.cases, options.warnTestSeconds), + suiteFailures: filterTimedRows(junit.suites, options.maxSuiteSeconds), + testFailures: filterTimedRows(junit.cases, options.maxTestSeconds), + }; +} + +function printBudgetRows(title, rows, formatRow) { + if (rows.length === 0) { + return; + } + + printRanking(title, rows, formatRow); +} + +function printUsage(stdout) { + stdout.write(`Usage: node scripts/report-test-timings.js [junit-file] [options] + +Options: + --warn-suite-seconds=N Print suites slower than N seconds. Default: ${DEFAULT_WARN_SUITE_SECONDS} + --warn-test-seconds=N Print tests slower than N seconds. Default: ${DEFAULT_WARN_TEST_SECONDS} + --max-suite-seconds=N Fail when any suite is slower than N seconds. + --max-test-seconds=N Fail when any test is slower than N seconds. +`); +} + +function run(argv = process.argv.slice(2), streams = process) { + let options; + + try { + options = parseArgs(argv); + } catch (error) { + streams.stderr.write(`${error instanceof Error ? 
error.message : String(error)}\n`); + return 1; + } + + if (options.help) { + printUsage(streams.stdout); + return 0; + } + + if (!fs.existsSync(options.junitPath)) { + streams.stderr.write(`JUnit report not found: ${options.junitPath}\n`); + streams.stderr.write('Run vitest with the junit reporter first.\n'); + return 1; + } + + const xml = fs.readFileSync(options.junitPath, 'utf8'); + const report = buildTimingReport(parseJUnit(xml), options); + + printRanking('Slowest suites', report.slowSuites, (suite) => { + return `${formatSeconds(suite.time)} | ${suite.name}`; + }); + + printRanking('Slowest tests', report.slowCases, (testCase) => { + return `${formatSeconds(testCase.time)} | ${testCase.suite} | ${testCase.name}`; + }); + + printBudgetRows('Suites over warning budget', report.suiteWarnings, (suite) => { + return `${formatSeconds(suite.time)} | ${suite.name}`; + }); + + printBudgetRows('Tests over warning budget', report.testWarnings, (testCase) => { + return `${formatSeconds(testCase.time)} | ${testCase.suite} | ${testCase.name}`; + }); + + printBudgetRows('Suites over hard budget', report.suiteFailures, (suite) => { return `${formatSeconds(suite.time)} | ${suite.name}`; }); - printRanking('Slowest tests', slowCases, (testCase) => { + printBudgetRows('Tests over hard budget', report.testFailures, (testCase) => { return `${formatSeconds(testCase.time)} | ${testCase.suite} | ${testCase.name}`; }); + + if (report.suiteFailures.length > 0 || report.testFailures.length > 0) { + streams.stderr.write('Test timing budget exceeded.\n'); + return 1; + } + + return 0; +} + +function main() { + process.exitCode = run(); +} + +if (require.main === module) { + main(); } -main(); +module.exports = { + buildTimingReport, + parseArgs, + parseJUnit, + run, +}; diff --git a/scripts/run-parallel-gate.js b/scripts/run-parallel-gate.js new file mode 100644 index 0000000..270e699 --- /dev/null +++ b/scripts/run-parallel-gate.js @@ -0,0 +1,252 @@ +#!/usr/bin/env node + +const { 
spawn } = require('child_process'); + +const npmCommand = process.platform === 'win32' ? 'npm.cmd' : 'npm'; + +function createTask(name, script, outputPaths = []) { + return { + name, + command: npmCommand, + args: ['run', script], + outputPaths, + }; +} + +function createParallelGatePlan({ e2e = false } = {}) { + const buildAndApiGroup = [ + createTask('static', 'test:static'), + createTask('integration', 'test:vitest:integration', [ + 'test-results/vitest-integration.junit.xml', + ]), + createTask('build', 'build:app'), + ]; + const unitGroup = [ + createTask('unit', 'test:vitest:unit', ['test-results/vitest-unit.junit.xml']), + ]; + const frontendGroup = [ + createTask('frontend', 'test:vitest:frontend', ['test-results/vitest-frontend.junit.xml']), + ]; + const architectureGroup = [ + createTask('architecture', 'test:vitest:architecture', [ + 'test-results/vitest-architecture.junit.xml', + ]), + ]; + const backgroundGroup = [ + createTask('integration-background', 'test:vitest:integration-background', [ + 'test-results/vitest-integration-background.junit.xml', + ]), + ]; + const finalGroup = [createTask('package-smoke', 'verify:package')]; + + if (e2e) { + finalGroup.push(createTask('e2e', 'test:e2e:ci', ['playwright-report/', 'test-results/'])); + } + + return [ + buildAndApiGroup, + unitGroup, + frontendGroup, + architectureGroup, + backgroundGroup, + finalGroup, + ]; +} + +function parseArgs(argv) { + const options = { + dryRun: false, + e2e: false, + help: false, + }; + + for (const arg of argv) { + if (arg === '--dry-run') { + options.dryRun = true; + } else if (arg === '--e2e') { + options.e2e = true; + } else if (arg === '--help' || arg === '-h') { + options.help = true; + } else { + throw new Error(`Unknown option: ${arg}`); + } + } + + return options; +} + +function findParallelOutputCollisions(plan) { + const collisions = []; + + plan.forEach((group, groupIndex) => { + const outputOwners = new Map(); + + for (const task of group) { + for (const 
outputPath of task.outputPaths || []) { + const owners = outputOwners.get(outputPath) || []; + owners.push(task.name); + outputOwners.set(outputPath, owners); + } + } + + for (const [outputPath, tasks] of outputOwners.entries()) { + if (tasks.length > 1) { + collisions.push({ + group: groupIndex + 1, + outputPath, + tasks, + }); + } + } + }); + + return collisions; +} + +function printUsage(stdout) { + stdout.write(`Usage: node scripts/run-parallel-gate.js [options] + +Options: + --e2e Run the Playwright CI smoke after the build group succeeds. + --dry-run Print the task graph without running commands. +`); +} + +function prefixChunk(stream, taskName, chunk) { + for (const line of chunk.toString().split(/\r?\n/)) { + if (line.length > 0) { + stream.write(`[${taskName}] ${line}\n`); + } + } +} + +function formatDuration(durationMs) { + return `${(durationMs / 1000).toFixed(2)}s`; +} + +function runTask(task, spawnImpl = spawn, streams = process) { + return new Promise((resolve) => { + streams.stdout.write(`[${task.name}] starting ${task.command} ${task.args.join(' ')}\n`); + const startedAt = Date.now(); + + const child = spawnImpl(task.command, task.args, { + cwd: process.cwd(), + env: process.env, + stdio: ['ignore', 'pipe', 'pipe'], + }); + + child.stdout.on('data', (chunk) => prefixChunk(streams.stdout, task.name, chunk)); + child.stderr.on('data', (chunk) => prefixChunk(streams.stderr, task.name, chunk)); + child.on('error', (error) => { + const durationMs = Date.now() - startedAt; + streams.stderr.write(`[${task.name}] failed to start: ${error.message}\n`); + resolve({ durationMs, task, status: 1 }); + }); + child.on('close', (status) => { + const durationMs = Date.now() - startedAt; + streams.stdout.write( + `[${task.name}] exited with ${status ?? 1} after ${formatDuration(durationMs)}\n`, + ); + resolve({ durationMs, task, status: status ?? 
1 }); + }); + }); +} + +async function runTaskGroup(tasks, spawnImpl, streams) { + const results = await Promise.all(tasks.map((task) => runTask(task, spawnImpl, streams))); + return { + results, + status: results.every((result) => result.status === 0) ? 0 : 1, + }; +} + +function printTimingSummary(results, streams = process) { + if (results.length === 0) { + return; + } + + streams.stdout.write('\nparallel gate timing summary\n'); + + for (const result of results) { + streams.stdout.write( + ` ${result.task.name}: status=${result.status} duration=${formatDuration( + result.durationMs, + )}\n`, + ); + } +} + +async function run(argv = process.argv.slice(2), streams = process, spawnImpl = spawn) { + let options; + + try { + options = parseArgs(argv); + } catch (error) { + streams.stderr.write(`${error instanceof Error ? error.message : String(error)}\n`); + return 1; + } + + if (options.help) { + printUsage(streams.stdout); + return 0; + } + + const plan = createParallelGatePlan({ e2e: options.e2e }); + const outputCollisions = findParallelOutputCollisions(plan); + + if (outputCollisions.length > 0) { + streams.stderr.write('Parallel gate output collision detected.\n'); + outputCollisions.forEach((collision) => { + streams.stderr.write( + ` group ${collision.group}: ${collision.outputPath} <- ${collision.tasks.join(', ')}\n`, + ); + }); + return 1; + } + + if (options.dryRun) { + plan.forEach((group, index) => { + streams.stdout.write(`group ${index + 1}\n`); + group.forEach((task) => { + streams.stdout.write(` ${task.name}: ${task.command} ${task.args.join(' ')}\n`); + if (task.outputPaths.length > 0) { + streams.stdout.write(` outputs: ${task.outputPaths.join(', ')}\n`); + } + }); + }); + return 0; + } + + const allResults = []; + + for (const [index, group] of plan.entries()) { + streams.stdout.write(`\nparallel gate group ${index + 1}/${plan.length}\n`); + const groupResult = await runTaskGroup(group, spawnImpl, streams); + 
allResults.push(...groupResult.results); + + if (groupResult.status !== 0) { + printTimingSummary(allResults, streams); + return groupResult.status; + } + } + + printTimingSummary(allResults, streams); + return 0; +} + +function main() { + run().then((status) => { + process.exitCode = status; + }); +} + +if (require.main === module) { + main(); +} + +module.exports = { + createParallelGatePlan, + findParallelOutputCollisions, + parseArgs, + run, +}; diff --git a/scripts/run-vitest-project-timings.js b/scripts/run-vitest-project-timings.js new file mode 100644 index 0000000..1536101 --- /dev/null +++ b/scripts/run-vitest-project-timings.js @@ -0,0 +1,298 @@ +#!/usr/bin/env node + +const fs = require('fs'); +const path = require('path'); +const { spawnSync } = require('child_process'); + +const ROOT = path.resolve(__dirname, '..'); +const DEFAULT_REPORT_DIR = path.join(ROOT, 'test-results'); +const DEFAULT_PROJECTS = [ + 'architecture', + 'unit', + 'frontend', + 'integration', + 'integration-background', +]; +const DEFAULT_MAX_SUITE_SECONDS = 20; +const DEFAULT_MAX_TEST_SECONDS = 12; + +function parsePositiveInteger(value, optionName) { + const parsed = Number(value); + + if (!Number.isInteger(parsed) || parsed <= 0) { + throw new Error(`${optionName} must be a positive integer.`); + } + + return parsed; +} + +function parsePositiveNumber(value, optionName) { + const parsed = Number(value); + + if (!Number.isFinite(parsed) || parsed <= 0) { + throw new Error(`${optionName} must be a positive number.`); + } + + return parsed; +} + +function parseProjects(value) { + const projects = value + .split(',') + .map((project) => project.trim()) + .filter(Boolean); + + if (projects.length === 0) { + throw new Error('--projects must list at least one Vitest project.'); + } + + const unknownProjects = projects.filter((project) => !DEFAULT_PROJECTS.includes(project)); + + if (unknownProjects.length > 0) { + throw new Error(`Unknown Vitest project: ${unknownProjects.join(', ')}`); 
+ } + + return projects; +} + +function parseArgs(argv) { + const options = { + dryRun: false, + help: false, + maxSuiteSeconds: DEFAULT_MAX_SUITE_SECONDS, + maxTestSeconds: DEFAULT_MAX_TEST_SECONDS, + projects: DEFAULT_PROJECTS, + repeat: 1, + reportDir: DEFAULT_REPORT_DIR, + }; + + for (const arg of argv) { + if (arg === '--dry-run') { + options.dryRun = true; + } else if (arg === '--help' || arg === '-h') { + options.help = true; + } else if (arg.startsWith('--max-suite-seconds=')) { + options.maxSuiteSeconds = parsePositiveNumber( + arg.slice('--max-suite-seconds='.length), + '--max-suite-seconds', + ); + } else if (arg.startsWith('--max-test-seconds=')) { + options.maxTestSeconds = parsePositiveNumber( + arg.slice('--max-test-seconds='.length), + '--max-test-seconds', + ); + } else if (arg.startsWith('--projects=')) { + options.projects = parseProjects(arg.slice('--projects='.length)); + } else if (arg.startsWith('--repeat=')) { + options.repeat = parsePositiveInteger(arg.slice('--repeat='.length), '--repeat'); + } else if (arg.startsWith('--report-dir=')) { + options.reportDir = path.resolve(process.cwd(), arg.slice('--report-dir='.length)); + } else { + throw new Error(`Unknown option: ${arg}`); + } + } + + return options; +} + +function getReportPath(project, iteration, repeat, reportDir) { + const suffix = repeat === 1 ? 'timing' : `timing-run-${iteration}`; + return path.join(reportDir, `vitest-${project}.${suffix}.junit.xml`); +} + +function buildProjectRuns(options) { + const runs = []; + + for (let iteration = 1; iteration <= options.repeat; iteration += 1) { + for (const project of options.projects) { + runs.push({ + iteration, + project, + reportPath: getReportPath(project, iteration, options.repeat, options.reportDir), + }); + } + } + + return runs; +} + +function getNpxCommand(platform = process.platform) { + return platform === 'win32' ? 
'npx.cmd' : 'npx'; +} + +function buildVitestCommand(run, platform = process.platform) { + return { + command: getNpxCommand(platform), + args: [ + 'vitest', + 'run', + '--project', + run.project, + '--reporter=dot', + '--reporter=junit', + `--outputFile.junit=${run.reportPath}`, + ], + }; +} + +function buildBudgetCommand(run, options) { + return { + command: process.execPath, + args: [ + path.join(ROOT, 'scripts', 'report-test-timings.js'), + run.reportPath, + `--max-suite-seconds=${options.maxSuiteSeconds}`, + `--max-test-seconds=${options.maxTestSeconds}`, + ], + }; +} + +function median(values) { + if (values.length === 0) { + return 0; + } + + const sorted = [...values].sort((left, right) => left - right); + const middle = Math.floor(sorted.length / 2); + + if (sorted.length % 2 === 1) { + return sorted[middle]; + } + + return (sorted[middle - 1] + sorted[middle]) / 2; +} + +function formatSeconds(ms) { + return `${(ms / 1000).toFixed(2)}s`; +} + +function printUsage(stdout) { + stdout.write(`Usage: node scripts/run-vitest-project-timings.js [options] + +Options: + --projects=a,b Comma-separated Vitest projects. Default: ${DEFAULT_PROJECTS.join(',')} + --repeat=N Run each project N times. Default: 1 + --report-dir=PATH Directory for per-run JUnit reports. Default: test-results + --max-suite-seconds=N Fail when any suite is slower than N seconds. Default: ${DEFAULT_MAX_SUITE_SECONDS} + --max-test-seconds=N Fail when any test is slower than N seconds. Default: ${DEFAULT_MAX_TEST_SECONDS} + --dry-run Print commands without running them. +`); +} + +function runCommand(command, args, spawnSyncImpl) { + return spawnSyncImpl(command, args, { + cwd: ROOT, + env: process.env, + stdio: 'inherit', + }); +} + +function writeLaunchError(stderr, label, error) { + const message = error instanceof Error ? 
error.message : String(error); + stderr.write(`Failed to launch ${label}: ${message}\n`); +} + +function run(argv = process.argv.slice(2), streams = process, spawnSyncImpl = spawnSync) { + let options; + + try { + options = parseArgs(argv); + } catch (error) { + streams.stderr.write(`${error instanceof Error ? error.message : String(error)}\n`); + return 1; + } + + if (options.help) { + printUsage(streams.stdout); + return 0; + } + + if (!options.dryRun) { + fs.mkdirSync(options.reportDir, { recursive: true }); + } + + const results = new Map(); + + for (const projectRun of buildProjectRuns(options)) { + const vitestCommand = buildVitestCommand(projectRun); + const budgetCommand = buildBudgetCommand(projectRun, options); + + streams.stdout.write( + `\n[${projectRun.project}] run ${projectRun.iteration}/${options.repeat} -> ${path.relative( + ROOT, + projectRun.reportPath, + )}\n`, + ); + + if (options.dryRun) { + streams.stdout.write(` ${vitestCommand.command} ${vitestCommand.args.join(' ')}\n`); + streams.stdout.write(` ${budgetCommand.command} ${budgetCommand.args.join(' ')}\n`); + continue; + } + + const startedAt = Date.now(); + const testResult = runCommand(vitestCommand.command, vitestCommand.args, spawnSyncImpl); + const durationMs = Date.now() - startedAt; + + if (testResult.error) { + writeLaunchError(streams.stderr, 'vitest', testResult.error); + return 1; + } + + if (testResult.status !== 0) { + return testResult.status || 1; + } + + const budgetResult = runCommand(budgetCommand.command, budgetCommand.args, spawnSyncImpl); + + if (budgetResult.error) { + writeLaunchError(streams.stderr, 'budget check', budgetResult.error); + return 1; + } + + if (budgetResult.status !== 0) { + return budgetResult.status || 1; + } + + const projectResults = results.get(projectRun.project) || []; + projectResults.push(durationMs); + results.set(projectRun.project, projectResults); + } + + if (options.dryRun) { + return 0; + } + + streams.stdout.write('\nVitest project 
timing summary\n'); + + for (const project of options.projects) { + const projectResults = results.get(project) || []; + const worst = projectResults.length > 0 ? Math.max(...projectResults) : 0; + streams.stdout.write( + ` ${project}: runs=${projectResults.length} median=${formatSeconds( + median(projectResults), + )} worst=${formatSeconds(worst)}\n`, + ); + } + + return 0; +} + +function main() { + process.exitCode = run(); +} + +if (require.main === module) { + main(); +} + +module.exports = { + buildBudgetCommand, + buildProjectRuns, + buildVitestCommand, + getNpxCommand, + getReportPath, + median, + parseArgs, + run, +}; diff --git a/scripts/start-test-server.js b/scripts/start-test-server.js index 0fef0b8..08eb146 100644 --- a/scripts/start-test-server.js +++ b/scripts/start-test-server.js @@ -4,7 +4,19 @@ const fs = require('fs'); const path = require('path'); const root = path.resolve(__dirname, '..'); -const runtimeRoot = path.join(root, '.tmp-playwright', 'app'); +const playwrightRoot = path.join(root, '.tmp-playwright'); +const runtimeRoot = process.env.PLAYWRIGHT_TEST_RUNTIME_ROOT + ? 
path.resolve(process.env.PLAYWRIGHT_TEST_RUNTIME_ROOT) + : path.join(root, '.tmp-playwright', 'app'); + +function isPathInside(parent, child) { + const relative = path.relative(parent, child); + return relative !== '' && !relative.startsWith('..') && !path.isAbsolute(relative); +} + +if (!isPathInside(playwrightRoot, runtimeRoot)) { + throw new Error(`Refusing to delete unsafe Playwright runtime root: ${runtimeRoot}`); +} fs.rmSync(runtimeRoot, { recursive: true, force: true }); fs.mkdirSync(path.join(runtimeRoot, 'cache'), { recursive: true }); diff --git a/scripts/verify-main-ci.js b/scripts/verify-main-ci.js index 6eb1982..d93e5dd 100644 --- a/scripts/verify-main-ci.js +++ b/scripts/verify-main-ci.js @@ -18,6 +18,7 @@ function parseArgs(argv) { workflow: null, branch: 'main', sha: null, + requiredJob: null, retries: DEFAULT_RETRIES, retryDelayMs: DEFAULT_RETRY_DELAY_MS, }; @@ -50,6 +51,12 @@ function parseArgs(argv) { continue; } + if (arg === '--required-job' && next) { + options.requiredJob = next; + index += 1; + continue; + } + if (arg === '--retries' && next) { options.retries = Number.parseInt(next, 10); index += 1; @@ -65,7 +72,7 @@ function parseArgs(argv) { if (!options.repo || !options.workflow || !options.sha) { fail( - 'Usage: node scripts/verify-main-ci.js --repo --workflow --sha [--branch main] [--retries N] [--retry-delay-ms MS]', + 'Usage: node scripts/verify-main-ci.js --repo --workflow --sha [--branch main] [--required-job ] [--retries N] [--retry-delay-ms MS]', ); } @@ -120,10 +127,59 @@ async function fetchWorkflowRuns(options, token) { return response.json(); } +async function fetchWorkflowRunJobs(options, token, runId) { + const jobs = []; + let page = 1; + + while (true) { + const url = new URL(`https://api.github.com/repos/${options.repo}/actions/runs/${runId}/jobs`); + url.searchParams.set('per_page', '100'); + url.searchParams.set('page', String(page)); + + const response = await fetch(url, { + headers: { + Accept: 
'application/vnd.github+json', + Authorization: `Bearer ${token}`, + 'X-GitHub-Api-Version': '2022-11-28', + 'User-Agent': 'ttdash-release-workflow', + }, + }); + + if (!response.ok) { + const body = await response.text(); + const normalizedBody = body.replace(/\s+/g, ' ').trim(); + const maxPreviewLength = 200; + const bodyPreview = + normalizedBody.length > maxPreviewLength + ? `${normalizedBody.slice(0, maxPreviewLength)}…` + : normalizedBody; + const previewSuffix = bodyPreview ? ` Response preview: ${bodyPreview}` : ''; + throw new Error(`GitHub API request failed with ${response.status}.${previewSuffix}`); + } + + const payload = await response.json(); + if (!Array.isArray(payload.jobs)) { + return jobs; + } + + jobs.push(...payload.jobs); + + if (payload.jobs.length < 100) { + return jobs; + } + + page += 1; + } +} + function describeRun(run) { return `${run.name} (${run.status}/${run.conclusion ?? 'pending'})`; } +function describeJob(job) { + return `${job.name} (${job.status}/${job.conclusion ?? 'pending'})`; +} + async function main() { const token = getToken(); if (!token) { @@ -144,6 +200,30 @@ async function main() { log( `CI workflow run is still in progress: ${describeRun(run)} (attempt ${attempt}/${options.retries}).`, ); + } else if (options.requiredJob) { + const jobs = await fetchWorkflowRunJobs(options, token, run.id); + const job = jobs.find((candidate) => candidate.name === options.requiredJob); + + if (!job) { + const jobNames = jobs + .map((candidate) => candidate.name) + .sort() + .join(', '); + fail( + `Required CI job "${options.requiredJob}" was not found in workflow run ${run.id} for ${options.sha}. 
Available jobs: ${jobNames || 'none'}.`, + ); + } else if (job.status !== 'completed') { + log( + `Required CI job is still in progress: ${describeJob(job)} (attempt ${attempt}/${options.retries}).`, + ); + } else if (job.conclusion === 'success') { + log(`Verified required CI job for ${options.sha}: ${describeJob(job)}.`); + return; + } else { + fail( + `Required CI job "${options.requiredJob}" for ${options.sha} completed with ${job.conclusion}. Release aborted.`, + ); + } } else if (run.conclusion === 'success') { log(`Verified CI success for ${options.sha}: ${describeRun(run)}.`); return; @@ -156,6 +236,12 @@ async function main() { } } + if (options.requiredJob) { + fail( + `Required CI job "${options.requiredJob}" for ${options.sha} did not reach a successful conclusion in time.`, + ); + } + fail(`CI workflow for ${options.sha} did not reach a successful conclusion in time.`); } diff --git a/server/auto-import-runtime.js b/server/auto-import-runtime.js index b7af8b4..2effccc 100644 --- a/server/auto-import-runtime.js +++ b/server/auto-import-runtime.js @@ -1,4 +1,9 @@ const { createExclusiveRuntimeLease, createExpiringAsyncCache } = require('./runtime-state'); +const { createAutoImportCommandRunner } = require('./auto-import-runtime/command-runner'); +const { createAutoImportExecutor } = require('./auto-import-runtime/import-executor'); +const { createLatestToktrackVersionLookup } = require('./auto-import-runtime/latest-version'); +const { createAutoImportMessages } = require('./auto-import-runtime/messages'); +const { createToktrackRunnerResolver } = require('./auto-import-runtime/toktrack-runners'); function createAutoImportRuntime({ fs, @@ -24,830 +29,76 @@ function createAutoImportRuntime({ toktrackLatestLookupTimeoutMs, toktrackLatestCacheSuccessTtlMs, toktrackLatestCacheFailureTtlMs, + probeLog, }) { - function createAutoImportMessageEvent(key, vars = {}) { - return { - key, - vars, - }; - } - - function createAutoImportError(message, key, vars = {}) { 
- const error = new Error(message); - error.messageKey = key; - error.messageVars = vars; - return error; - } - - function summarizeCommandError(error, fallbackMessage = 'Unknown error') { - if (error instanceof Error && error.message.trim()) { - return error.message.trim(); - } - - return fallbackMessage; - } - - function getTimeoutSeconds(timeoutMs) { - return Math.max(1, Math.ceil(Number(timeoutMs) / 1000)); - } - - function toAutoImportErrorEvent(error) { - if (error && typeof error.messageKey === 'string') { - return createAutoImportMessageEvent(error.messageKey, error.messageVars || {}); - } - - return createAutoImportMessageEvent('errorPrefix', { - message: error && error.message ? error.message : 'Unknown error', - }); - } - - function formatAutoImportMessageEvent(event) { - switch (event?.key) { - case 'startingLocalImport': - return 'Starting toktrack import...'; - case 'warmingUpPackageRunner': - return `Preparing ${event.vars?.runner || 'package runner'} (the first run may take longer while toktrack is downloaded)...`; - case 'loadingUsageData': - return `Loading usage data via ${event.vars?.command || 'unknown command'}...`; - case 'processingUsageData': - return `Processing usage data... (${event.vars?.seconds || 0}s)`; - case 'autoImportRunning': - return 'An auto-import is already running. 
Please wait.'; - case 'noRunnerFound': - return 'No local toktrack, Bun, or npm exec installation found.'; - case 'localToktrackVersionMismatch': - return `Local toktrack v${event.vars?.detectedVersion || 'unknown'} does not match the required v${event.vars?.expectedVersion || toktrackVersion}.`; - case 'localToktrackFailed': - return `Local toktrack could not be started: ${event.vars?.message || 'Unknown error'}`; - case 'packageRunnerFailed': - return `No compatible bunx or npm exec runner succeeded: ${event.vars?.message || 'Unknown error'}`; - case 'packageRunnerWarmupTimedOut': - return `${event.vars?.runner || 'The package runner'} took longer than ${event.vars?.seconds || 0}s to prepare toktrack. The first run may need to download the package first. Please try again or verify network access.`; - case 'toktrackVersionCheckFailed': - return `Toktrack was found, but the version check failed: ${event.vars?.message || 'Unknown error'}`; - case 'toktrackExecutionFailed': - return `Toktrack failed while loading usage data: ${event.vars?.message || 'Unknown error'}`; - case 'toktrackExecutionTimedOut': - return `Toktrack did not finish loading usage data within ${event.vars?.seconds || 0}s via ${event.vars?.runner || 'the selected runner'}. 
Please try again.`; - case 'toktrackInvalidJson': - return `Toktrack returned invalid JSON output: ${event.vars?.message || 'Unknown error'}`; - case 'toktrackInvalidData': - return `Toktrack returned data that TTDash could not process: ${event.vars?.message || 'Unknown error'}`; - case 'errorPrefix': - return `Error: ${event.vars?.message || 'Unknown error'}`; - default: - return 'Auto-import update'; - } - } - - function getExecutableName(baseName, forceWindows = isWindows) { - if (!forceWindows) { - return baseName; - } - - switch (baseName) { - case 'npm': - return 'npm.cmd'; - case 'bun': - case 'bunx': - return 'bun.exe'; - case 'npx': - return 'npx.cmd'; - default: - return baseName; - } - } - - function spawnCommand(command, args, options = {}) { - return spawnCrossPlatform(command, args, { - ...options, - windowsHide: options.windowsHide ?? true, - }); - } - - function commandExists(command, args = ['--version']) { - return new Promise((resolve) => { - const child = spawnCommand(command, args, { stdio: 'ignore' }); - child.on('error', () => resolve(false)); - child.on('close', (code) => resolve(code === 0)); - }); - } - - function parseToktrackVersionOutput(output) { - return String(output) - .trim() - .replace(/^toktrack\s+/, ''); - } - - function getLocalToktrackDisplayCommand(forceWindows = isWindows) { - if (processObject.env.TTDASH_TOKTRACK_LOCAL_BIN) { - return `${toktrackLocalBin} daily --json`; - } - - return forceWindows - ? 'node_modules\\.bin\\toktrack.cmd daily --json' - : 'node_modules/.bin/toktrack daily --json'; - } - - function createLocalToktrackRunner() { - return { - command: toktrackLocalBin, - prefixArgs: [], - env: processObject.env, - method: 'local', - label: 'local toktrack', - displayCommand: getLocalToktrackDisplayCommand(), - }; - } - - function createBunxToktrackRunner() { - return { - command: getExecutableName('bunx'), - prefixArgs: isWindows ? 
['x', toktrackPackageSpec] : [toktrackPackageSpec], - env: processObject.env, - method: 'bunx', - label: 'bunx', - displayCommand: `bunx ${toktrackPackageSpec} daily --json`, - }; - } - - function createNpxToktrackRunner() { - return { - command: getExecutableName('npx'), - prefixArgs: ['--yes', toktrackPackageSpec], - env: { - ...processObject.env, - npm_config_cache: npxCacheDir, - }, - method: 'npm', - label: 'npm exec', - displayCommand: `npx --yes ${toktrackPackageSpec} daily --json`, - }; - } - - function isPackageToktrackRunner(runner) { - return runner?.method === 'bunx' || runner?.method === 'npm'; - } - - function getToktrackRunnerTimeouts(runner) { - if (isPackageToktrackRunner(runner)) { - return { - probeMs: toktrackPackageRunnerProbeTimeoutMs, - versionCheckMs: toktrackPackageRunnerVersionCheckTimeoutMs, - importMs: toktrackPackageRunnerImportTimeoutMs, - }; - } - - return { - probeMs: toktrackLocalRunnerProbeTimeoutMs, - versionCheckMs: toktrackLocalRunnerVersionCheckTimeoutMs, - importMs: toktrackLocalRunnerImportTimeoutMs, - }; - } - - function formatCommandForDisplay(command, args = []) { - return [command, ...args].join(' ').trim(); - } - - function createCommandError( - message, - { command, args = [], stdout = '', stderr = '', exitCode = null, timedOut = false } = {}, - ) { - const error = new Error(message); - error.command = command; - error.args = args; - error.stdout = stdout; - error.stderr = stderr; - error.exitCode = exitCode; - error.timedOut = timedOut; - return error; - } - - function terminateChildProcess(child) { - if (!child || child.exitCode !== null) { - return; - } - - child.kill('SIGTERM'); - - const forceKillTimeout = setTimeout(() => { - if (child.exitCode === null) { - child.kill('SIGKILL'); - } - }, processTerminationGraceMs); - - child.once('close', () => { - clearTimeout(forceKillTimeout); - }); - } - - function runCommand( - command, - args, - { - env = processObject.env, - streamStderr = false, - onStderr, - 
signalOnClose, - timeoutMs = null, - } = {}, - ) { - return runCommandWithSpawn(command, args, { - env, - streamStderr, - onStderr, - signalOnClose, - timeoutMs, - spawnImpl: spawnCommand, - }); - } - - function runCommandWithSpawn( - command, - args, - { - env = processObject.env, - streamStderr = false, - onStderr, - signalOnClose, - timeoutMs = null, - spawnImpl = spawnCommand, - } = {}, - ) { - return new Promise((resolve, reject) => { - const child = spawnImpl(command, args, { - stdio: ['ignore', 'pipe', 'pipe'], - env, - }); - const commandLabel = formatCommandForDisplay(command, args); - - let stdout = ''; - let stderr = ''; - let finished = false; - let timeoutId = null; - let timeoutError = null; - - const settle = (handler, value) => { - if (finished) { - return; - } - finished = true; - if (timeoutId) { - clearTimeout(timeoutId); - } - handler(value); - }; - - if (signalOnClose) { - signalOnClose(() => terminateChildProcess(child)); - } - - if (typeof timeoutMs === 'number' && Number.isFinite(timeoutMs) && timeoutMs > 0) { - timeoutId = setTimeout(() => { - timeoutError = createCommandError( - `Command timed out after ${timeoutMs}ms: ${commandLabel}`, - { - command, - args, - stdout, - stderr, - timedOut: true, - }, - ); - terminateChildProcess(child); - }, timeoutMs); - } - - child.stdout.on('data', (chunk) => { - stdout += chunk.toString(); - }); - - child.stderr.on('data', (chunk) => { - const line = chunk.toString(); - stderr += line; - if (streamStderr && onStderr && line.trim()) { - onStderr(line.trimEnd()); - } - }); - - child.on('error', (error) => - settle( - reject, - createCommandError(error.message || `Could not start ${commandLabel}.`, { - command, - args, - stdout, - stderr, - }), - ), - ); - child.on('close', (code) => { - if (finished) { - return; - } - if (timeoutError) { - settle(reject, timeoutError); - return; - } - if (code === 0) { - settle(resolve, stdout.trimEnd()); - return; - } - settle( - reject, - createCommandError( - 
stderr.trim() || stdout.trim() || `Command exited with code ${code}: ${commandLabel}`, - { - command, - args, - stdout, - stderr, - exitCode: code, - }, - ), - ); - }); - }); - } - - function runToktrack( - runner, - args, - { streamStderr = false, onStderr, signalOnClose, timeoutMs = null } = {}, - ) { - return runCommand(runner.command, [...runner.prefixArgs, ...args], { - env: runner.env, - streamStderr, - onStderr, - signalOnClose, - timeoutMs, - }); - } - - async function probeToktrackRunner( - runner, - timeoutMs = getToktrackRunnerTimeouts(runner).probeMs, - ) { - try { - await runToktrack(runner, ['--version'], { timeoutMs }); - return { - ok: true, - errorMessage: null, - timedOut: false, - }; - } catch (error) { - const message = summarizeCommandError(error, `Could not start ${runner.label}.`); - console.warn(`Failed to probe ${runner.label}: ${message}`); - return { - ok: false, - errorMessage: message, - timedOut: Boolean(error?.timedOut), - }; - } - } - - async function resolveToktrackRunnerWithDiagnostics() { - const resolution = { - runner: null, - localVersionMismatch: null, - localFailure: null, - runnerFailures: [], - }; - - if (fs.existsSync(toktrackLocalBin)) { - const localRunner = createLocalToktrackRunner(); - - try { - const localVersion = parseToktrackVersionOutput( - await runToktrack(localRunner, ['--version'], { - timeoutMs: getToktrackRunnerTimeouts(localRunner).probeMs, - }), - ); - if (localVersion === toktrackVersion) { - resolution.runner = localRunner; - return resolution; - } - resolution.localVersionMismatch = { - detectedVersion: localVersion || 'unknown', - expectedVersion: toktrackVersion, - }; - } catch (error) { - resolution.localFailure = summarizeCommandError( - error, - 'The local toktrack binary could not be started.', - ); - } - } - - const bunxRunner = createBunxToktrackRunner(); - const bunxProbe = await probeToktrackRunner(bunxRunner); - if (bunxProbe.ok) { - resolution.runner = bunxRunner; - return resolution; - } - 
if (bunxProbe.errorMessage) { - resolution.runnerFailures.push({ - label: bunxRunner.label, - message: bunxProbe.errorMessage, - timedOut: bunxProbe.timedOut, - }); - } - - const npxRunner = createNpxToktrackRunner(); - const npxProbe = await probeToktrackRunner(npxRunner); - if (npxProbe.ok) { - resolution.runner = npxRunner; - return resolution; - } - if (npxProbe.errorMessage) { - resolution.runnerFailures.push({ - label: npxRunner.label, - message: npxProbe.errorMessage, - timedOut: npxProbe.timedOut, - }); - } - - return resolution; - } - - async function resolveToktrackRunner() { - const resolution = await resolveToktrackRunnerWithDiagnostics(); - return resolution.runner; - } - - function toAutoImportRunnerResolutionError(resolution) { - if (resolution.localVersionMismatch) { - return createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent( - 'localToktrackVersionMismatch', - resolution.localVersionMismatch, - ), - ), - 'localToktrackVersionMismatch', - resolution.localVersionMismatch, - ); - } - - if (resolution.localFailure) { - return createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('localToktrackFailed', { - message: resolution.localFailure, - }), - ), - 'localToktrackFailed', - { - message: resolution.localFailure, - }, - ); - } - - if (resolution.runnerFailures.length > 0) { - const timedOutRunnerFailures = resolution.runnerFailures.filter( - (failure) => failure.timedOut, - ); - if ( - timedOutRunnerFailures.length > 0 && - timedOutRunnerFailures.length === resolution.runnerFailures.length - ) { - const runners = timedOutRunnerFailures.map((failure) => failure.label).join(' / '); - const seconds = getTimeoutSeconds(toktrackPackageRunnerProbeTimeoutMs); - return createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('packageRunnerWarmupTimedOut', { - runner: runners, - seconds, - }), - ), - 'packageRunnerWarmupTimedOut', - { - runner: runners, - seconds, - 
}, - ); - } - - return createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('packageRunnerFailed', { - message: resolution.runnerFailures - .map((failure) => `${failure.label}: ${failure.message}`) - .join(' | '), - }), - ), - 'packageRunnerFailed', - { - message: resolution.runnerFailures - .map((failure) => `${failure.label}: ${failure.message}`) - .join(' | '), - }, - ); - } - - return createAutoImportError( - 'No local toktrack, Bun, or npm exec installation found.', - 'noRunnerFound', - ); - } - - function createAutoImportAlreadyRunningError() { - return createAutoImportError( - 'An auto-import is already running. Please wait.', - 'autoImportRunning', - ); - } - - const autoImportLease = createExclusiveRuntimeLease({ - createAlreadyRunningError: createAutoImportAlreadyRunningError, + const messages = createAutoImportMessages({ + toktrackVersion, }); - - const latestToktrackVersionStatusCache = createExpiringAsyncCache({ - load: async (timeoutMs = toktrackLatestLookupTimeoutMs) => { - try { - const latestVersion = String( - await runCommand( - getExecutableName('npm'), - ['view', `${toktrackPackageName}@latest`, 'version'], - { - env: { - ...processObject.env, - npm_config_cache: npxCacheDir, - }, - timeoutMs, - }, - ), - ).trim(); - - return { - configuredVersion: toktrackVersion, - latestVersion, - isLatest: latestVersion === toktrackVersion, - lookupStatus: 'ok', - }; - } catch (error) { - return { - configuredVersion: toktrackVersion, - latestVersion: null, - isLatest: null, - lookupStatus: 'failed', - message: - error instanceof Error && error.message.trim() - ? error.message.trim() - : 'Could not determine the latest toktrack version.', - }; - } - }, - getTtlMs: (value) => - value.lookupStatus === 'ok' - ? 
toktrackLatestCacheSuccessTtlMs - : toktrackLatestCacheFailureTtlMs, + const commandRunner = createAutoImportCommandRunner({ + processObject, + spawnCrossPlatform, + isWindows, + processTerminationGraceMs, + }); + const runnerResolver = createToktrackRunnerResolver({ + fs, + processObject, + commandRunner, + messages, + toktrackPackageSpec, + toktrackVersion, + toktrackLocalBin, + npxCacheDir, + isWindows, + toktrackLocalRunnerProbeTimeoutMs, + toktrackLocalRunnerVersionCheckTimeoutMs, + toktrackLocalRunnerImportTimeoutMs, + toktrackPackageRunnerProbeTimeoutMs, + toktrackPackageRunnerVersionCheckTimeoutMs, + toktrackPackageRunnerImportTimeoutMs, + probeLog, + }); + const latestVersionLookup = createLatestToktrackVersionLookup({ + createExpiringAsyncCache, + commandRunner, + processObject, + toktrackPackageName, + toktrackVersion, + npxCacheDir, + toktrackLatestLookupTimeoutMs, + toktrackLatestCacheSuccessTtlMs, + toktrackLatestCacheFailureTtlMs, + }); + const importExecutor = createAutoImportExecutor({ + createExclusiveRuntimeLease, + messages, + runnerResolver, + normalizeIncomingData, + withSettingsAndDataMutationLock, + writeData, + updateDataLoadState, }); - - async function lookupLatestToktrackVersion(timeoutMs = toktrackLatestLookupTimeoutMs) { - return latestToktrackVersionStatusCache.lookup(timeoutMs); - } - - function acquireAutoImportLease() { - return autoImportLease.acquire(); - } - - async function performAutoImport({ - source = 'auto-import', - onCheck = () => {}, - onProgress = () => {}, - onOutput = () => {}, - signalOnClose, - lease = null, - } = {}) { - const activeLease = lease || acquireAutoImportLease(); - let progressSeconds = 0; - const progressInterval = setInterval(() => { - progressSeconds += 5; - onProgress(createAutoImportMessageEvent('processingUsageData', { seconds: progressSeconds })); - }, 5000); - - try { - onCheck({ tool: 'toktrack', status: 'checking' }); - onProgress(createAutoImportMessageEvent('startingLocalImport')); - - const 
resolution = await resolveToktrackRunnerWithDiagnostics(); - const runner = resolution.runner; - if (!runner) { - const resolutionError = toAutoImportRunnerResolutionError(resolution); - if (resolutionError.messageKey === 'noRunnerFound') { - onCheck({ tool: 'toktrack', status: 'not_found' }); - } - throw resolutionError; - } - - if (isPackageToktrackRunner(runner)) { - onProgress( - createAutoImportMessageEvent('warmingUpPackageRunner', { - runner: runner.label, - }), - ); - } - - let versionResult; - try { - versionResult = await runToktrack(runner, ['--version'], { - timeoutMs: getToktrackRunnerTimeouts(runner).versionCheckMs, - }); - } catch (error) { - if (isPackageToktrackRunner(runner) && error?.timedOut) { - throw createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('packageRunnerWarmupTimedOut', { - runner: runner.label, - seconds: getTimeoutSeconds(getToktrackRunnerTimeouts(runner).versionCheckMs), - }), - ), - 'packageRunnerWarmupTimedOut', - { - runner: runner.label, - seconds: getTimeoutSeconds(getToktrackRunnerTimeouts(runner).versionCheckMs), - }, - ); - } - - throw createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('toktrackVersionCheckFailed', { - message: summarizeCommandError(error), - }), - ), - 'toktrackVersionCheckFailed', - { - message: summarizeCommandError(error), - }, - ); - } - - onCheck({ - tool: 'toktrack', - status: 'found', - method: runner.label, - version: parseToktrackVersionOutput(versionResult), - }); - onProgress( - createAutoImportMessageEvent('loadingUsageData', { - command: runner.displayCommand, - }), - ); - - let rawJson; - try { - rawJson = await runToktrack(runner, ['daily', '--json'], { - streamStderr: true, - onStderr: (line) => { - onOutput(line); - }, - signalOnClose, - timeoutMs: getToktrackRunnerTimeouts(runner).importMs, - }); - } catch (error) { - if (error?.timedOut) { - throw createAutoImportError( - formatAutoImportMessageEvent( - 
createAutoImportMessageEvent('toktrackExecutionTimedOut', { - runner: runner.label, - seconds: getTimeoutSeconds(getToktrackRunnerTimeouts(runner).importMs), - }), - ), - 'toktrackExecutionTimedOut', - { - runner: runner.label, - seconds: getTimeoutSeconds(getToktrackRunnerTimeouts(runner).importMs), - }, - ); - } - - throw createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('toktrackExecutionFailed', { - message: summarizeCommandError(error), - }), - ), - 'toktrackExecutionFailed', - { - message: summarizeCommandError(error), - }, - ); - } - - let parsedJson; - try { - parsedJson = JSON.parse(rawJson); - } catch (error) { - throw createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('toktrackInvalidJson', { - message: summarizeCommandError(error), - }), - ), - 'toktrackInvalidJson', - { - message: summarizeCommandError(error), - }, - ); - } - - let normalized; - try { - normalized = normalizeIncomingData(parsedJson); - } catch (error) { - throw createAutoImportError( - formatAutoImportMessageEvent( - createAutoImportMessageEvent('toktrackInvalidData', { - message: summarizeCommandError(error), - }), - ), - 'toktrackInvalidData', - { - message: summarizeCommandError(error), - }, - ); - } - - await withSettingsAndDataMutationLock(async () => { - await writeData(normalized); - await updateDataLoadState({ - lastLoadedAt: new Date().toISOString(), - lastLoadSource: source, - }); - }); - - return { - days: normalized.daily.length, - totalCost: normalized.totals.totalCost, - }; - } finally { - clearInterval(progressInterval); - activeLease.release(); - } - } - - async function runStartupAutoLoad({ - source = 'cli-auto-load', - log = console.log, - errorLog = console.error, - } = {}) { - log('Auto-load enabled, starting import...'); - - try { - const result = await performAutoImport({ - source, - onCheck: (event) => { - if (event.status === 'found') { - log(`toktrack found (${event.method}, 
v${event.version})`); - } - }, - onProgress: (event) => { - log(formatAutoImportMessageEvent(event)); - }, - onOutput: (line) => { - log(line); - }, - }); - - log(`Auto-load complete: imported ${result.days} days, ${result.totalCost}.`); - return result; - } catch (error) { - errorLog(`Auto-load failed: ${error.message}`); - errorLog('Dashboard will start without newly imported data.'); - return null; - } - } - - function getToktrackLatestLookupTimeoutMs() { - return toktrackLatestLookupTimeoutMs; - } - - function resetLatestToktrackVersionCache() { - latestToktrackVersionStatusCache.reset(); - } return { - acquireAutoImportLease, - commandExists, - createAutoImportMessageEvent, - formatAutoImportMessageEvent, - getExecutableName, - getLocalToktrackDisplayCommand, - getToktrackLatestLookupTimeoutMs, - getToktrackRunnerTimeouts, - isAutoImportRunning: autoImportLease.isActive, - lookupLatestToktrackVersion, - parseToktrackVersionOutput, - performAutoImport, - resetLatestToktrackVersionCache, - resolveToktrackRunner, - runCommandWithSpawn, - runStartupAutoLoad, - runToktrack, - toAutoImportErrorEvent, - toAutoImportRunnerResolutionError, + acquireAutoImportLease: importExecutor.acquireAutoImportLease, + commandExists: commandRunner.commandExists, + createAutoImportMessageEvent: messages.createAutoImportMessageEvent, + formatAutoImportMessageEvent: messages.formatAutoImportMessageEvent, + getExecutableName: commandRunner.getExecutableName, + getLocalToktrackDisplayCommand: runnerResolver.getLocalToktrackDisplayCommand, + getToktrackLatestLookupTimeoutMs: latestVersionLookup.getToktrackLatestLookupTimeoutMs, + getToktrackRunnerTimeouts: runnerResolver.getToktrackRunnerTimeouts, + isAutoImportRunning: importExecutor.isAutoImportRunning, + lookupLatestToktrackVersion: latestVersionLookup.lookupLatestToktrackVersion, + parseToktrackVersionOutput: runnerResolver.parseToktrackVersionOutput, + performAutoImport: importExecutor.performAutoImport, + 
resetLatestToktrackVersionCache: latestVersionLookup.resetLatestToktrackVersionCache, + resolveToktrackRunner: runnerResolver.resolveToktrackRunner, + runCommandWithSpawn: commandRunner.runCommandWithSpawn, + runStartupAutoLoad: importExecutor.runStartupAutoLoad, + runToktrack: runnerResolver.runToktrack, + toAutoImportErrorEvent: messages.toAutoImportErrorEvent, + toAutoImportRunnerResolutionError: runnerResolver.toAutoImportRunnerResolutionError, }; } diff --git a/server/auto-import-runtime/command-runner.js b/server/auto-import-runtime/command-runner.js new file mode 100644 index 0000000..90319d1 --- /dev/null +++ b/server/auto-import-runtime/command-runner.js @@ -0,0 +1,368 @@ +const { StringDecoder } = require('node:string_decoder'); + +const DEFAULT_COMMAND_OUTPUT_MAX_BYTES = 1024 * 1024; +const DEFAULT_PROCESS_TERMINATION_GRACE_MS = 5000; + +function createAutoImportCommandRunner({ + processObject = process, + spawnCrossPlatform, + isWindows, + processTerminationGraceMs, +}) { + const terminationGraceMs = + typeof processTerminationGraceMs === 'number' && + Number.isFinite(processTerminationGraceMs) && + processTerminationGraceMs > 0 + ? processTerminationGraceMs + : DEFAULT_PROCESS_TERMINATION_GRACE_MS; + + function getExecutableName(baseName, forceWindows = isWindows) { + if (!forceWindows) { + return baseName; + } + + switch (baseName) { + case 'npm': + return 'npm.cmd'; + case 'bun': + case 'bunx': + return 'bun.exe'; + case 'npx': + return 'npx.cmd'; + default: + return baseName; + } + } + + function spawnCommand(command, args, options = {}) { + return spawnCrossPlatform(command, args, { + ...options, + windowsHide: options.windowsHide ?? 
true, + }); + } + + function commandExists(command, args = ['--version']) { + return new Promise((resolve) => { + const child = spawnCommand(command, args, { stdio: 'ignore' }); + child.on('error', () => resolve(false)); + child.on('close', (code) => resolve(code === 0)); + }); + } + + function formatCommandForDisplay(command, args = []) { + return [command, ...args].join(' ').trim(); + } + + function createCommandError( + message, + { + command, + args = [], + stdout = '', + stderr = '', + exitCode = null, + exitSignal = null, + timedOut = false, + outputTruncated = false, + } = {}, + ) { + const error = new Error(message); + error.command = command; + error.args = args; + error.stdout = stdout; + error.stderr = stderr; + error.exitCode = exitCode; + error.exitSignal = exitSignal; + error.timedOut = timedOut; + error.outputTruncated = outputTruncated; + return error; + } + + function getMaxOutputBytes(value) { + return typeof value === 'number' && Number.isFinite(value) && value > 0 + ? 
value + : DEFAULT_COMMAND_OUTPUT_MAX_BYTES; + } + + function createCapturedOutputState() { + return { + bytes: 0, + decoder: new StringDecoder('utf8'), + truncated: false, + value: '', + }; + } + + function getUtf8SafePrefixLength(chunkBuffer, maxBytes) { + let safeEnd = Math.min(maxBytes, chunkBuffer.length); + while (safeEnd > 0 && (chunkBuffer[safeEnd] & 0b11000000) === 0b10000000) { + safeEnd -= 1; + } + return safeEnd; + } + + function appendCapturedOutput(state, chunkBuffer, maxOutputBytes) { + const remainingBytes = maxOutputBytes - state.bytes; + + if (remainingBytes <= 0) { + state.truncated = state.truncated || chunkBuffer.length > 0; + return; + } + + if (chunkBuffer.length > remainingBytes) { + const safeEnd = getUtf8SafePrefixLength(chunkBuffer, remainingBytes); + if (safeEnd > 0) { + state.value += state.decoder.write(chunkBuffer.subarray(0, safeEnd)); + } + state.bytes = maxOutputBytes; + state.truncated = true; + return; + } + + state.bytes += chunkBuffer.length; + state.value += state.decoder.write(chunkBuffer); + } + + function flushCapturedOutput(state) { + if (!state.truncated) { + state.value += state.decoder.end(); + } + return state.value; + } + + function terminateChildProcess(child) { + if (!child || child.exitCode !== null) { + return; + } + + child.kill('SIGTERM'); + + const forceKillTimeout = setTimeout(() => { + if (child.exitCode === null) { + child.kill('SIGKILL'); + } + }, terminationGraceMs); + forceKillTimeout.unref?.(); + + child.once('close', () => { + clearTimeout(forceKillTimeout); + }); + } + + function runCommand( + command, + args, + { + env = processObject.env, + streamStderr = false, + onStderr, + signalOnClose, + timeoutMs = null, + maxOutputBytes = DEFAULT_COMMAND_OUTPUT_MAX_BYTES, + } = {}, + ) { + return runCommandWithSpawn(command, args, { + env, + streamStderr, + onStderr, + signalOnClose, + timeoutMs, + maxOutputBytes, + spawnImpl: spawnCommand, + }); + } + + function runCommandWithSpawn( + command, + args, + { + 
env = processObject.env, + streamStderr = false, + onStderr, + signalOnClose, + timeoutMs = null, + maxOutputBytes = DEFAULT_COMMAND_OUTPUT_MAX_BYTES, + spawnImpl = spawnCommand, + } = {}, + ) { + return new Promise((resolve, reject) => { + const commandLabel = formatCommandForDisplay(command, args); + let child; + + try { + child = spawnImpl(command, args, { + stdio: ['ignore', 'pipe', 'pipe'], + env, + }); + } catch (error) { + reject( + createCommandError(error?.message || `Could not start ${commandLabel}.`, { + command, + args, + }), + ); + return; + } + + const stdoutCapture = createCapturedOutputState(); + const stderrCapture = createCapturedOutputState(); + const stderrStreamDecoder = new StringDecoder('utf8'); + let stdout = ''; + let stderr = ''; + let outputTruncated = false; + const capturedOutputMaxBytes = getMaxOutputBytes(maxOutputBytes); + let finished = false; + let timedOut = false; + let timeoutId = null; + let outputFinalized = false; + + const settle = (handler, value) => { + if (finished) { + return false; + } + finished = true; + if (timeoutId) { + clearTimeout(timeoutId); + } + handler(value); + return true; + }; + + const rejectFromCallbackError = (error) => { + const callbackError = error instanceof Error ? 
error : new Error(String(error)); + const rejected = settle(reject, callbackError); + if (rejected) { + terminateChildProcess(child); + } + return false; + }; + + const emitStderrChunk = (chunkText) => { + if (finished) { + return false; + } + if (!streamStderr || !onStderr || !chunkText.trim()) { + return true; + } + try { + onStderr(chunkText); + return true; + } catch (error) { + return rejectFromCallbackError(error); + } + }; + + const finalizeOutput = () => { + if (outputFinalized) { + return true; + } + outputFinalized = true; + stdout = flushCapturedOutput(stdoutCapture); + stderr = flushCapturedOutput(stderrCapture); + outputTruncated = stdoutCapture.truncated || stderrCapture.truncated; + return emitStderrChunk(stderrStreamDecoder.end()); + }; + + if (signalOnClose) { + signalOnClose(() => terminateChildProcess(child)); + } + + if (typeof timeoutMs === 'number' && Number.isFinite(timeoutMs) && timeoutMs > 0) { + timeoutId = setTimeout(() => { + timedOut = true; + terminateChildProcess(child); + }, timeoutMs); + timeoutId.unref?.(); + } + + child.stdout.on('data', (chunk) => { + if (finished) { + return; + } + appendCapturedOutput(stdoutCapture, chunk, capturedOutputMaxBytes); + }); + + child.stderr.on('data', (chunk) => { + if (finished) { + return; + } + appendCapturedOutput(stderrCapture, chunk, capturedOutputMaxBytes); + emitStderrChunk(stderrStreamDecoder.write(chunk)); + }); + + child.on('error', (error) => { + if (!finalizeOutput()) { + return; + } + settle( + reject, + createCommandError(error.message || `Could not start ${commandLabel}.`, { + command, + args, + stdout, + stderr, + outputTruncated, + }), + ); + }); + child.on('close', (code, signal) => { + if (finished) { + return; + } + if (!finalizeOutput()) { + return; + } + if (timedOut) { + settle( + reject, + createCommandError(`Command timed out after ${timeoutMs}ms: ${commandLabel}`, { + command, + args, + stdout, + stderr, + exitCode: code, + exitSignal: signal ?? 
null, + timedOut: true, + outputTruncated, + }), + ); + return; + } + if (code === 0) { + settle(resolve, stdout.trimEnd()); + return; + } + settle( + reject, + createCommandError( + stderr.trim() || stdout.trim() || `Command exited with code ${code}: ${commandLabel}`, + { + command, + args, + stdout, + stderr, + exitCode: code, + exitSignal: signal ?? null, + outputTruncated, + }, + ), + ); + }); + }); + } + + return { + commandExists, + createCommandError, + formatCommandForDisplay, + getExecutableName, + runCommand, + runCommandWithSpawn, + spawnCommand, + terminateChildProcess, + }; +} + +module.exports = { + createAutoImportCommandRunner, +}; diff --git a/server/auto-import-runtime/import-executor.js b/server/auto-import-runtime/import-executor.js new file mode 100644 index 0000000..ceea5e4 --- /dev/null +++ b/server/auto-import-runtime/import-executor.js @@ -0,0 +1,269 @@ +const { formatCurrency, formatDayCount, formatErrorMessage } = require('../runtime-formatters'); + +function createAutoImportExecutor({ + createExclusiveRuntimeLease, + messages, + runnerResolver, + normalizeIncomingData, + withSettingsAndDataMutationLock, + writeData, + updateDataLoadState, +}) { + const { + createAutoImportError, + createAutoImportMessageEvent, + formatAutoImportMessageEvent, + getTimeoutSeconds, + summarizeCommandError, + } = messages; + const { + getToktrackRunnerTimeouts, + isPackageToktrackRunner, + parseToktrackVersionOutput, + resolveToktrackRunnerWithDiagnostics, + runToktrack, + toAutoImportRunnerResolutionError, + } = runnerResolver; + + function createAutoImportAlreadyRunningError() { + return createAutoImportError( + 'An auto-import is already running. 
Please wait.', + 'autoImportRunning', + ); + } + + const autoImportLease = createExclusiveRuntimeLease({ + createAlreadyRunningError: createAutoImportAlreadyRunningError, + }); + + function acquireAutoImportLease() { + return autoImportLease.acquire(); + } + + async function performAutoImport({ + source = 'auto-import', + onCheck = () => {}, + onProgress = () => {}, + onOutput = () => {}, + signalOnClose, + lease = null, + } = {}) { + const ownsLease = !lease; + const activeLease = lease || acquireAutoImportLease(); + let progressSeconds = 0; + const progressInterval = setInterval(() => { + progressSeconds += 5; + onProgress(createAutoImportMessageEvent('processingUsageData', { seconds: progressSeconds })); + }, 5000); + progressInterval.unref?.(); + + try { + onCheck({ tool: 'toktrack', status: 'checking' }); + onProgress(createAutoImportMessageEvent('startingLocalImport')); + + const resolution = await resolveToktrackRunnerWithDiagnostics(); + const runner = resolution.runner; + if (!runner) { + const resolutionError = toAutoImportRunnerResolutionError(resolution); + if (resolutionError.messageKey === 'noRunnerFound') { + onCheck({ tool: 'toktrack', status: 'not_found' }); + } + throw resolutionError; + } + + if (isPackageToktrackRunner(runner)) { + onProgress( + createAutoImportMessageEvent('warmingUpPackageRunner', { + runner: runner.label, + }), + ); + } + + let versionResult; + try { + versionResult = await runToktrack(runner, ['--version'], { + timeoutMs: getToktrackRunnerTimeouts(runner).versionCheckMs, + }); + } catch (error) { + if (isPackageToktrackRunner(runner) && error?.timedOut) { + throw createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('packageRunnerWarmupTimedOut', { + runner: runner.label, + seconds: getTimeoutSeconds(getToktrackRunnerTimeouts(runner).versionCheckMs), + }), + ), + 'packageRunnerWarmupTimedOut', + { + runner: runner.label, + seconds: 
getTimeoutSeconds(getToktrackRunnerTimeouts(runner).versionCheckMs), + }, + ); + } + + throw createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('toktrackVersionCheckFailed', { + message: summarizeCommandError(error), + }), + ), + 'toktrackVersionCheckFailed', + { + message: summarizeCommandError(error), + }, + ); + } + + onCheck({ + tool: 'toktrack', + status: 'found', + method: runner.label, + version: parseToktrackVersionOutput(versionResult), + }); + onProgress( + createAutoImportMessageEvent('loadingUsageData', { + command: runner.displayCommand, + }), + ); + + let rawJson; + try { + rawJson = await runToktrack(runner, ['daily', '--json'], { + streamStderr: true, + onStderr: (line) => { + onOutput(line); + }, + signalOnClose, + timeoutMs: getToktrackRunnerTimeouts(runner).importMs, + }); + } catch (error) { + if (error?.timedOut) { + throw createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('toktrackExecutionTimedOut', { + runner: runner.label, + seconds: getTimeoutSeconds(getToktrackRunnerTimeouts(runner).importMs), + }), + ), + 'toktrackExecutionTimedOut', + { + runner: runner.label, + seconds: getTimeoutSeconds(getToktrackRunnerTimeouts(runner).importMs), + }, + ); + } + + throw createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('toktrackExecutionFailed', { + message: summarizeCommandError(error), + }), + ), + 'toktrackExecutionFailed', + { + message: summarizeCommandError(error), + }, + ); + } + + let parsedJson; + try { + parsedJson = JSON.parse(rawJson); + } catch (error) { + throw createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('toktrackInvalidJson', { + message: summarizeCommandError(error), + }), + ), + 'toktrackInvalidJson', + { + message: summarizeCommandError(error), + }, + ); + } + + let normalized; + try { + normalized = normalizeIncomingData(parsedJson); + } catch (error) { + throw createAutoImportError( + 
formatAutoImportMessageEvent( + createAutoImportMessageEvent('toktrackInvalidData', { + message: summarizeCommandError(error), + }), + ), + 'toktrackInvalidData', + { + message: summarizeCommandError(error), + }, + ); + } + + await withSettingsAndDataMutationLock(async () => { + await writeData(normalized); + await updateDataLoadState({ + lastLoadedAt: new Date().toISOString(), + lastLoadSource: source, + }); + }); + + return { + days: normalized.daily.length, + totalCost: normalized.totals.totalCost, + }; + } finally { + clearInterval(progressInterval); + if (ownsLease) { + activeLease.release(); + } + } + } + + async function runStartupAutoLoad({ + source = 'cli-auto-load', + log = console.log, + errorLog = console.error, + } = {}) { + log('Auto-load enabled, starting import...'); + + try { + const result = await performAutoImport({ + source, + onCheck: (event) => { + if (event.status === 'found') { + log(`toktrack found (${event.method}, v${event.version})`); + } + }, + onProgress: (event) => { + log(formatAutoImportMessageEvent(event)); + }, + onOutput: (line) => { + log(line); + }, + }); + + log( + `Auto-load complete: imported ${formatDayCount(result.days)}, ${formatCurrency( + result.totalCost, + )}.`, + ); + return result; + } catch (error) { + errorLog(`Auto-load failed: ${formatErrorMessage(error)}`); + errorLog('Dashboard will start without newly imported data.'); + return null; + } + } + + return { + acquireAutoImportLease, + isAutoImportRunning: autoImportLease.isActive, + performAutoImport, + runStartupAutoLoad, + }; +} + +module.exports = { + createAutoImportExecutor, +}; diff --git a/server/auto-import-runtime/latest-version.js b/server/auto-import-runtime/latest-version.js new file mode 100644 index 0000000..a46af12 --- /dev/null +++ b/server/auto-import-runtime/latest-version.js @@ -0,0 +1,115 @@ +const exactSemverPattern = + 
/^(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)(?:-[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?(?:\+[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?$/; + +function validateLatestVersionOutput(output) { + const latestVersion = String(output).trim(); + + if (!latestVersion) { + return { + latestVersion: null, + message: 'npm returned no toktrack version.', + ok: false, + }; + } + + if (!exactSemverPattern.test(latestVersion)) { + return { + latestVersion: null, + message: `npm returned an invalid toktrack version: ${latestVersion}`, + ok: false, + }; + } + + return { + latestVersion, + message: null, + ok: true, + }; +} + +function createLatestToktrackVersionLookup({ + createExpiringAsyncCache, + commandRunner, + processObject = process, + toktrackPackageName, + toktrackVersion, + npxCacheDir, + toktrackLatestLookupTimeoutMs, + toktrackLatestCacheSuccessTtlMs, + toktrackLatestCacheFailureTtlMs, +}) { + const latestToktrackVersionStatusCache = createExpiringAsyncCache({ + load: async () => { + try { + const latestVersionResult = validateLatestVersionOutput( + await commandRunner.runCommand( + commandRunner.getExecutableName('npm'), + ['view', `${toktrackPackageName}@latest`, 'version'], + { + env: { + ...processObject.env, + npm_config_cache: npxCacheDir, + }, + timeoutMs: toktrackLatestLookupTimeoutMs, + }, + ), + ); + + if (!latestVersionResult.ok) { + return { + configuredVersion: toktrackVersion, + latestVersion: null, + isLatest: null, + lookupStatus: 'malformed-output', + message: latestVersionResult.message, + }; + } + + return { + configuredVersion: toktrackVersion, + latestVersion: latestVersionResult.latestVersion, + isLatest: latestVersionResult.latestVersion === toktrackVersion, + lookupStatus: 'ok', + }; + } catch (error) { + const timedOut = error instanceof Error && error.timedOut === true; + return { + configuredVersion: toktrackVersion, + latestVersion: null, + isLatest: null, + lookupStatus: timedOut ? 
'timeout' : 'failed', + message: + error instanceof Error && error.message.trim() + ? error.message.trim() + : 'Could not determine the latest toktrack version.', + }; + } + }, + getTtlMs: (value) => + value.lookupStatus === 'ok' + ? toktrackLatestCacheSuccessTtlMs + : toktrackLatestCacheFailureTtlMs, + }); + + async function lookupLatestToktrackVersion() { + return latestToktrackVersionStatusCache.lookup(); + } + + function getToktrackLatestLookupTimeoutMs() { + return toktrackLatestLookupTimeoutMs; + } + + function resetLatestToktrackVersionCache() { + latestToktrackVersionStatusCache.reset(); + } + + return { + getToktrackLatestLookupTimeoutMs, + lookupLatestToktrackVersion, + resetLatestToktrackVersionCache, + }; +} + +module.exports = { + createLatestToktrackVersionLookup, +}; diff --git a/server/auto-import-runtime/messages.js b/server/auto-import-runtime/messages.js new file mode 100644 index 0000000..ba920fe --- /dev/null +++ b/server/auto-import-runtime/messages.js @@ -0,0 +1,93 @@ +function createAutoImportMessageEvent(key, vars = {}) { + return { + key, + vars, + }; +} + +function createAutoImportError(message, key, vars = {}) { + const error = new Error(message); + error.messageKey = key; + error.messageVars = vars; + return error; +} + +function summarizeCommandError(error, fallbackMessage = 'Unknown error') { + if (error instanceof Error && error.message.trim()) { + return error.message.trim(); + } + + return fallbackMessage; +} + +function getTimeoutSeconds(timeoutMs) { + return Math.max(1, Math.ceil(Number(timeoutMs) / 1000)); +} + +function createAutoImportMessages({ toktrackVersion }) { + function toAutoImportErrorEvent(error) { + if (error && typeof error.messageKey === 'string') { + return createAutoImportMessageEvent(error.messageKey, error.messageVars || {}); + } + + return createAutoImportMessageEvent('errorPrefix', { + message: error && error.message ? 
error.message : 'Unknown error', + }); + } + + function formatAutoImportMessageEvent(event) { + switch (event?.key) { + case 'startingLocalImport': + return 'Starting toktrack import...'; + case 'warmingUpPackageRunner': + return `Preparing ${event.vars?.runner || 'package runner'} (the first run may take longer while toktrack is downloaded)...`; + case 'loadingUsageData': + return `Loading usage data via ${event.vars?.command || 'unknown command'}...`; + case 'processingUsageData': + return `Processing usage data... (${event.vars?.seconds || 0}s)`; + case 'autoImportRunning': + return 'An auto-import is already running. Please wait.'; + case 'noRunnerFound': + return 'No local toktrack, Bun, or npm exec installation found.'; + case 'localToktrackVersionMismatch': + return `Local toktrack v${event.vars?.detectedVersion || 'unknown'} does not match the required v${event.vars?.expectedVersion || toktrackVersion}.`; + case 'localToktrackFailed': + return `Local toktrack could not be started: ${event.vars?.message || 'Unknown error'}`; + case 'packageRunnerFailed': + return `No compatible bunx or npm exec runner succeeded: ${event.vars?.message || 'Unknown error'}`; + case 'packageRunnerWarmupTimedOut': + return `${event.vars?.runner || 'The package runner'} took longer than ${event.vars?.seconds || 0}s to prepare toktrack. The first run may need to download the package first. Please try again or verify network access.`; + case 'toktrackVersionCheckFailed': + return `Toktrack was found, but the version check failed: ${event.vars?.message || 'Unknown error'}`; + case 'toktrackExecutionFailed': + return `Toktrack failed while loading usage data: ${event.vars?.message || 'Unknown error'}`; + case 'toktrackExecutionTimedOut': + return `Toktrack did not finish loading usage data within ${event.vars?.seconds || 0}s via ${event.vars?.runner || 'the selected runner'}. 
Please try again.`; + case 'toktrackInvalidJson': + return `Toktrack returned invalid JSON output: ${event.vars?.message || 'Unknown error'}`; + case 'toktrackInvalidData': + return `Toktrack returned data that TTDash could not process: ${event.vars?.message || 'Unknown error'}`; + case 'errorPrefix': + return `Error: ${event.vars?.message || 'Unknown error'}`; + default: + return 'Auto-import update'; + } + } + + return { + createAutoImportError, + createAutoImportMessageEvent, + formatAutoImportMessageEvent, + getTimeoutSeconds, + summarizeCommandError, + toAutoImportErrorEvent, + }; +} + +module.exports = { + createAutoImportError, + createAutoImportMessageEvent, + createAutoImportMessages, + getTimeoutSeconds, + summarizeCommandError, +}; diff --git a/server/auto-import-runtime/toktrack-runners.js b/server/auto-import-runtime/toktrack-runners.js new file mode 100644 index 0000000..86d333c --- /dev/null +++ b/server/auto-import-runtime/toktrack-runners.js @@ -0,0 +1,299 @@ +function createToktrackRunnerResolver({ + fs, + processObject = process, + commandRunner, + messages, + toktrackPackageSpec, + toktrackVersion, + toktrackLocalBin, + npxCacheDir, + isWindows, + toktrackLocalRunnerProbeTimeoutMs, + toktrackLocalRunnerVersionCheckTimeoutMs, + toktrackLocalRunnerImportTimeoutMs, + toktrackPackageRunnerProbeTimeoutMs, + toktrackPackageRunnerVersionCheckTimeoutMs, + toktrackPackageRunnerImportTimeoutMs, + probeLog = () => {}, +}) { + const { + createAutoImportError, + createAutoImportMessageEvent, + formatAutoImportMessageEvent, + getTimeoutSeconds, + summarizeCommandError, + } = messages; + const { getExecutableName, runCommand } = commandRunner; + + function parseToktrackVersionOutput(output) { + return String(output) + .trim() + .replace(/^toktrack\s+/, ''); + } + + function getConfiguredLocalToktrackBin() { + return processObject.env.TTDASH_TOKTRACK_LOCAL_BIN || toktrackLocalBin; + } + + function getLocalToktrackDisplayCommand(forceWindows = isWindows) { + if 
(processObject.env.TTDASH_TOKTRACK_LOCAL_BIN) { + return `${getConfiguredLocalToktrackBin()} daily --json`; + } + + return forceWindows + ? 'node_modules\\.bin\\toktrack.cmd daily --json' + : 'node_modules/.bin/toktrack daily --json'; + } + + function createLocalToktrackRunner() { + return { + command: getConfiguredLocalToktrackBin(), + prefixArgs: [], + env: processObject.env, + method: 'local', + label: 'local toktrack', + displayCommand: getLocalToktrackDisplayCommand(), + }; + } + + function createBunxToktrackRunner() { + return { + command: getExecutableName('bunx'), + prefixArgs: isWindows ? ['x', toktrackPackageSpec] : [toktrackPackageSpec], + env: processObject.env, + method: 'bunx', + label: 'bunx', + displayCommand: `bunx ${toktrackPackageSpec} daily --json`, + }; + } + + function createNpxToktrackRunner() { + return { + command: getExecutableName('npx'), + prefixArgs: ['--yes', toktrackPackageSpec], + env: { + ...processObject.env, + npm_config_cache: npxCacheDir, + }, + method: 'npm', + label: 'npm exec', + displayCommand: `npx --yes ${toktrackPackageSpec} daily --json`, + }; + } + + function isPackageToktrackRunner(runner) { + return runner?.method === 'bunx' || runner?.method === 'npm'; + } + + function getToktrackRunnerTimeouts(runner) { + if (isPackageToktrackRunner(runner)) { + return { + probeMs: toktrackPackageRunnerProbeTimeoutMs, + versionCheckMs: toktrackPackageRunnerVersionCheckTimeoutMs, + importMs: toktrackPackageRunnerImportTimeoutMs, + }; + } + + return { + probeMs: toktrackLocalRunnerProbeTimeoutMs, + versionCheckMs: toktrackLocalRunnerVersionCheckTimeoutMs, + importMs: toktrackLocalRunnerImportTimeoutMs, + }; + } + + function runToktrack( + runner, + args, + { streamStderr = false, onStderr, signalOnClose, timeoutMs = null } = {}, + ) { + return runCommand(runner.command, [...runner.prefixArgs, ...args], { + env: runner.env, + streamStderr, + onStderr, + signalOnClose, + timeoutMs, + }); + } + + async function probeToktrackRunner( + 
runner, + timeoutMs = getToktrackRunnerTimeouts(runner).probeMs, + ) { + try { + await runToktrack(runner, ['--version'], { timeoutMs }); + return { + ok: true, + errorMessage: null, + timedOut: false, + }; + } catch (error) { + const message = summarizeCommandError(error, `Could not start ${runner.label}.`); + probeLog(`Failed to probe ${runner.label}: ${message}`); + return { + ok: false, + errorMessage: message, + timedOut: Boolean(error?.timedOut), + }; + } + } + + async function resolveToktrackRunnerWithDiagnostics() { + const resolution = { + runner: null, + localVersionMismatch: null, + localFailure: null, + runnerFailures: [], + }; + + if (fs.existsSync(getConfiguredLocalToktrackBin())) { + const localRunner = createLocalToktrackRunner(); + + try { + const localVersion = parseToktrackVersionOutput( + await runToktrack(localRunner, ['--version'], { + timeoutMs: getToktrackRunnerTimeouts(localRunner).versionCheckMs, + }), + ); + if (localVersion === toktrackVersion) { + resolution.runner = localRunner; + return resolution; + } + resolution.localVersionMismatch = { + detectedVersion: localVersion || 'unknown', + expectedVersion: toktrackVersion, + }; + } catch (error) { + resolution.localFailure = summarizeCommandError( + error, + 'The local toktrack binary could not be started.', + ); + } + } + + const bunxRunner = createBunxToktrackRunner(); + const bunxProbe = await probeToktrackRunner(bunxRunner); + if (bunxProbe.ok) { + resolution.runner = bunxRunner; + return resolution; + } + if (bunxProbe.errorMessage) { + resolution.runnerFailures.push({ + label: bunxRunner.label, + message: bunxProbe.errorMessage, + timedOut: bunxProbe.timedOut, + }); + } + + const npxRunner = createNpxToktrackRunner(); + const npxProbe = await probeToktrackRunner(npxRunner); + if (npxProbe.ok) { + resolution.runner = npxRunner; + return resolution; + } + if (npxProbe.errorMessage) { + resolution.runnerFailures.push({ + label: npxRunner.label, + message: npxProbe.errorMessage, + 
timedOut: npxProbe.timedOut, + }); + } + + return resolution; + } + + async function resolveToktrackRunner() { + const resolution = await resolveToktrackRunnerWithDiagnostics(); + return resolution.runner; + } + + function toAutoImportRunnerResolutionError(resolution) { + if (resolution.localVersionMismatch) { + return createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent( + 'localToktrackVersionMismatch', + resolution.localVersionMismatch, + ), + ), + 'localToktrackVersionMismatch', + resolution.localVersionMismatch, + ); + } + + if (resolution.localFailure) { + return createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('localToktrackFailed', { + message: resolution.localFailure, + }), + ), + 'localToktrackFailed', + { + message: resolution.localFailure, + }, + ); + } + + if (resolution.runnerFailures.length > 0) { + const timedOutRunnerFailures = resolution.runnerFailures.filter( + (failure) => failure.timedOut, + ); + if ( + timedOutRunnerFailures.length > 0 && + timedOutRunnerFailures.length === resolution.runnerFailures.length + ) { + const runners = timedOutRunnerFailures.map((failure) => failure.label).join(' / '); + const seconds = getTimeoutSeconds(toktrackPackageRunnerProbeTimeoutMs); + return createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('packageRunnerWarmupTimedOut', { + runner: runners, + seconds, + }), + ), + 'packageRunnerWarmupTimedOut', + { + runner: runners, + seconds, + }, + ); + } + + const runnerFailuresMessage = resolution.runnerFailures + .map((failure) => `${failure.label}: ${failure.message}`) + .join(' | '); + + return createAutoImportError( + formatAutoImportMessageEvent( + createAutoImportMessageEvent('packageRunnerFailed', { + message: runnerFailuresMessage, + }), + ), + 'packageRunnerFailed', + { + message: runnerFailuresMessage, + }, + ); + } + + return createAutoImportError( + 'No local toktrack, Bun, or npm exec installation 
found.', + 'noRunnerFound', + ); + } + + return { + getLocalToktrackDisplayCommand, + getToktrackRunnerTimeouts, + isPackageToktrackRunner, + parseToktrackVersionOutput, + resolveToktrackRunner, + resolveToktrackRunnerWithDiagnostics, + runToktrack, + toAutoImportRunnerResolutionError, + }; +} + +module.exports = { + createToktrackRunnerResolver, +}; diff --git a/server/background-runtime.js b/server/background-runtime.js index 7bee18a..f3cde11 100644 --- a/server/background-runtime.js +++ b/server/background-runtime.js @@ -299,7 +299,7 @@ function createBackgroundRuntime({ } function buildBackgroundLogFilePath() { - return path.join(backgroundLogDir, `server-${Date.now()}.log`); + return path.join(backgroundLogDir, `server-${Date.now()}-${processObject.pid}.log`); } function readBackgroundLogOutput(logFile) { diff --git a/server/data-runtime.js b/server/data-runtime.js index 77cd1cd..9a20c48 100644 --- a/server/data-runtime.js +++ b/server/data-runtime.js @@ -4,7 +4,12 @@ const { normalizePersistedAppSettings, normalizeProviderLimits: normalizeSharedProviderLimits, } = require('../shared/app-settings.js'); +const { resolveDataRuntimeAppPaths } = require('./data-runtime/app-paths'); +const { createDataRuntimeFileIo } = require('./data-runtime/file-io'); +const { createDataRuntimeFileLocks } = require('./data-runtime/file-locks'); +const { createDataRuntimeImportMerge } = require('./data-runtime/import-merge'); +/** Creates the persistence, locking, import, and settings runtime facade. 
*/ function createDataRuntime({ fs, fsPromises, @@ -23,344 +28,69 @@ function createDataRuntime({ secureFileMode, fileMutationLockTimeoutMs, fileMutationLockStaleMs, + fileMutationLockInstanceMismatchStaleMs = fileMutationLockTimeoutMs, + debugLog = console.debug, getCliAutoLoadActive = () => false, }) { - function resolveAppPaths() { - const homeDir = os.homedir(); - const explicitPaths = { - dataDir: processObject.env.TTDASH_DATA_DIR, - configDir: processObject.env.TTDASH_CONFIG_DIR, - cacheDir: processObject.env.TTDASH_CACHE_DIR, - }; - let platformPaths; - - if (processObject.platform === 'darwin') { - const appSupportDir = path.join(homeDir, 'Library', 'Application Support', appDirName); - platformPaths = { - dataDir: appSupportDir, - configDir: appSupportDir, - cacheDir: path.join(homeDir, 'Library', 'Caches', appDirName), - }; - } else if (isWindows) { - platformPaths = { - dataDir: path.join( - processObject.env.LOCALAPPDATA || path.join(homeDir, 'AppData', 'Local'), - appDirName, - ), - configDir: path.join( - processObject.env.APPDATA || path.join(homeDir, 'AppData', 'Roaming'), - appDirName, - ), - cacheDir: path.join( - processObject.env.LOCALAPPDATA || path.join(homeDir, 'AppData', 'Local'), - appDirName, - 'Cache', - ), - }; - } else { - platformPaths = { - dataDir: path.join( - processObject.env.XDG_DATA_HOME || path.join(homeDir, '.local', 'share'), - appDirNameLinux, - ), - configDir: path.join( - processObject.env.XDG_CONFIG_HOME || path.join(homeDir, '.config'), - appDirNameLinux, - ), - cacheDir: path.join( - processObject.env.XDG_CACHE_HOME || path.join(homeDir, '.cache'), - appDirNameLinux, - ), - }; - } - - return { - dataDir: explicitPaths.dataDir || platformPaths.dataDir, - configDir: explicitPaths.configDir || platformPaths.configDir, - cacheDir: explicitPaths.cacheDir || platformPaths.cacheDir, - }; - } - - const appPaths = resolveAppPaths(); + const appPaths = resolveDataRuntimeAppPaths({ + os, + path, + processObject, + appDirName, + 
appDirNameLinux, + isWindows, + }); const dataFile = path.join(appPaths.dataDir, 'data.json'); const settingsFile = path.join(appPaths.configDir, 'settings.json'); const npxCacheDir = path.join(appPaths.cacheDir, 'npx-cache'); - const fileMutationLocks = new Map(); - - function ensureDir(dirPath) { - fs.mkdirSync(dirPath, { recursive: true, mode: secureDirMode }); - if (!isWindows) { - fs.chmodSync(dirPath, secureDirMode); - } - } - - function ensureAppDirs(extraDirs = []) { - ensureDir(appPaths.dataDir); - ensureDir(appPaths.configDir); - ensureDir(appPaths.cacheDir); - ensureDir(npxCacheDir); - extraDirs.forEach((dirPath) => ensureDir(dirPath)); - } - - function applySecureFileMode(filePath) { - if (!isWindows) { - fs.chmodSync(filePath, secureFileMode); - } - } - - function isMissingFileError(error) { - return error?.code === 'ENOENT' || /no such file/i.test(String(error?.message || error)); - } - - function writeJsonAtomic(filePath, data) { - ensureDir(path.dirname(filePath)); - const tempPath = `${filePath}.${processObject.pid}.${Date.now()}.tmp`; - try { - fs.writeFileSync(tempPath, JSON.stringify(data, null, 2), { - mode: secureFileMode, - }); - applySecureFileMode(tempPath); - fs.renameSync(tempPath, filePath); - } catch (error) { - try { - if (fs.existsSync(tempPath)) { - fs.unlinkSync(tempPath); - } - } catch { - // Ignore cleanup failures so the original write error is preserved. 
- } - throw error; - } - } - - async function writeJsonAtomicAsync(filePath, data) { - const tempPath = `${filePath}.${processObject.pid}.${Date.now()}.tmp`; - let tempPathCreated = false; - const parentDir = path.dirname(filePath); - - try { - await fsPromises.mkdir(parentDir, { recursive: true, mode: secureDirMode }); - if (!isWindows) { - await fsPromises.chmod(parentDir, secureDirMode); - } - tempPathCreated = true; - await fsPromises.writeFile(tempPath, JSON.stringify(data, null, 2), { - mode: secureFileMode, - }); - - if (!isWindows) { - await fsPromises.chmod(tempPath, secureFileMode); - } - - await fsPromises.rename(tempPath, filePath); - } catch (error) { - if (tempPathCreated) { - try { - await fsPromises.unlink(tempPath); - } catch (unlinkError) { - if (unlinkError?.code !== 'ENOENT') { - // Ignore temp-file cleanup failures so the original error wins. - } - } - } - throw error; - } - } - - async function unlinkIfExists(filePath) { - try { - await fsPromises.unlink(filePath); - } catch (error) { - if (error?.code !== 'ENOENT') { - throw error; - } - } - } - - function getFileMutationLockDir(filePath) { - return `${filePath}.lock`; - } - - function getFileMutationLockOwnerPath(lockDir) { - return path.join(lockDir, 'owner.json'); - } - - async function removeFileMutationLockDir(lockDir) { - try { - await fsPromises.rm(lockDir, { recursive: true, force: true }); - } catch (error) { - if (error?.code !== 'ENOENT') { - throw error; - } - } - } - - async function writeFileMutationLockOwner(lockDir) { - const ownerPath = getFileMutationLockOwnerPath(lockDir); - const owner = { - pid: processObject.pid, - createdAt: new Date().toISOString(), - instanceId: runtimeInstanceId, - }; - await fsPromises.writeFile(ownerPath, JSON.stringify(owner, null, 2), { - mode: secureFileMode, - }); - if (!isWindows) { - await fsPromises.chmod(ownerPath, secureFileMode); - } - } - - function isProcessRunning(pid) { - if (!Number.isInteger(pid) || pid <= 0) { - return false; - } - 
- try { - processObject.kill(pid, 0); - return true; - } catch (error) { - return error && error.code === 'EPERM'; - } - } - - async function sleep(ms) { - return new Promise((resolve) => setTimeout(resolve, ms)); - } - - async function shouldReapFileMutationLock(lockDir) { - const ownerPath = getFileMutationLockOwnerPath(lockDir); - let owner = null; - - try { - const rawOwner = await fsPromises.readFile(ownerPath, 'utf-8'); - owner = JSON.parse(rawOwner); - } catch (error) { - if (error?.code !== 'ENOENT') { - // Fall back to age-based cleanup if the owner metadata is missing or malformed. - } - } - - try { - if (Number.isInteger(owner?.pid)) { - return !isProcessRunning(owner.pid); - } - - const ownerCreatedAt = owner?.createdAt ? Date.parse(owner.createdAt) : Number.NaN; - const stats = await fsPromises.stat(lockDir); - const lockAgeMs = Number.isFinite(ownerCreatedAt) - ? Date.now() - ownerCreatedAt - : Date.now() - stats.mtimeMs; - - if (lockAgeMs > fileMutationLockStaleMs) { - return true; - } - - return false; - } catch (error) { - if (error?.code === 'ENOENT') { - return false; - } - throw error; - } - } - - async function withCrossProcessFileMutationLock( - filePath, - operation, - timeoutMs = fileMutationLockTimeoutMs, - ) { - const lockDir = getFileMutationLockDir(filePath); - const startedAt = Date.now(); - - while (true) { - try { - await fsPromises.mkdir(path.dirname(lockDir), { - recursive: true, - mode: secureDirMode, - }); - await fsPromises.mkdir(lockDir, { mode: secureDirMode }); - if (!isWindows) { - await fsPromises.chmod(lockDir, secureDirMode); - } - - try { - await writeFileMutationLockOwner(lockDir); - } catch (error) { - await removeFileMutationLockDir(lockDir).catch(() => undefined); - throw error; - } - - break; - } catch (error) { - if (!error || error.code !== 'EEXIST') { - throw error; - } - - if (await shouldReapFileMutationLock(lockDir)) { - await removeFileMutationLockDir(lockDir).catch(() => undefined); - continue; - } - - if 
(Date.now() - startedAt >= timeoutMs) { - throw new Error(`Could not acquire file mutation lock for ${path.basename(filePath)}.`, { - cause: error, - }); - } - - await sleep(50); - } - } - - try { - return await operation(); - } finally { - try { - await removeFileMutationLockDir(lockDir); - } catch { - // Ignore cleanup races so the original operation result wins. - } - } - } - - async function withFileMutationLock(filePath, operation) { - const previous = fileMutationLocks.get(filePath) || Promise.resolve(); - let releaseCurrent; - const current = new Promise((resolve) => { - releaseCurrent = resolve; - }); - - fileMutationLocks.set(filePath, current); - - await previous.catch(() => undefined); - - try { - return await withCrossProcessFileMutationLock(filePath, operation); - } finally { - releaseCurrent(); - if (fileMutationLocks.get(filePath) === current) { - fileMutationLocks.delete(filePath); - } - } - } - - async function withOrderedFileMutationLocks(filePaths, operation) { - const uniquePaths = Array.from(new Set(filePaths)).sort(); - - const runWithLock = async (index) => { - if (index >= uniquePaths.length) { - return operation(); - } - - const filePath = uniquePaths[index]; - return withFileMutationLock(filePath, () => runWithLock(index + 1)); - }; - - return runWithLock(0); - } - - async function withSettingsAndDataMutationLock(operation) { - return withOrderedFileMutationLocks([settingsFile, dataFile], operation); - } + const fileIo = createDataRuntimeFileIo({ + fs, + fsPromises, + path, + processObject, + appPaths, + npxCacheDir, + isWindows, + secureDirMode, + secureFileMode, + }); + const fileLocks = createDataRuntimeFileLocks({ + fsPromises, + path, + processObject, + runtimeInstanceId, + isWindows, + secureDirMode, + secureFileMode, + fileMutationLockTimeoutMs, + fileMutationLockStaleMs, + fileMutationLockInstanceMismatchStaleMs, + settingsFile, + dataFile, + debugLog, + }); + const importMerge = createDataRuntimeImportMerge({ + 
normalizeIncomingData, + settingsBackupKind, + usageBackupKind, + }); + const { + ensureDir, + ensureAppDirs, + applySecureFileMode, + isMissingFileError, + writeJsonAtomic, + writeJsonAtomicAsync, + unlinkIfExists, + } = fileIo; + const { + getFileMutationLockDir, + withFileMutationLock, + withOrderedFileMutationLocks, + withSettingsAndDataMutationLock, + getPendingFileMutationLockCount, + } = fileLocks; + const { extractSettingsImportPayload, extractUsageImportPayload, mergeUsageData } = importMerge; const normalizeIsoTimestamp = normalizeSharedIsoTimestamp; const normalizeProviderLimits = normalizeSharedProviderLimits; const normalizeSettings = normalizePersistedAppSettings; @@ -405,234 +135,6 @@ function createDataRuntime({ } } - function isPlainObject(value) { - return Boolean(value) && typeof value === 'object' && !Array.isArray(value); - } - - function sortStrings(values) { - return [ - ...new Set( - (Array.isArray(values) ? values : []) - .filter((value) => typeof value === 'string') - .map((value) => value.trim()) - .filter(Boolean), - ), - ].sort((left, right) => left.localeCompare(right)); - } - - function canonicalizeModelBreakdown(entry) { - return { - modelName: typeof entry?.modelName === 'string' ? entry.modelName.trim() : '', - inputTokens: Number(entry?.inputTokens) || 0, - outputTokens: Number(entry?.outputTokens) || 0, - cacheCreationTokens: Number(entry?.cacheCreationTokens) || 0, - cacheReadTokens: Number(entry?.cacheReadTokens) || 0, - thinkingTokens: Number(entry?.thinkingTokens) || 0, - cost: Number(entry?.cost) || 0, - requestCount: Number(entry?.requestCount) || 0, - }; - } - - function canonicalizeUsageDay(day) { - return { - date: typeof day?.date === 'string' ? 
day.date : '', - inputTokens: Number(day?.inputTokens) || 0, - outputTokens: Number(day?.outputTokens) || 0, - cacheCreationTokens: Number(day?.cacheCreationTokens) || 0, - cacheReadTokens: Number(day?.cacheReadTokens) || 0, - thinkingTokens: Number(day?.thinkingTokens) || 0, - totalTokens: Number(day?.totalTokens) || 0, - totalCost: Number(day?.totalCost) || 0, - requestCount: Number(day?.requestCount) || 0, - modelsUsed: sortStrings(day?.modelsUsed), - modelBreakdowns: (Array.isArray(day?.modelBreakdowns) ? day.modelBreakdowns : []) - .map(canonicalizeModelBreakdown) - .sort((left, right) => left.modelName.localeCompare(right.modelName)), - }; - } - - function areUsageDaysEquivalent(left, right) { - const leftDay = canonicalizeUsageDay(left); - const rightDay = canonicalizeUsageDay(right); - const scalarFields = [ - 'date', - 'inputTokens', - 'outputTokens', - 'cacheCreationTokens', - 'cacheReadTokens', - 'thinkingTokens', - 'totalTokens', - 'totalCost', - 'requestCount', - ]; - - for (const field of scalarFields) { - if (leftDay[field] !== rightDay[field]) { - return false; - } - } - - if (leftDay.modelsUsed.length !== rightDay.modelsUsed.length) { - return false; - } - for (let index = 0; index < leftDay.modelsUsed.length; index += 1) { - if (leftDay.modelsUsed[index] !== rightDay.modelsUsed[index]) { - return false; - } - } - - if (leftDay.modelBreakdowns.length !== rightDay.modelBreakdowns.length) { - return false; - } - - const breakdownFields = [ - 'modelName', - 'inputTokens', - 'outputTokens', - 'cacheCreationTokens', - 'cacheReadTokens', - 'thinkingTokens', - 'cost', - 'requestCount', - ]; - for (let index = 0; index < leftDay.modelBreakdowns.length; index += 1) { - const leftBreakdown = leftDay.modelBreakdowns[index]; - const rightBreakdown = rightDay.modelBreakdowns[index]; - for (const field of breakdownFields) { - if (leftBreakdown[field] !== rightBreakdown[field]) { - return false; - } - } - } - - return true; - } - - function 
extractSettingsImportPayload(payload) { - if (!isPlainObject(payload)) { - throw new Error('Uploaded JSON is not a settings backup file.'); - } - - if (payload.kind === settingsBackupKind) { - if (!Object.prototype.hasOwnProperty.call(payload, 'settings')) { - throw new Error('The settings backup file does not contain any settings.'); - } - if (!isPlainObject(payload.settings)) { - throw new Error('The settings backup file has an invalid settings payload.'); - } - return payload.settings; - } - - if (typeof payload.kind === 'string' && payload.kind === usageBackupKind) { - throw new Error('This is a data backup file, not a settings file.'); - } - - throw new Error('Uploaded JSON is not a settings backup file.'); - } - - function extractUsageImportPayload(payload) { - if (!isPlainObject(payload)) { - return payload; - } - - if (payload.kind === usageBackupKind) { - if (!Object.prototype.hasOwnProperty.call(payload, 'data')) { - throw new Error('The usage backup file does not contain any usage data.'); - } - return payload.data; - } - - if (typeof payload.kind === 'string' && payload.kind === settingsBackupKind) { - throw new Error('This is a settings backup file, not a data file.'); - } - - return payload; - } - - function computeUsageTotals(daily) { - return daily.reduce( - (totals, day) => ({ - inputTokens: totals.inputTokens + (day.inputTokens || 0), - outputTokens: totals.outputTokens + (day.outputTokens || 0), - cacheCreationTokens: totals.cacheCreationTokens + (day.cacheCreationTokens || 0), - cacheReadTokens: totals.cacheReadTokens + (day.cacheReadTokens || 0), - thinkingTokens: totals.thinkingTokens + (day.thinkingTokens || 0), - totalCost: totals.totalCost + (day.totalCost || 0), - totalTokens: totals.totalTokens + (day.totalTokens || 0), - requestCount: totals.requestCount + (day.requestCount || 0), - }), - { - inputTokens: 0, - outputTokens: 0, - cacheCreationTokens: 0, - cacheReadTokens: 0, - thinkingTokens: 0, - totalCost: 0, - totalTokens: 0, - 
requestCount: 0, - }, - ); - } - - function mergeUsageData(currentData, importedData) { - const current = - currentData && Array.isArray(currentData.daily) && currentData.daily.length > 0 - ? normalizeIncomingData(currentData) - : null; - - if (!current) { - return { - data: importedData, - summary: { - importedDays: importedData.daily.length, - addedDays: importedData.daily.length, - unchangedDays: 0, - conflictingDays: 0, - totalDays: importedData.daily.length, - }, - }; - } - - const currentByDate = new Map(current.daily.map((day) => [day.date, day])); - let addedDays = 0; - let unchangedDays = 0; - let conflictingDays = 0; - - for (const importedDay of importedData.daily) { - const existingDay = currentByDate.get(importedDay.date); - if (!existingDay) { - currentByDate.set(importedDay.date, importedDay); - addedDays += 1; - continue; - } - - if (areUsageDaysEquivalent(existingDay, importedDay)) { - unchangedDays += 1; - continue; - } - - conflictingDays += 1; - } - - const mergedDaily = [...currentByDate.values()].sort((left, right) => - left.date.localeCompare(right.date), - ); - - return { - data: { - daily: mergedDaily, - totals: computeUsageTotals(mergedDaily), - }, - summary: { - importedDays: importedData.daily.length, - addedDays, - unchangedDays, - conflictingDays, - totalDays: mergedDaily.length, - }, - }; - } - function toSettingsResponse(settings) { return { ...normalizeSettings(settings), @@ -766,7 +268,7 @@ function createDataRuntime({ withFileMutationLock, withOrderedFileMutationLocks, withSettingsAndDataMutationLock, - getPendingFileMutationLockCount: () => fileMutationLocks.size, + getPendingFileMutationLockCount, migrateLegacyDataFile, normalizeIsoTimestamp, normalizeProviderLimits, diff --git a/server/data-runtime/app-paths.js b/server/data-runtime/app-paths.js new file mode 100644 index 0000000..2873f42 --- /dev/null +++ b/server/data-runtime/app-paths.js @@ -0,0 +1,67 @@ +/** Resolves platform-specific data, config, and cache directories for 
the local app runtime. */ +function resolveDataRuntimeAppPaths({ + os, + path, + processObject, + appDirName, + appDirNameLinux, + isWindows, +}) { + const homeDir = os.homedir(); + const explicitPaths = { + dataDir: processObject.env.TTDASH_DATA_DIR, + configDir: processObject.env.TTDASH_CONFIG_DIR, + cacheDir: processObject.env.TTDASH_CACHE_DIR, + }; + let platformPaths; + + if (processObject.platform === 'darwin') { + const appSupportDir = path.join(homeDir, 'Library', 'Application Support', appDirName); + platformPaths = { + dataDir: appSupportDir, + configDir: appSupportDir, + cacheDir: path.join(homeDir, 'Library', 'Caches', appDirName), + }; + } else if (isWindows) { + platformPaths = { + dataDir: path.join( + processObject.env.LOCALAPPDATA || path.join(homeDir, 'AppData', 'Local'), + appDirName, + ), + configDir: path.join( + processObject.env.APPDATA || path.join(homeDir, 'AppData', 'Roaming'), + appDirName, + ), + cacheDir: path.join( + processObject.env.LOCALAPPDATA || path.join(homeDir, 'AppData', 'Local'), + appDirName, + 'Cache', + ), + }; + } else { + platformPaths = { + dataDir: path.join( + processObject.env.XDG_DATA_HOME || path.join(homeDir, '.local', 'share'), + appDirNameLinux, + ), + configDir: path.join( + processObject.env.XDG_CONFIG_HOME || path.join(homeDir, '.config'), + appDirNameLinux, + ), + cacheDir: path.join( + processObject.env.XDG_CACHE_HOME || path.join(homeDir, '.cache'), + appDirNameLinux, + ), + }; + } + + return { + dataDir: explicitPaths.dataDir || platformPaths.dataDir, + configDir: explicitPaths.configDir || platformPaths.configDir, + cacheDir: explicitPaths.cacheDir || platformPaths.cacheDir, + }; +} + +module.exports = { + resolveDataRuntimeAppPaths, +}; diff --git a/server/data-runtime/file-io.js b/server/data-runtime/file-io.js new file mode 100644 index 0000000..b42af74 --- /dev/null +++ b/server/data-runtime/file-io.js @@ -0,0 +1,129 @@ +const { randomBytes } = require('crypto'); + +/** Creates secure JSON file I/O 
helpers for the data runtime. */ +function createDataRuntimeFileIo({ + fs, + fsPromises, + path, + processObject, + appPaths, + npxCacheDir, + isWindows, + secureDirMode, + secureFileMode, +}) { + function ensureDir(dirPath) { + fs.mkdirSync(dirPath, { recursive: true, mode: secureDirMode }); + if (!isWindows) { + fs.chmodSync(dirPath, secureDirMode); + } + } + + async function ensureDirAsync(dirPath) { + await fsPromises.mkdir(dirPath, { recursive: true, mode: secureDirMode }); + if (!isWindows) { + await fsPromises.chmod(dirPath, secureDirMode); + } + } + + function ensureAppDirs(extraDirs = []) { + ensureDir(appPaths.dataDir); + ensureDir(appPaths.configDir); + ensureDir(appPaths.cacheDir); + ensureDir(npxCacheDir); + extraDirs.forEach((dirPath) => ensureDir(dirPath)); + } + + function applySecureFileMode(filePath) { + if (!isWindows) { + fs.chmodSync(filePath, secureFileMode); + } + } + + function isMissingFileError(error) { + return error?.code === 'ENOENT'; + } + + function createAtomicTempPath(filePath) { + const randomSuffix = randomBytes(8).toString('hex'); + return `${filePath}.${processObject.pid}.${Date.now()}.${randomSuffix}.tmp`; + } + + function writeJsonAtomic(filePath, data) { + ensureDir(path.dirname(filePath)); + const tempPath = createAtomicTempPath(filePath); + try { + fs.writeFileSync(tempPath, JSON.stringify(data, null, 2), { + mode: secureFileMode, + }); + applySecureFileMode(tempPath); + fs.renameSync(tempPath, filePath); + } catch (error) { + try { + fs.unlinkSync(tempPath); + } catch (cleanupError) { + if (cleanupError?.code !== 'ENOENT') { + throw new AggregateError( + [error, cleanupError], + `Failed atomic JSON write and temp-file cleanup for ${path.basename(filePath)}.`, + ); + } + } + throw error; + } + } + + async function writeJsonAtomicAsync(filePath, data) { + const tempPath = createAtomicTempPath(filePath); + const parentDir = path.dirname(filePath); + + try { + await ensureDirAsync(parentDir); + await 
fsPromises.writeFile(tempPath, JSON.stringify(data, null, 2), { + mode: secureFileMode, + }); + + if (!isWindows) { + await fsPromises.chmod(tempPath, secureFileMode); + } + + await fsPromises.rename(tempPath, filePath); + } catch (error) { + try { + await fsPromises.unlink(tempPath); + } catch (unlinkError) { + if (unlinkError?.code !== 'ENOENT') { + throw new AggregateError( + [error, unlinkError], + `Failed atomic JSON write and temp-file cleanup for ${path.basename(filePath)}.`, + ); + } + } + throw error; + } + } + + async function unlinkIfExists(filePath) { + try { + await fsPromises.unlink(filePath); + } catch (error) { + if (error?.code !== 'ENOENT') { + throw error; + } + } + } + + return { + ensureDir, + ensureAppDirs, + applySecureFileMode, + isMissingFileError, + writeJsonAtomic, + writeJsonAtomicAsync, + unlinkIfExists, + }; +} + +module.exports = { + createDataRuntimeFileIo, +}; diff --git a/server/data-runtime/file-locks.js b/server/data-runtime/file-locks.js new file mode 100644 index 0000000..b4047e3 --- /dev/null +++ b/server/data-runtime/file-locks.js @@ -0,0 +1,334 @@ +/** Creates in-process and cross-process file mutation locks for data runtime writes. 
*/ +function getInvalidFileLockOptionNames({ + fsPromises, + path, + processObject, + runtimeInstanceId, + isWindows, + secureDirMode, + secureFileMode, + fileMutationLockTimeoutMs, + fileMutationLockStaleMs, + fileMutationLockInstanceMismatchStaleMs, + settingsFile, + dataFile, + debugLog, +}) { + const invalidOptions = []; + + if (!fsPromises) invalidOptions.push('fsPromises'); + if (!path) invalidOptions.push('path'); + if (!processObject) invalidOptions.push('processObject'); + if (typeof runtimeInstanceId !== 'string' || !runtimeInstanceId) { + invalidOptions.push('runtimeInstanceId'); + } + if (typeof isWindows !== 'boolean') invalidOptions.push('isWindows'); + if (!Number.isInteger(secureDirMode) || secureDirMode < 0 || secureDirMode > 0o777) { + invalidOptions.push('secureDirMode'); + } + if (!Number.isInteger(secureFileMode) || secureFileMode < 0 || secureFileMode > 0o777) { + invalidOptions.push('secureFileMode'); + } + if ( + typeof fileMutationLockTimeoutMs !== 'number' || + !Number.isFinite(fileMutationLockTimeoutMs) || + fileMutationLockTimeoutMs < 0 + ) { + invalidOptions.push('fileMutationLockTimeoutMs'); + } + if ( + typeof fileMutationLockStaleMs !== 'number' || + !Number.isFinite(fileMutationLockStaleMs) || + fileMutationLockStaleMs < 0 + ) { + invalidOptions.push('fileMutationLockStaleMs'); + } + if ( + typeof fileMutationLockInstanceMismatchStaleMs !== 'number' || + !Number.isFinite(fileMutationLockInstanceMismatchStaleMs) || + fileMutationLockInstanceMismatchStaleMs < 0 + ) { + invalidOptions.push('fileMutationLockInstanceMismatchStaleMs'); + } + if (typeof settingsFile !== 'string' || !settingsFile) invalidOptions.push('settingsFile'); + if (typeof dataFile !== 'string' || !dataFile) invalidOptions.push('dataFile'); + if (typeof debugLog !== 'function') invalidOptions.push('debugLog'); + + return invalidOptions; +} + +function createDataRuntimeFileLocks({ + fsPromises, + path, + processObject, + runtimeInstanceId, + isWindows, + 
secureDirMode, + secureFileMode, + fileMutationLockTimeoutMs, + fileMutationLockStaleMs, + fileMutationLockInstanceMismatchStaleMs = fileMutationLockTimeoutMs, + settingsFile, + dataFile, + debugLog = console.debug, +}) { + const invalidOptions = getInvalidFileLockOptionNames({ + fsPromises, + path, + processObject, + runtimeInstanceId, + isWindows, + secureDirMode, + secureFileMode, + fileMutationLockTimeoutMs, + fileMutationLockStaleMs, + fileMutationLockInstanceMismatchStaleMs, + settingsFile, + dataFile, + debugLog, + }); + + if (invalidOptions.length > 0) { + throw new TypeError( + `createDataRuntimeFileLocks received invalid options: ${invalidOptions.join(', ')}`, + ); + } + + const fileMutationLocks = new Map(); + + function getFileMutationLockDir(filePath) { + return `${filePath}.lock`; + } + + function getFileMutationLockOwnerPath(lockDir) { + return path.join(lockDir, 'owner.json'); + } + + async function removeFileMutationLockDir(lockDir) { + try { + await fsPromises.rm(lockDir, { recursive: true, force: true }); + } catch (error) { + if (error?.code !== 'ENOENT') { + throw error; + } + } + } + + async function writeFileMutationLockOwner(lockDir) { + const ownerPath = getFileMutationLockOwnerPath(lockDir); + const owner = { + pid: processObject.pid, + createdAt: new Date().toISOString(), + instanceId: runtimeInstanceId, + }; + await fsPromises.writeFile(ownerPath, JSON.stringify(owner, null, 2), { + mode: secureFileMode, + }); + if (!isWindows) { + await fsPromises.chmod(ownerPath, secureFileMode); + } + } + + function isProcessRunning(pid) { + if (!Number.isInteger(pid) || pid <= 0) { + return false; + } + + try { + processObject.kill(pid, 0); + return true; + } catch (error) { + return error && error.code === 'EPERM'; + } + } + + async function sleep(ms) { + return new Promise((resolve) => setTimeout(resolve, ms)); + } + + async function shouldReapFileMutationLock(lockDir) { + const ownerPath = getFileMutationLockOwnerPath(lockDir); + let owner = null; 
+ + try { + const rawOwner = await fsPromises.readFile(ownerPath, 'utf-8'); + owner = JSON.parse(rawOwner); + } catch (error) { + if (error?.code !== 'ENOENT') { + debugLog('Failed to read file mutation lock owner', { ownerPath, error }); + // Continue through the stale-lock fallback chain: owner fields first, age last. + } + } + + try { + const ownerCreatedAt = owner?.createdAt ? Date.parse(owner.createdAt) : Number.NaN; + if ( + typeof owner?.instanceId === 'string' && + owner.instanceId !== runtimeInstanceId && + Number.isFinite(ownerCreatedAt) && + Date.now() - ownerCreatedAt > fileMutationLockInstanceMismatchStaleMs + ) { + return true; + } + + if (Number.isInteger(owner?.pid)) { + return !isProcessRunning(owner.pid); + } + + const stats = await fsPromises.stat(lockDir); + const lockAgeMs = Number.isFinite(ownerCreatedAt) + ? Date.now() - ownerCreatedAt + : Date.now() - stats.mtimeMs; + + if (lockAgeMs > fileMutationLockStaleMs) { + return true; + } + + return false; + } catch (error) { + if (error?.code === 'ENOENT') { + return false; + } + throw error; + } + } + + function createFileMutationLockQueueTimeoutError(filePath) { + return new Error( + `Timed out waiting for previous file mutation lock for ${path.basename(filePath)}.`, + ); + } + + async function waitForPreviousFileMutationLock(previous, filePath) { + let timeoutId; + const timeout = new Promise((_, reject) => { + timeoutId = setTimeout(() => { + reject(createFileMutationLockQueueTimeoutError(filePath)); + }, fileMutationLockTimeoutMs); + }); + + try { + await Promise.race([previous.catch(() => undefined), timeout]); + } finally { + clearTimeout(timeoutId); + } + } + + async function withCrossProcessFileMutationLock( + filePath, + operation, + timeoutMs = fileMutationLockTimeoutMs, + ) { + const lockDir = getFileMutationLockDir(filePath); + const startedAt = Date.now(); + let backoffMs = 50; + const maxBackoffMs = 1600; + + while (true) { + try { + await fsPromises.mkdir(path.dirname(lockDir), { + 
recursive: true, + mode: secureDirMode, + }); + await fsPromises.mkdir(lockDir, { mode: secureDirMode }); + if (!isWindows) { + await fsPromises.chmod(lockDir, secureDirMode); + } + + try { + await writeFileMutationLockOwner(lockDir); + } catch (error) { + try { + await removeFileMutationLockDir(lockDir); + } catch (cleanupError) { + debugLog('Failed to remove mutation lock dir', { lockDir, error: cleanupError }); + } + throw error; + } + + break; + } catch (error) { + if (!error || error.code !== 'EEXIST') { + throw error; + } + + if (await shouldReapFileMutationLock(lockDir)) { + await removeFileMutationLockDir(lockDir).catch(() => undefined); + backoffMs = 50; + continue; + } + + if (Date.now() - startedAt >= timeoutMs) { + throw new Error(`Could not acquire file mutation lock for ${path.basename(filePath)}.`, { + cause: error, + }); + } + + await sleep(backoffMs); + backoffMs = Math.min(backoffMs * 2, maxBackoffMs); + } + } + + try { + return await operation(); + } finally { + try { + await removeFileMutationLockDir(lockDir); + } catch (error) { + debugLog('failed cleaning lockDir', error); + // Ignore cleanup races so the original operation result wins. 
+ } + } + } + + async function withFileMutationLock(filePath, operation) { + const previous = fileMutationLocks.get(filePath) || Promise.resolve(); + let releaseCurrent = () => {}; + const current = new Promise((resolve) => { + releaseCurrent = resolve; + }); + + fileMutationLocks.set(filePath, current); + + try { + await waitForPreviousFileMutationLock(previous, filePath); + return await withCrossProcessFileMutationLock(filePath, operation); + } finally { + releaseCurrent(); + if (fileMutationLocks.get(filePath) === current) { + fileMutationLocks.delete(filePath); + } + } + } + + async function withOrderedFileMutationLocks(filePaths, operation) { + const uniquePaths = Array.from(new Set(filePaths)).sort(); + + const runWithLock = async (index) => { + if (index >= uniquePaths.length) { + return operation(); + } + + const filePath = uniquePaths[index]; + return withFileMutationLock(filePath, () => runWithLock(index + 1)); + }; + + return runWithLock(0); + } + + async function withSettingsAndDataMutationLock(operation) { + return withOrderedFileMutationLocks([settingsFile, dataFile], operation); + } + + return { + getFileMutationLockDir, + withFileMutationLock, + withOrderedFileMutationLocks, + withSettingsAndDataMutationLock, + getPendingFileMutationLockCount: () => fileMutationLocks.size, + }; +} + +module.exports = { + createDataRuntimeFileLocks, +}; diff --git a/server/data-runtime/import-merge.js b/server/data-runtime/import-merge.js new file mode 100644 index 0000000..a443af2 --- /dev/null +++ b/server/data-runtime/import-merge.js @@ -0,0 +1,290 @@ +const COST_COMPARISON_TOLERANCE = 1e-9; + +function isPlainObject(value) { + return Boolean(value) && typeof value === 'object' && !Array.isArray(value); +} + +function areNumbersEquivalent(left, right, tolerance = COST_COMPARISON_TOLERANCE) { + return Math.abs(left - right) <= tolerance; +} + +function hasValidUsageDayDate(day) { + return typeof day?.date === 'string' && day.date.trim().length > 0; +} + +function 
sortStrings(values) { + return [ + ...new Set( + (Array.isArray(values) ? values : []) + .filter((value) => typeof value === 'string') + .map((value) => value.trim()) + .filter(Boolean), + ), + ].sort((left, right) => left.localeCompare(right)); +} + +function canonicalizeModelBreakdown(entry) { + return { + modelName: typeof entry?.modelName === 'string' ? entry.modelName.trim() : '', + inputTokens: Number(entry?.inputTokens) || 0, + outputTokens: Number(entry?.outputTokens) || 0, + cacheCreationTokens: Number(entry?.cacheCreationTokens) || 0, + cacheReadTokens: Number(entry?.cacheReadTokens) || 0, + thinkingTokens: Number(entry?.thinkingTokens) || 0, + cost: Number(entry?.cost) || 0, + requestCount: Number(entry?.requestCount) || 0, + }; +} + +function canonicalizeUsageDay(day) { + return { + date: typeof day?.date === 'string' ? day.date : '', + inputTokens: Number(day?.inputTokens) || 0, + outputTokens: Number(day?.outputTokens) || 0, + cacheCreationTokens: Number(day?.cacheCreationTokens) || 0, + cacheReadTokens: Number(day?.cacheReadTokens) || 0, + thinkingTokens: Number(day?.thinkingTokens) || 0, + totalTokens: Number(day?.totalTokens) || 0, + totalCost: Number(day?.totalCost) || 0, + requestCount: Number(day?.requestCount) || 0, + modelsUsed: sortStrings(day?.modelsUsed), + modelBreakdowns: (Array.isArray(day?.modelBreakdowns) ? 
day.modelBreakdowns : []) + .map(canonicalizeModelBreakdown) + .sort((left, right) => left.modelName.localeCompare(right.modelName)), + }; +} + +function areUsageDaysEquivalent(left, right) { + const leftDay = canonicalizeUsageDay(left); + const rightDay = canonicalizeUsageDay(right); + const scalarFields = [ + 'date', + 'inputTokens', + 'outputTokens', + 'cacheCreationTokens', + 'cacheReadTokens', + 'thinkingTokens', + 'totalTokens', + 'totalCost', + 'requestCount', + ]; + + for (const field of scalarFields) { + if (field === 'totalCost') { + if (!areNumbersEquivalent(leftDay[field], rightDay[field])) { + return false; + } + continue; + } + + if (leftDay[field] !== rightDay[field]) { + return false; + } + } + + if (leftDay.modelsUsed.length !== rightDay.modelsUsed.length) { + return false; + } + for (let index = 0; index < leftDay.modelsUsed.length; index += 1) { + if (leftDay.modelsUsed[index] !== rightDay.modelsUsed[index]) { + return false; + } + } + + if (leftDay.modelBreakdowns.length !== rightDay.modelBreakdowns.length) { + return false; + } + + const breakdownFields = [ + 'modelName', + 'inputTokens', + 'outputTokens', + 'cacheCreationTokens', + 'cacheReadTokens', + 'thinkingTokens', + 'cost', + 'requestCount', + ]; + for (let index = 0; index < leftDay.modelBreakdowns.length; index += 1) { + const leftBreakdown = leftDay.modelBreakdowns[index]; + const rightBreakdown = rightDay.modelBreakdowns[index]; + for (const field of breakdownFields) { + if (field === 'cost') { + if (!areNumbersEquivalent(leftBreakdown[field], rightBreakdown[field])) { + return false; + } + continue; + } + + if (leftBreakdown[field] !== rightBreakdown[field]) { + return false; + } + } + } + + return true; +} + +function computeUsageTotals(daily) { + const toNumber = (value) => Number(value) || 0; + + return daily.reduce( + (totals, day) => ({ + inputTokens: totals.inputTokens + toNumber(day.inputTokens), + outputTokens: totals.outputTokens + toNumber(day.outputTokens), + 
cacheCreationTokens: totals.cacheCreationTokens + toNumber(day.cacheCreationTokens), + cacheReadTokens: totals.cacheReadTokens + toNumber(day.cacheReadTokens), + thinkingTokens: totals.thinkingTokens + toNumber(day.thinkingTokens), + totalCost: totals.totalCost + toNumber(day.totalCost), + totalTokens: totals.totalTokens + toNumber(day.totalTokens), + requestCount: totals.requestCount + toNumber(day.requestCount), + }), + { + inputTokens: 0, + outputTokens: 0, + cacheCreationTokens: 0, + cacheReadTokens: 0, + thinkingTokens: 0, + totalCost: 0, + totalTokens: 0, + requestCount: 0, + }, + ); +} + +/** Creates import payload extraction and usage merge helpers. */ +function createDataRuntimeImportMerge({ + normalizeIncomingData, + settingsBackupKind, + usageBackupKind, +}) { + function extractSettingsImportPayload(payload) { + if (!isPlainObject(payload)) { + throw new Error('Uploaded JSON is not a settings backup file.'); + } + + if (payload.kind === settingsBackupKind) { + if (!Object.prototype.hasOwnProperty.call(payload, 'settings')) { + throw new Error('The settings backup file does not contain any settings.'); + } + if (!isPlainObject(payload.settings)) { + throw new Error('The settings backup file has an invalid settings payload.'); + } + return payload.settings; + } + + if (typeof payload.kind === 'string' && payload.kind === usageBackupKind) { + throw new Error('This is a data backup file, not a settings file.'); + } + + throw new Error('Uploaded JSON is not a settings backup file.'); + } + + /** + * Extracts usage data from backup files while keeping legacy raw imports working. + * Non-object payloads are returned verbatim for backwards compatibility with raw arrays + * or pre-validated data. Object payloads with usageBackupKind must contain data, + * while settingsBackupKind objects are rejected as the wrong backup type. 
+ */ + function extractUsageImportPayload(payload) { + if (!isPlainObject(payload)) { + return payload; + } + + if (payload.kind === usageBackupKind) { + if (!Object.prototype.hasOwnProperty.call(payload, 'data')) { + throw new Error('The usage backup file does not contain any usage data.'); + } + return payload.data; + } + + if (typeof payload.kind === 'string' && payload.kind === settingsBackupKind) { + throw new Error('This is a settings backup file, not a data file.'); + } + + return payload; + } + + function mergeUsageData(currentData, importedData) { + if (!importedData || !Array.isArray(importedData.daily)) { + throw new Error('Imported data must contain a daily array.'); + } + + const validImportedDaily = importedData.daily + .filter(hasValidUsageDayDate) + .map(canonicalizeUsageDay); + const skippedDays = importedData.daily.length - validImportedDaily.length; + const current = + currentData && Array.isArray(currentData.daily) && currentData.daily.length > 0 + ? normalizeIncomingData(currentData) + : null; + + if (!current) { + return { + data: { + daily: validImportedDaily, + totals: computeUsageTotals(validImportedDaily), + }, + summary: { + importedDays: importedData.daily.length, + addedDays: validImportedDaily.length, + unchangedDays: 0, + conflictingDays: 0, + skippedDays, + totalDays: validImportedDaily.length, + }, + }; + } + + const currentByDate = new Map(current.daily.map((day) => [day.date, day])); + let addedDays = 0; + let unchangedDays = 0; + let conflictingDays = 0; + + for (const importedDay of validImportedDaily) { + const existingDay = currentByDate.get(importedDay.date); + if (!existingDay) { + currentByDate.set(importedDay.date, importedDay); + addedDays += 1; + continue; + } + + if (areUsageDaysEquivalent(existingDay, importedDay)) { + unchangedDays += 1; + continue; + } + + // Preserve local data on conflicts and report the day so users can resolve it explicitly. 
+ conflictingDays += 1; + } + + const mergedDaily = [...currentByDate.values()].sort((left, right) => + left.date.localeCompare(right.date), + ); + + return { + data: { + daily: mergedDaily, + totals: computeUsageTotals(mergedDaily), + }, + summary: { + importedDays: importedData.daily.length, + addedDays, + unchangedDays, + conflictingDays, + skippedDays, + totalDays: mergedDaily.length, + }, + }; + } + + return { + extractSettingsImportPayload, + extractUsageImportPayload, + mergeUsageData, + }; +} + +module.exports = { + createDataRuntimeImportMerge, +}; diff --git a/server/http-router.js b/server/http-router.js index 8524681..81f64c5 100644 --- a/server/http-router.js +++ b/server/http-router.js @@ -1,3 +1,12 @@ +const { createAutoImportRoutes } = require('./routes/auto-import-routes'); +const { createReportRoutes } = require('./routes/report-routes'); +const { createRuntimeRoutes } = require('./routes/runtime-routes'); +const { createSettingsRoutes } = require('./routes/settings-routes'); +const { createStaticRouteHandler } = require('./routes/static-routes'); +const { createUsageRoutes } = require('./routes/usage-routes'); +const { readMutationBody, sendSSE } = require('./routes/http-route-utils'); + +/** Creates the HTTP router that validates requests and dispatches route groups. 
*/ function createHttpRouter({ fs, path, @@ -19,164 +28,68 @@ function createHttpRouter({ validateMutationRequest, validateRequestHost, } = httpUtils; - const { - extractSettingsImportPayload, - extractUsageImportPayload, - isPayloadTooLargeError, - isPersistedStateError, - mergeUsageData, - readData, - readSettings, - unlinkIfExists, - _updateDataLoadStateUnlocked, - updateSettings, - withFileMutationLock, - withSettingsAndDataMutationLock, - writeData, - writeSettings, - normalizeSettings, - paths: { dataFile, settingsFile }, - } = dataRuntime; - const { - createAutoImportMessageEvent, - formatAutoImportMessageEvent, - acquireAutoImportLease, - lookupLatestToktrackVersion, - performAutoImport, - toAutoImportErrorEvent, - } = autoImportRuntime; - - const mimeTypes = { - '.html': 'text/html; charset=utf-8', - '.css': 'text/css; charset=utf-8', - '.js': 'application/javascript; charset=utf-8', - '.json': 'application/json; charset=utf-8', - '.png': 'image/png', - '.jpg': 'image/jpeg', - '.jpeg': 'image/jpeg', - '.gif': 'image/gif', - '.svg': 'image/svg+xml', - '.ico': 'image/x-icon', - '.woff': 'font/woff', - '.woff2': 'font/woff2', - }; - - function sendSSE(res, event, data) { - res.write(`event: ${event}\ndata: ${JSON.stringify(data)}\n\n`); - } - - function getCacheControl(filePath) { - if (filePath.includes(path.sep + 'assets' + path.sep)) { - return 'public, max-age=31536000, immutable'; - } - if (filePath.endsWith('.html')) { - return 'no-cache'; - } - return 'public, max-age=86400'; - } - - function writeStaticErrorResponse(res, status, message) { - res.writeHead(status, { - 'Content-Type': 'application/json; charset=utf-8', - ...securityHeaders, + const routeReadMutationBody = (req, res, messages) => + readMutationBody(req, res, { + readBody, + json, + isPayloadTooLargeError: dataRuntime.isPayloadTooLargeError, + ...messages, }); - res.end(JSON.stringify({ message })); - } - - function getErrorMessage(error, fallback) { - return error && typeof error.message 
=== 'string' && error.message ? error.message : fallback; - } - - async function readMutationBody(req, res, { tooLargeMessage, invalidMessage }) { - try { - return { ok: true, body: await readBody(req) }; - } catch (error) { - if (isPayloadTooLargeError(error)) { - json(res, 413, { message: tooLargeMessage }); - return { ok: false }; - } - json(res, 400, { message: getErrorMessage(error, invalidMessage) }); - return { ok: false }; - } - } - - function writeMutationServerError(res) { - return json(res, 500, { message: 'Server error' }); - } - - function sendStaticFile(res, reqPath, data) { - const ext = path.extname(reqPath).toLowerCase(); - const contentType = mimeTypes[ext] || 'application/octet-stream'; - const isHtml = ext === '.html'; - const htmlResponse = isHtml ? prepareHtmlResponse(data.toString('utf8')) : null; - const responseBody = htmlResponse ? htmlResponse.body : data; - const responseSecurityHeaders = htmlResponse ? htmlResponse.headers : securityHeaders; - - res.writeHead(200, { - 'Content-Type': contentType, - 'Cache-Control': getCacheControl(reqPath), - ...responseSecurityHeaders, - }); - res.end(responseBody); - } - - function readStaticFile(reqPath) { - if (fs.promises && typeof fs.promises.readFile === 'function') { - return fs.promises.readFile(reqPath); - } - - return new Promise((resolve, reject) => { - fs.readFile(reqPath, (error, data) => { - if (error) { - reject(error); - return; - } - resolve(data); - }); - }); - } - - function shouldServeSpaFallback(req, safePath) { - const acceptHeader = req.headers?.accept || req.headers?.Accept || ''; - const acceptsHtml = String( - Array.isArray(acceptHeader) ? 
acceptHeader[0] : acceptHeader, - ).includes('text/html'); - return acceptsHtml || safePath.endsWith('/') || path.extname(safePath) === ''; - } - - async function serveFile(req, res, reqPath, safePath) { - try { - const data = await readStaticFile(reqPath); - sendStaticFile(res, reqPath, data); - } catch (error) { - if (error && error.code === 'ENOENT') { - if (!shouldServeSpaFallback(req, safePath)) { - writeStaticErrorResponse(res, 404, 'Not Found'); - return; - } - - try { - const indexPath = path.join(staticRoot, 'index.html'); - const html = await readStaticFile(indexPath); - sendStaticFile(res, indexPath, html); - } catch { - writeStaticErrorResponse(res, 500, 'Internal Server Error'); - } - return; - } - - const invalidPath = error && error.code === 'ERR_INVALID_ARG_VALUE'; - const directoryRead = error && error.code === 'EISDIR'; - writeStaticErrorResponse( - res, - invalidPath ? 400 : directoryRead ? 403 : 500, - invalidPath - ? 'Invalid request path' - : directoryRead - ? 'Access denied' - : 'Internal Server Error', - ); - } + const usageRoutes = createUsageRoutes({ + json, + validateMutationRequest, + readMutationBody: routeReadMutationBody, + dataRuntime, + }); + const settingsRoutes = createSettingsRoutes({ + json, + validateMutationRequest, + readMutationBody: routeReadMutationBody, + dataRuntime, + }); + const autoImportRoutes = createAutoImportRoutes({ + json, + validateMutationRequest, + securityHeaders, + autoImportRuntime, + sendSSE, + }); + const runtimeRoutes = createRuntimeRoutes({ + json, + getRuntimeSnapshot, + autoImportRuntime, + }); + const reportRoutes = createReportRoutes({ + json, + readMutationBody: routeReadMutationBody, + sendBuffer, + dataRuntime, + generatePdfReport, + }); + const staticRoutes = createStaticRouteHandler({ + fs, + path, + staticRoot, + securityHeaders, + prepareHtmlResponse, + }); + const apiRouteHandlers = [ + usageRoutes.handleUsageRoutes, + settingsRoutes.handleSettingsRoutes, + 
autoImportRoutes.handleAutoImportRoutes, + runtimeRoutes.handleRuntimeRoutes, + reportRoutes.handleReportRoutes, + ]; + + async function dispatchApiRoute(apiPath, req, res) { + for (const handleApiRoute of apiRouteHandlers) { + const routeResult = await handleApiRoute(apiPath, req, res); + if (routeResult !== false) { + return true; + } + } + + return false; } async function handleServerRequest(req, res) { @@ -208,407 +121,12 @@ function createHttpRouter({ return json(res, 404, { message: 'Not Found' }); } - if (apiPath === '/usage') { - if (req.method === 'GET') { - let data; - try { - data = readData(); - } catch (error) { - if (isPersistedStateError(error, 'usage')) { - return json(res, 500, { message: error.message }); - } - throw error; - } - return json( - res, - 200, - data || { - daily: [], - totals: { - inputTokens: 0, - outputTokens: 0, - cacheCreationTokens: 0, - cacheReadTokens: 0, - thinkingTokens: 0, - totalCost: 0, - totalTokens: 0, - requestCount: 0, - }, - }, - ); - } - if (req.method === 'DELETE') { - const validationError = validateMutationRequest(req); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - try { - await withSettingsAndDataMutationLock(async () => { - await unlinkIfExists(dataFile); - await _updateDataLoadStateUnlocked({ - lastLoadedAt: null, - lastLoadSource: null, - }); - }); - } catch (error) { - if (isPersistedStateError(error, 'usage') || isPersistedStateError(error, 'settings')) { - return json(res, 500, { message: error.message }); - } - return writeMutationServerError(res); - } - return json(res, 200, { success: true }); - } - return json(res, 405, { message: 'Method Not Allowed' }); - } - - if (apiPath === '/runtime') { - if (req.method !== 'GET') { - return json(res, 405, { message: 'Method Not Allowed' }); - } - - return json(res, 200, getRuntimeSnapshot()); - } - - if (apiPath === '/settings') { - if (req.method === 'GET') { - try { - return json(res, 200, 
readSettings()); - } catch (error) { - if (isPersistedStateError(error, 'settings')) { - return json(res, 500, { message: error.message }); - } - throw error; - } - } - - if (req.method === 'DELETE') { - const validationError = validateMutationRequest(req); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - try { - const settings = await withFileMutationLock(settingsFile, async () => { - await unlinkIfExists(settingsFile); - return readSettings(); - }); - return json(res, 200, { success: true, settings }); - } catch (error) { - if (isPersistedStateError(error, 'settings')) { - return json(res, 500, { message: error.message }); - } - return writeMutationServerError(res); - } - } - - if (req.method === 'PATCH') { - const validationError = validateMutationRequest(req, { requiresJsonContentType: true }); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - - const bodyResult = await readMutationBody(req, res, { - tooLargeMessage: 'Settings request too large', - invalidMessage: 'Invalid settings request', - }); - if (!bodyResult.ok) { - return; - } - - try { - return json(res, 200, await updateSettings(bodyResult.body)); - } catch (error) { - if (isPersistedStateError(error, 'settings')) { - return json(res, 500, { message: error.message }); - } - return writeMutationServerError(res); - } - } - - return json(res, 405, { message: 'Method Not Allowed' }); - } - - if (apiPath === '/settings/import') { - if (req.method !== 'POST') { - return json(res, 405, { message: 'Method Not Allowed' }); - } - - const validationError = validateMutationRequest(req, { requiresJsonContentType: true }); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - - const bodyResult = await readMutationBody(req, res, { - tooLargeMessage: 'Settings file too large', - invalidMessage: 'Invalid settings file', - }); - if 
(!bodyResult.ok) { - return; - } - - let importedSettings; - try { - importedSettings = normalizeSettings(extractSettingsImportPayload(bodyResult.body)); - } catch (error) { - return json(res, 400, { message: getErrorMessage(error, 'Invalid settings file') }); - } - - try { - const settings = await withFileMutationLock(settingsFile, async () => { - await writeSettings(importedSettings); - return readSettings(); - }); - return json(res, 200, settings); - } catch (error) { - if (isPersistedStateError(error, 'settings')) { - return json(res, 500, { message: error.message }); - } - return writeMutationServerError(res); - } - } - - if (apiPath === '/upload') { - if (req.method === 'POST') { - const validationError = validateMutationRequest(req, { requiresJsonContentType: true }); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - - const bodyResult = await readMutationBody(req, res, { - tooLargeMessage: 'File too large (max. 10 MB)', - invalidMessage: 'Invalid JSON', - }); - if (!bodyResult.ok) { - return; - } - - let nextData; - try { - const normalized = dataRuntime.normalizeIncomingData - ? 
dataRuntime.normalizeIncomingData(bodyResult.body) - : null; - nextData = normalized || bodyResult.body; - } catch (error) { - return json(res, 400, { message: getErrorMessage(error, 'Invalid JSON') }); - } - - try { - await withSettingsAndDataMutationLock(async () => { - await writeData(nextData); - await _updateDataLoadStateUnlocked({ - lastLoadedAt: new Date().toISOString(), - lastLoadSource: 'file', - }); - }); - return json(res, 200, { - days: nextData.daily.length, - totalCost: nextData.totals.totalCost, - }); - } catch (error) { - if (isPersistedStateError(error, 'settings') || isPersistedStateError(error, 'usage')) { - return json(res, 500, { message: error.message }); - } - return writeMutationServerError(res); - } - } - return json(res, 405, { message: 'Method Not Allowed' }); - } - - if (apiPath === '/usage/import') { - if (req.method !== 'POST') { - return json(res, 405, { message: 'Method Not Allowed' }); - } - - const validationError = validateMutationRequest(req, { requiresJsonContentType: true }); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - - const bodyResult = await readMutationBody(req, res, { - tooLargeMessage: 'Usage backup file too large', - invalidMessage: 'Invalid usage backup file', - }); - if (!bodyResult.ok) { + if (apiPath !== null) { + const handled = await dispatchApiRoute(apiPath, req, res); + if (handled) { return; } - let importedData; - try { - const usagePayload = extractUsageImportPayload(bodyResult.body); - importedData = dataRuntime.normalizeIncomingData - ? 
dataRuntime.normalizeIncomingData(usagePayload) - : usagePayload; - } catch (error) { - return json(res, 400, { message: getErrorMessage(error, 'Invalid usage backup file') }); - } - - try { - const result = await withSettingsAndDataMutationLock(async () => { - const currentData = readData(); - const merged = mergeUsageData(currentData, importedData); - await writeData(merged.data); - await _updateDataLoadStateUnlocked({ - lastLoadedAt: new Date().toISOString(), - lastLoadSource: 'file', - }); - return merged; - }); - return json(res, 200, result.summary); - } catch (error) { - if (isPersistedStateError(error, 'usage') || isPersistedStateError(error, 'settings')) { - return json(res, 500, { message: error.message }); - } - return writeMutationServerError(res); - } - } - - if (apiPath === '/auto-import/stream') { - if (req.method !== 'POST') { - return json(res, 405, { message: 'Method Not Allowed' }); - } - - const validationError = validateMutationRequest(req); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - - let autoImportLease; - try { - autoImportLease = acquireAutoImportLease(); - } catch (error) { - if (error?.messageKey !== 'autoImportRunning') { - throw error; - } - return json(res, 409, { - message: formatAutoImportMessageEvent(createAutoImportMessageEvent('autoImportRunning')), - }); - } - - res.writeHead(200, { - 'Content-Type': 'text/event-stream', - 'Cache-Control': 'no-cache', - Connection: 'keep-alive', - 'X-Accel-Buffering': 'no', - ...securityHeaders, - }); - - let aborted = false; - req.on('close', () => { - aborted = true; - }); - - try { - const result = await performAutoImport({ - source: 'auto-import', - onCheck: (event) => { - if (!aborted) { - sendSSE(res, 'check', event); - } - }, - onProgress: (event) => { - if (!aborted) { - sendSSE(res, 'progress', event); - } - }, - onOutput: (line) => { - if (!aborted) { - sendSSE(res, 'stderr', { line }); - } - }, - signalOnClose: 
(close) => { - req.on('close', close); - }, - lease: autoImportLease, - }); - - if (aborted) { - return; - } - - sendSSE(res, 'success', result); - sendSSE(res, 'done', {}); - res.end(); - } catch (error) { - if (aborted) { - return; - } - sendSSE(res, 'error', toAutoImportErrorEvent(error)); - sendSSE(res, 'done', {}); - res.end(); - } finally { - autoImportLease.release(); - } - return; - } - - if (apiPath === '/toktrack/version-status') { - if (req.method !== 'GET') { - return json(res, 405, { message: 'Method Not Allowed' }); - } - - try { - return json(res, 200, await lookupLatestToktrackVersion()); - } catch (error) { - return json(res, 503, { - message: 'Service Unavailable', - detail: getErrorMessage(error, 'Could not determine the latest toktrack version.'), - }); - } - } - - if (apiPath === '/report/pdf') { - if (req.method !== 'POST') { - return json(res, 405, { message: 'Method Not Allowed' }); - } - - const validationError = validateMutationRequest(req, { requiresJsonContentType: true }); - if (validationError) { - return json(res, validationError.status, { message: validationError.message }); - } - - let data; - try { - data = readData(); - } catch (error) { - if (isPersistedStateError(error, 'usage')) { - return json(res, 500, { message: error.message }); - } - throw error; - } - if (!data || !Array.isArray(data.daily) || data.daily.length === 0) { - return json(res, 400, { message: 'No data available for the report.' }); - } - - let body; - try { - body = await readBody(req); - } catch (error) { - const status = isPayloadTooLargeError(error) ? 413 : 400; - return json(res, status, { - message: isPayloadTooLargeError(error) - ? 
'Report request too large' - : 'Invalid report request', - }); - } - - try { - const result = await generatePdfReport(data.daily, body || {}); - return sendBuffer( - res, - 200, - { - 'Content-Type': 'application/pdf', - 'Content-Disposition': `attachment; filename="${result.filename}"`, - }, - result.buffer, - ); - } catch (error) { - const message = error && error.message ? error.message : 'PDF generation failed'; - const status = error && error.code === 'TYPST_MISSING' ? 503 : 500; - return json(res, status, { message }); - } - } - - if (apiPath !== null) { return json(res, 404, { message: 'API endpoint not found' }); } @@ -622,22 +140,7 @@ function createHttpRouter({ return; } - const safePath = pathname === '/' ? '/index.html' : pathname; - if (safePath.includes('\0')) { - return json(res, 400, { message: 'Invalid request path' }); - } - const resolvedStaticRoot = path.resolve(staticRoot); - const filePath = path.resolve(staticRoot, `.${safePath}`); - - if ( - filePath === resolvedStaticRoot || - (!filePath.startsWith(resolvedStaticRoot + path.sep) && - filePath !== path.resolve(staticRoot, 'index.html')) - ) { - return json(res, 403, { message: 'Access denied' }); - } - - await serveFile(req, res, filePath, safePath); + await staticRoutes.handleStaticRequest(req, res, pathname); } return { diff --git a/server/routes/auto-import-routes.js b/server/routes/auto-import-routes.js new file mode 100644 index 0000000..ba28353 --- /dev/null +++ b/server/routes/auto-import-routes.js @@ -0,0 +1,109 @@ +/** Creates the auto-import streaming API route handler. 
*/ +function createAutoImportRoutes({ + json, + validateMutationRequest, + securityHeaders, + autoImportRuntime, + sendSSE, +}) { + const { + createAutoImportMessageEvent, + formatAutoImportMessageEvent, + acquireAutoImportLease, + performAutoImport, + toAutoImportErrorEvent, + } = autoImportRuntime; + + async function handleAutoImportRoutes(apiPath, req, res) { + if (apiPath !== '/auto-import/stream') { + return false; + } + + if (req.method !== 'POST') { + return json(res, 405, { message: 'Method Not Allowed' }); + } + + const validationError = validateMutationRequest(req); + if (validationError) { + return json(res, validationError.status, { message: validationError.message }); + } + + let autoImportLease; + let aborted = false; + try { + autoImportLease = acquireAutoImportLease(); + } catch (error) { + if (error?.messageKey !== 'autoImportRunning') { + throw error; + } + return json(res, 409, { + message: formatAutoImportMessageEvent(createAutoImportMessageEvent('autoImportRunning')), + }); + } + + try { + res.writeHead(200, { + 'Content-Type': 'text/event-stream', + 'Cache-Control': 'no-cache', + Connection: 'keep-alive', + 'X-Accel-Buffering': 'no', + ...securityHeaders, + }); + + // Local abort state gates SSE writes; performAutoImport gets its own close signal below. + req.on('close', () => { + aborted = true; + }); + + const result = await performAutoImport({ + source: 'auto-import', + onCheck: (event) => { + if (!aborted) { + sendSSE(res, 'check', event); + } + }, + onProgress: (event) => { + if (!aborted) { + sendSSE(res, 'progress', event); + } + }, + onOutput: (line) => { + if (!aborted) { + sendSSE(res, 'stderr', { line }); + } + }, + signalOnClose: (close) => { + // This second close listener aborts the long-running import command itself. 
+ req.on('close', close); + }, + lease: autoImportLease, + }); + + if (aborted) { + return true; + } + + sendSSE(res, 'success', result); + sendSSE(res, 'done', {}); + res.end(); + } catch (error) { + if (aborted) { + return true; + } + sendSSE(res, 'error', toAutoImportErrorEvent(error)); + sendSSE(res, 'done', {}); + res.end(); + } finally { + autoImportLease.release(); + } + return true; + } + + return { + handleAutoImportRoutes, + }; +} + +module.exports = { + createAutoImportRoutes, +}; diff --git a/server/routes/http-route-utils.js b/server/routes/http-route-utils.js new file mode 100644 index 0000000..f74d5a5 --- /dev/null +++ b/server/routes/http-route-utils.js @@ -0,0 +1,55 @@ +/** Returns a stable fallback message for unknown thrown values. */ +function getErrorMessage(error, fallback) { + return error && typeof error.message === 'string' && error.message ? error.message : fallback; +} + +/** Reads a mutation body and writes the matching client error response on parse/size failures. */ +async function readMutationBody( + req, + res, + { + readBody, + json, + isPayloadTooLargeError, + tooLargeMessage, + invalidMessage, + suppressErrorDetails = false, + }, +) { + try { + return { ok: true, body: await readBody(req) }; + } catch (error) { + if (isPayloadTooLargeError(error)) { + json(res, 413, { message: tooLargeMessage }); + return { ok: false }; + } + json(res, 400, { + message: suppressErrorDetails ? invalidMessage : getErrorMessage(error, invalidMessage), + }); + return { ok: false }; + } +} + +/** Writes the generic mutation error response used by persistence-backed routes. */ +function writeMutationServerError(json, res) { + return json(res, 500, { message: 'Server error' }); +} + +/** Writes one Server-Sent Event frame. 
*/ +function sendSSE(res, event, data) { + let payload; + try { + payload = JSON.stringify(data); + } catch (error) { + console.error(`Failed to serialize SSE payload for event "${event}":`, error); + payload = JSON.stringify({ message: 'Serialization error', event }); + } + res.write(`event: ${event}\ndata: ${payload}\n\n`); +} + +module.exports = { + getErrorMessage, + readMutationBody, + sendSSE, + writeMutationServerError, +}; diff --git a/server/routes/report-routes.js b/server/routes/report-routes.js new file mode 100644 index 0000000..c1167d1 --- /dev/null +++ b/server/routes/report-routes.js @@ -0,0 +1,82 @@ +const { getErrorMessage } = require('./http-route-utils'); + +function formatReportContentDisposition(filename) { + const rawFilename = String(filename || 'ttdash-report.pdf'); + const asciiFallback = rawFilename + .replace(/["\\;]/g, '_') + .replace(/[^\x20-\x7E]/g, '_') + .trim(); + const safeAsciiFilename = asciiFallback || 'ttdash-report.pdf'; + const encodedFilename = encodeURIComponent(rawFilename); + + return `attachment; filename="${safeAsciiFilename}"; filename*=UTF-8''${encodedFilename}`; +} + +/** Creates PDF report API route handlers. 
*/ +function createReportRoutes({ + json, + readMutationBody, + sendBuffer, + dataRuntime, + generatePdfReport, +}) { + const { isPersistedStateError, readData } = dataRuntime; + + async function handleReportRoutes(apiPath, req, res) { + if (apiPath !== '/report/pdf') { + return false; + } + + if (req.method !== 'POST') { + return json(res, 405, { message: 'Method Not Allowed' }); + } + + let data; + try { + data = readData(); + } catch (error) { + if (isPersistedStateError(error, 'usage')) { + return json(res, 500, { message: error.message }); + } + console.error('Unexpected report route usage read error:', error); + throw new Error('report-routes: unexpected error during usage handling', { cause: error }); + } + if (!data || !Array.isArray(data.daily) || data.daily.length === 0) { + return json(res, 400, { message: 'No data available for the report.' }); + } + + const bodyResult = await readMutationBody(req, res, { + tooLargeMessage: 'Report request too large', + invalidMessage: 'Invalid report request', + suppressErrorDetails: true, + }); + if (!bodyResult.ok) { + return true; + } + + try { + const result = await generatePdfReport(data.daily, bodyResult.body || {}); + return sendBuffer( + res, + 200, + { + 'Content-Type': 'application/pdf', + 'Content-Disposition': formatReportContentDisposition(result.filename), + }, + result.buffer, + ); + } catch (error) { + const message = getErrorMessage(error, 'PDF generation failed'); + const status = error && error.code === 'TYPST_MISSING' ? 503 : 500; + return json(res, status, { message }); + } + } + + return { + handleReportRoutes, + }; +} + +module.exports = { + createReportRoutes, +}; diff --git a/server/routes/runtime-routes.js b/server/routes/runtime-routes.js new file mode 100644 index 0000000..1bd9397 --- /dev/null +++ b/server/routes/runtime-routes.js @@ -0,0 +1,41 @@ +const { getErrorMessage } = require('./http-route-utils'); + +/** Creates runtime metadata and toktrack version-status API route handlers. 
*/ +function createRuntimeRoutes({ json, getRuntimeSnapshot, autoImportRuntime }) { + const { lookupLatestToktrackVersion } = autoImportRuntime; + + async function handleRuntimeRoutes(apiPath, req, res) { + if (apiPath === '/runtime') { + if (req.method !== 'GET') { + return json(res, 405, { message: 'Method Not Allowed' }); + } + + return json(res, 200, getRuntimeSnapshot()); + } + + if (apiPath === '/toktrack/version-status') { + if (req.method !== 'GET') { + return json(res, 405, { message: 'Method Not Allowed' }); + } + + try { + return json(res, 200, await lookupLatestToktrackVersion()); + } catch (error) { + return json(res, 503, { + message: 'Service Unavailable', + detail: getErrorMessage(error, 'Could not determine the latest toktrack version.'), + }); + } + } + + return false; + } + + return { + handleRuntimeRoutes, + }; +} + +module.exports = { + createRuntimeRoutes, +}; diff --git a/server/routes/settings-routes.js b/server/routes/settings-routes.js new file mode 100644 index 0000000..c461c9a --- /dev/null +++ b/server/routes/settings-routes.js @@ -0,0 +1,125 @@ +const { getErrorMessage, writeMutationServerError } = require('./http-route-utils'); + +/** Creates settings and settings-import API route handlers. 
/**
 * Creates the `/settings` and `/settings/import` API route handlers.
 *
 * `getErrorMessage` and `writeMutationServerError` are module-scope helpers
 * (presumably required from './http-route-utils' at the top of this file, as
 * the sibling usage-routes module does — not visible in this chunk).
 *
 * @param {object} deps
 * @param {Function} deps.json - Writes a JSON response: `json(res, status, payload)`.
 * @param {Function} deps.validateMutationRequest - Returns `{ status, message }` when a
 *   mutation request must be rejected, otherwise a falsy value.
 * @param {Function} deps.readMutationBody - Reads and parses the request body; resolves to
 *   `{ ok, body }` and writes the error response itself when `ok` is false.
 * @param {object} deps.dataRuntime - Persistence layer: settings file I/O, locking,
 *   normalization, and error classification.
 * @returns {{ handleSettingsRoutes: Function }}
 */
function createSettingsRoutes({ json, validateMutationRequest, readMutationBody, dataRuntime }) {
  const {
    extractSettingsImportPayload,
    isPersistedStateError,
    readSettings,
    unlinkIfExists,
    updateSettings,
    withFileMutationLock,
    writeSettings,
    normalizeSettings,
    paths: { settingsFile },
  } = dataRuntime;

  /**
   * Routes `/settings` (GET/DELETE/PATCH) and `/settings/import` (POST) requests.
   *
   * Returns the `json(...)` result (or `true`) when the request was handled here,
   * and `false` when `apiPath` is not a settings route so the caller can keep
   * dispatching. Unexpected GET failures are rethrown to the caller.
   */
  async function handleSettingsRoutes(apiPath, req, res) {
    if (apiPath === '/settings') {
      if (req.method === 'GET') {
        try {
          return json(res, 200, readSettings());
        } catch (error) {
          // Known persisted-state failures become a 500 with the original
          // message; anything else bubbles up to the outer error handling.
          if (isPersistedStateError(error, 'settings')) {
            return json(res, 500, { message: error.message });
          }
          throw error;
        }
      }

      if (req.method === 'DELETE') {
        const validationError = validateMutationRequest(req);
        if (validationError) {
          return json(res, validationError.status, { message: validationError.message });
        }
        try {
          // Delete under the file lock, then report whatever readSettings()
          // yields afterwards (presumably the defaults — confirm in dataRuntime).
          const settings = await withFileMutationLock(settingsFile, async () => {
            await unlinkIfExists(settingsFile);
            return readSettings();
          });
          return json(res, 200, { success: true, settings });
        } catch (error) {
          if (isPersistedStateError(error, 'settings')) {
            return json(res, 500, { message: error.message });
          }
          return writeMutationServerError(json, res);
        }
      }

      if (req.method === 'PATCH') {
        const validationError = validateMutationRequest(req, { requiresJsonContentType: true });
        if (validationError) {
          return json(res, validationError.status, { message: validationError.message });
        }

        const bodyResult = await readMutationBody(req, res, {
          tooLargeMessage: 'Settings request too large',
          invalidMessage: 'Invalid settings request',
        });
        if (!bodyResult.ok) {
          // readMutationBody already wrote the error response; true means handled.
          return true;
        }

        try {
          return json(res, 200, await updateSettings(bodyResult.body));
        } catch (error) {
          if (isPersistedStateError(error, 'settings')) {
            return json(res, 500, { message: error.message });
          }
          return writeMutationServerError(json, res);
        }
      }

      return json(res, 405, { message: 'Method Not Allowed' });
    }

    if (apiPath === '/settings/import') {
      if (req.method !== 'POST') {
        return json(res, 405, { message: 'Method Not Allowed' });
      }

      const validationError = validateMutationRequest(req, { requiresJsonContentType: true });
      if (validationError) {
        return json(res, validationError.status, { message: validationError.message });
      }

      const bodyResult = await readMutationBody(req, res, {
        tooLargeMessage: 'Settings file too large',
        invalidMessage: 'Invalid settings file',
      });
      if (!bodyResult.ok) {
        // readMutationBody already wrote the error response; true means handled.
        return true;
      }

      let importedSettings;
      try {
        // Extract + normalize before taking the lock so malformed uploads are
        // rejected with a 400 and never touch the settings file.
        importedSettings = normalizeSettings(extractSettingsImportPayload(bodyResult.body));
      } catch (error) {
        return json(res, 400, { message: getErrorMessage(error, 'Invalid settings file') });
      }

      try {
        // Write under the lock and respond with the re-read (persisted) state.
        const settings = await withFileMutationLock(settingsFile, async () => {
          await writeSettings(importedSettings);
          return readSettings();
        });
        return json(res, 200, settings);
      } catch (error) {
        if (isPersistedStateError(error, 'settings')) {
          return json(res, 500, { message: error.message });
        }
        return writeMutationServerError(json, res);
      }
    }

    // Not a settings route: let the caller keep dispatching.
    return false;
  }

  return {
    handleSettingsRoutes,
  };
}

module.exports = {
  createSettingsRoutes,
};
*/ +function createStaticRouteHandler({ fs, path, staticRoot, securityHeaders, prepareHtmlResponse }) { + const mimeTypes = { + '.html': 'text/html; charset=utf-8', + '.css': 'text/css; charset=utf-8', + '.js': 'application/javascript; charset=utf-8', + '.json': 'application/json; charset=utf-8', + '.png': 'image/png', + '.jpg': 'image/jpeg', + '.jpeg': 'image/jpeg', + '.gif': 'image/gif', + '.svg': 'image/svg+xml', + '.ico': 'image/x-icon', + '.woff': 'font/woff', + '.woff2': 'font/woff2', + }; + + function getCacheControl(filePath) { + if (filePath.includes(path.sep + 'assets' + path.sep)) { + return 'public, max-age=31536000, immutable'; + } + if (filePath.endsWith('.html')) { + return 'no-cache'; + } + return 'public, max-age=86400'; + } + + function writeStaticErrorResponse(res, status, message) { + res.writeHead(status, { + 'Content-Type': 'application/json; charset=utf-8', + ...securityHeaders, + }); + res.end(JSON.stringify({ message })); + } + + function sendStaticFile(res, reqPath, data) { + const ext = path.extname(reqPath).toLowerCase(); + const contentType = mimeTypes[ext] || 'application/octet-stream'; + const isHtml = ext === '.html'; + const htmlResponse = + isHtml && typeof prepareHtmlResponse === 'function' + ? prepareHtmlResponse(data.toString('utf8')) + : null; + const responseBody = htmlResponse ? htmlResponse.body : data; + const responseSecurityHeaders = htmlResponse + ? { ...securityHeaders, ...htmlResponse.headers } + : securityHeaders; + + res.writeHead(200, { + 'Content-Type': contentType, + 'Cache-Control': getCacheControl(reqPath), + ...responseSecurityHeaders, + }); + res.end(responseBody); + } + + function readStaticFile(reqPath) { + return fs.promises.readFile(reqPath); + } + + function shouldServeSpaFallback(req, safePath) { + const acceptHeader = req.headers?.accept || ''; + const acceptsHtml = String( + Array.isArray(acceptHeader) ? 
acceptHeader[0] : acceptHeader, + ).includes('text/html'); + // Preserve direct local navigation and simple test clients that omit Accept for app routes. + return acceptsHtml || safePath.endsWith('/') || path.extname(safePath) === ''; + } + + async function serveFile(req, res, reqPath, safePath) { + try { + const data = await readStaticFile(reqPath); + sendStaticFile(res, reqPath, data); + } catch (error) { + if (error && error.code === 'ENOENT') { + if (!shouldServeSpaFallback(req, safePath)) { + writeStaticErrorResponse(res, 404, 'Not Found'); + return; + } + + const indexPath = path.join(staticRoot, 'index.html'); + try { + const html = await readStaticFile(indexPath); + sendStaticFile(res, indexPath, html); + } catch (fallbackError) { + console.error('SPA fallback read failed', { + error: fallbackError, + indexPath, + safePath, + staticRoot, + }); + writeStaticErrorResponse(res, 500, 'Internal Server Error'); + } + return; + } + + const errorCode = error?.code; + const staticErrorResponses = { + ERR_INVALID_ARG_VALUE: { status: 400, message: 'Invalid request path' }, + EISDIR: { status: 403, message: 'Access denied' }, + }; + const response = staticErrorResponses[errorCode] || { + status: 500, + message: 'Internal Server Error', + }; + console.error('Static file read failed', { + error, + reqPath, + safePath, + staticRoot, + }); + writeStaticErrorResponse(res, response.status, response.message); + } + } + + async function handleStaticRequest(req, res, pathname) { + const safePath = pathname === '/' ? 
'/index.html' : pathname; + if (safePath.includes('\0')) { + return writeStaticErrorResponse(res, 400, 'Invalid request path'); + } + const resolvedStaticRoot = path.resolve(staticRoot); + const filePath = path.resolve(staticRoot, `.${safePath}`); + + if (filePath === resolvedStaticRoot || !filePath.startsWith(resolvedStaticRoot + path.sep)) { + return writeStaticErrorResponse(res, 403, 'Access denied'); + } + + await serveFile(req, res, filePath, safePath); + } + + return { + handleStaticRequest, + }; +} + +module.exports = { + createStaticRouteHandler, +}; diff --git a/server/routes/usage-routes.js b/server/routes/usage-routes.js new file mode 100644 index 0000000..351e59e --- /dev/null +++ b/server/routes/usage-routes.js @@ -0,0 +1,211 @@ +const { getErrorMessage, writeMutationServerError } = require('./http-route-utils'); + +const EMPTY_USAGE_RESPONSE = { + daily: [], + totals: { + inputTokens: 0, + outputTokens: 0, + cacheCreationTokens: 0, + cacheReadTokens: 0, + thinkingTokens: 0, + totalCost: 0, + totalTokens: 0, + requestCount: 0, + }, +}; + +const REQUIRED_USAGE_TOTAL_KEYS = Object.keys(EMPTY_USAGE_RESPONSE.totals); + +/** Creates usage, upload, and usage-import API route handlers. 
/**
 * Creates usage, upload, and usage-import API route handlers.
 *
 * Uses the module-level EMPTY_USAGE_RESPONSE / REQUIRED_USAGE_TOTAL_KEYS
 * constants (declared above in this file) for empty-state responses and
 * payload validation. `getErrorMessage` and `writeMutationServerError` come
 * from './http-route-utils' (required at the top of this file).
 *
 * @param {object} deps
 * @param {Function} deps.json - Writes a JSON response: `json(res, status, payload)`.
 * @param {Function} deps.validateMutationRequest - Returns `{ status, message }` when a
 *   mutation request must be rejected, otherwise a falsy value.
 * @param {Function} deps.readMutationBody - Reads and parses the request body; resolves to
 *   `{ ok, body }` and writes the error response itself when `ok` is false.
 * @param {object} deps.dataRuntime - Persistence layer: usage data I/O, merge,
 *   combined settings+data locking, and error classification.
 * @returns {{ handleUsageRoutes: Function }}
 */
function createUsageRoutes({ json, validateMutationRequest, readMutationBody, dataRuntime }) {
  const {
    extractUsageImportPayload,
    isPersistedStateError,
    mergeUsageData,
    readData,
    unlinkIfExists,
    _updateDataLoadStateUnlocked,
    withSettingsAndDataMutationLock,
    writeData,
    paths: { dataFile },
  } = dataRuntime;

  /** True for a non-null, non-array object. */
  function isPlainObject(value) {
    return value !== null && typeof value === 'object' && !Array.isArray(value);
  }

  /** True when every required totals key is a finite number. */
  function hasNumericUsageTotals(totals) {
    return (
      isPlainObject(totals) &&
      REQUIRED_USAGE_TOTAL_KEYS.every((key) => Number.isFinite(totals[key]))
    );
  }

  /** Structural check: `{ daily: [{ date: string, ... }], totals: {...} }`. */
  function isUsageData(value) {
    return (
      isPlainObject(value) &&
      Array.isArray(value.daily) &&
      value.daily.every((day) => isPlainObject(day) && typeof day.date === 'string') &&
      hasNumericUsageTotals(value.totals)
    );
  }

  /**
   * Optionally runs the runtime's normalizer, then validates the result.
   * Throws `Error(invalidMessage)` when the (normalized) payload is not
   * valid usage data.
   */
  function normalizeIncomingUsagePayload(payload, invalidMessage) {
    let nextPayload = payload;

    if (typeof dataRuntime.normalizeIncomingData === 'function') {
      const normalized = dataRuntime.normalizeIncomingData(payload);
      // normalizeIncomingData may return undefined to keep the original payload; null is invalid.
      if (normalized !== undefined) {
        nextPayload = normalized;
      }
    }

    if (!isUsageData(nextPayload)) {
      throw new Error(invalidMessage);
    }

    return nextPayload;
  }

  /**
   * Routes `/usage` (GET/DELETE), `/upload` (POST), and `/usage/import` (POST).
   *
   * Returns the `json(...)` result (or `true`) when the request was handled
   * here, and `false` when `apiPath` is not a usage route. Unexpected GET
   * failures are rethrown to the caller.
   */
  async function handleUsageRoutes(apiPath, req, res) {
    if (apiPath === '/usage') {
      if (req.method === 'GET') {
        let data;
        try {
          data = readData();
        } catch (error) {
          if (isPersistedStateError(error, 'usage')) {
            return json(res, 500, { message: error.message });
          }
          throw error;
        }
        // Missing data file: respond with the canonical empty shape.
        return json(res, 200, data || EMPTY_USAGE_RESPONSE);
      }
      if (req.method === 'DELETE') {
        const validationError = validateMutationRequest(req);
        if (validationError) {
          return json(res, validationError.status, { message: validationError.message });
        }
        try {
          // Delete the data file and clear the load-state markers under the
          // combined lock so settings and data stay consistent.
          await withSettingsAndDataMutationLock(async () => {
            await unlinkIfExists(dataFile);
            await _updateDataLoadStateUnlocked({
              lastLoadedAt: null,
              lastLoadSource: null,
            });
          });
        } catch (error) {
          if (isPersistedStateError(error, 'usage') || isPersistedStateError(error, 'settings')) {
            return json(res, 500, { message: error.message });
          }
          return writeMutationServerError(json, res);
        }
        return json(res, 200, { success: true });
      }
      return json(res, 405, { message: 'Method Not Allowed' });
    }

    if (apiPath === '/upload') {
      if (req.method === 'POST') {
        const validationError = validateMutationRequest(req, { requiresJsonContentType: true });
        if (validationError) {
          return json(res, validationError.status, { message: validationError.message });
        }

        const bodyResult = await readMutationBody(req, res, {
          tooLargeMessage: 'File too large (max. 10 MB)',
          invalidMessage: 'Invalid JSON',
        });
        if (!bodyResult.ok) {
          // readMutationBody has already written the error response; true means handled.
          return true;
        }

        let nextData;
        try {
          // Validate before taking the lock so bad uploads never touch disk.
          nextData = normalizeIncomingUsagePayload(bodyResult.body, 'Invalid JSON');
        } catch (error) {
          return json(res, 400, { message: getErrorMessage(error, 'Invalid JSON') });
        }

        try {
          // Uploads replace the stored data wholesale (no merge).
          await withSettingsAndDataMutationLock(async () => {
            await writeData(nextData);
            await _updateDataLoadStateUnlocked({
              lastLoadedAt: new Date().toISOString(),
              lastLoadSource: 'file',
            });
          });
          return json(res, 200, {
            days: nextData.daily.length,
            totalCost: nextData.totals.totalCost,
          });
        } catch (error) {
          if (isPersistedStateError(error, 'settings') || isPersistedStateError(error, 'usage')) {
            return json(res, 500, { message: error.message });
          }
          return writeMutationServerError(json, res);
        }
      }
      return json(res, 405, { message: 'Method Not Allowed' });
    }

    if (apiPath === '/usage/import') {
      if (req.method !== 'POST') {
        return json(res, 405, { message: 'Method Not Allowed' });
      }

      const validationError = validateMutationRequest(req, { requiresJsonContentType: true });
      if (validationError) {
        return json(res, validationError.status, { message: validationError.message });
      }

      const bodyResult = await readMutationBody(req, res, {
        tooLargeMessage: 'Usage backup file too large',
        invalidMessage: 'Invalid usage backup file',
      });
      if (!bodyResult.ok) {
        // readMutationBody has already written the error response; true means handled.
        return true;
      }

      let importedData;
      try {
        const usagePayload = extractUsageImportPayload(bodyResult.body);
        importedData = normalizeIncomingUsagePayload(usagePayload, 'Invalid usage backup file');
      } catch (error) {
        return json(res, 400, { message: getErrorMessage(error, 'Invalid usage backup file') });
      }

      try {
        // Imports merge into the current data (unlike /upload, which replaces it).
        const result = await withSettingsAndDataMutationLock(async () => {
          const currentData = readData();
          const merged = mergeUsageData(currentData, importedData);
          await writeData(merged.data);
          await _updateDataLoadStateUnlocked({
            lastLoadedAt: new Date().toISOString(),
            lastLoadSource: 'file',
          });
          return merged;
        });
        return json(res, 200, result.summary);
      } catch (error) {
        if (isPersistedStateError(error, 'usage') || isPersistedStateError(error, 'settings')) {
          return json(res, 500, { message: error.message });
        }
        return writeMutationServerError(json, res);
      }
    }

    // Not a usage route: let the caller keep dispatching.
    return false;
  }

  return {
    handleUsageRoutes,
  };
}

module.exports = {
  createUsageRoutes,
};
+ return true; + } + + let importedData; + try { + const usagePayload = extractUsageImportPayload(bodyResult.body); + importedData = normalizeIncomingUsagePayload(usagePayload, 'Invalid usage backup file'); + } catch (error) { + return json(res, 400, { message: getErrorMessage(error, 'Invalid usage backup file') }); + } + + try { + const result = await withSettingsAndDataMutationLock(async () => { + const currentData = readData(); + const merged = mergeUsageData(currentData, importedData); + await writeData(merged.data); + await _updateDataLoadStateUnlocked({ + lastLoadedAt: new Date().toISOString(), + lastLoadSource: 'file', + }); + return merged; + }); + return json(res, 200, result.summary); + } catch (error) { + if (isPersistedStateError(error, 'usage') || isPersistedStateError(error, 'settings')) { + return json(res, 500, { message: error.message }); + } + return writeMutationServerError(json, res); + } + } + + return false; + } + + return { + handleUsageRoutes, + }; +} + +module.exports = { + createUsageRoutes, +}; diff --git a/server/runtime-formatters.js b/server/runtime-formatters.js new file mode 100644 index 0000000..f200a8a --- /dev/null +++ b/server/runtime-formatters.js @@ -0,0 +1,60 @@ +function formatCurrency(value, locale = 'de-CH') { + const numericValue = Number(value) || 0; + const fractionDigits = Math.abs(numericValue) >= 100 ? 0 : 2; + + // Toktrack reports usage cost in USD; server logs keep the app's default Swiss locale. + return new Intl.NumberFormat(locale, { + style: 'currency', + currency: 'USD', + minimumFractionDigits: fractionDigits, + maximumFractionDigits: fractionDigits, + }).format(numericValue); +} + +/** Formats rounded whole-number counts for server startup/status logs. */ +function formatInteger(value, locale = 'de-CH') { + return new Intl.NumberFormat(locale).format(Math.round(Number(value) || 0)); +} + +// Server CLI/status copy is English; locale only controls number formatting. 
+function formatDayCount(value, locale = 'de-CH') { + const roundedValue = Math.round(Number(value) || 0); + const dayLabel = roundedValue === 1 ? 'day' : 'days'; + + return `${formatInteger(roundedValue, locale)} ${dayLabel}`; +} + +function formatErrorMessage(error, fallbackMessage = 'Unknown error') { + if (error instanceof Error && error.message.trim()) { + return error.message.trim(); + } + + if (typeof error === 'string' && error.trim()) { + return error.trim(); + } + + if (error === null || error === undefined) { + return fallbackMessage; + } + + if (Object.prototype.toString.call(error) === '[object Object]') { + try { + const serializedError = JSON.stringify(error); + if (serializedError && serializedError.trim()) { + return serializedError; + } + } catch { + return fallbackMessage; + } + } + + const message = String(error).trim(); + return message || fallbackMessage; +} + +module.exports = { + formatCurrency, + formatDayCount, + formatErrorMessage, + formatInteger, +}; diff --git a/server/startup-runtime.js b/server/startup-runtime.js index fe2e567..e15e2a1 100644 --- a/server/startup-runtime.js +++ b/server/startup-runtime.js @@ -1,15 +1,9 @@ -function formatCurrency(value, locale = 'de-CH') { - return new Intl.NumberFormat(locale, { - style: 'currency', - currency: 'USD', - minimumFractionDigits: value >= 100 ? 0 : 2, - maximumFractionDigits: value >= 100 ? 
0 : 2, - }).format(value || 0); -} - -function formatInteger(value, locale = 'de-CH') { - return new Intl.NumberFormat(locale).format(value || 0); -} +const { + formatCurrency, + formatDayCount, + formatErrorMessage, + formatInteger, +} = require('./runtime-formatters'); function createStartupRuntime({ fs, @@ -83,9 +77,7 @@ function createStartupRuntime({ const totalCost = formatCurrency(normalized.totals?.totalCost || 0); const totalTokens = formatInteger(normalized.totals?.totalTokens || 0); const days = normalized.daily?.length || 0; - const dailyCount = formatInteger(days); - const dayLabel = days === 1 ? 'day' : 'days'; - return `${dailyCount} ${dayLabel}, ${totalCost}, ${totalTokens} tokens`; + return `${formatDayCount(days)}, ${totalCost}, ${totalTokens} tokens`; } catch { return 'present, but unreadable'; } @@ -208,9 +200,13 @@ function createStartupRuntime({ }); markStartupAutoLoadCompleted(); - log(`Auto-load complete: imported ${result.days} days, ${formatCurrency(result.totalCost)}.`); + log( + `Auto-load complete: imported ${formatDayCount(result.days)}, ${formatCurrency( + result.totalCost, + )}.`, + ); } catch (error) { - errorLog(`Auto-load failed: ${error.message}`); + errorLog(`Auto-load failed: ${formatErrorMessage(error)}`); errorLog('Dashboard will start without newly imported data.'); } } @@ -228,5 +224,7 @@ function createStartupRuntime({ module.exports = { createStartupRuntime, formatCurrency, + formatDayCount, + formatErrorMessage, formatInteger, }; diff --git a/shared/dashboard-preferences.js b/shared/dashboard-preferences.js index 8af91f0..5f89eb1 100644 --- a/shared/dashboard-preferences.js +++ b/shared/dashboard-preferences.js @@ -229,8 +229,13 @@ function normalizeDashboardSectionVisibility(value) { const defaults = getDefaultDashboardSectionVisibility() return DASHBOARD_SECTION_IDS.reduce((visibility, sectionId) => { + // Boolean values are preserved; malformed present keys are treated as explicitly hidden. 
visibility[sectionId] = - typeof source[sectionId] === 'boolean' ? source[sectionId] : defaults[sectionId] + typeof source[sectionId] === 'boolean' + ? source[sectionId] + : Object.prototype.hasOwnProperty.call(source, sectionId) + ? false + : defaults[sectionId] return visibility }, {}) } diff --git a/shared/locales/de/common.json b/shared/locales/de/common.json index 74fd0f5..aad663f 100644 --- a/shared/locales/de/common.json +++ b/shared/locales/de/common.json @@ -752,7 +752,7 @@ "close": "Schliessen" }, "commandPalette": { - "title": "Command Palette", + "title": "Befehlspalette", "description": "Tastaturgesteuerte Befehlsauswahl für Navigation und Aktionen im ttdash Dashboard.", "placeholder": "Befehl suchen...", "empty": "Kein Befehl gefunden.", diff --git a/shared/toktrack-version.d.ts b/shared/toktrack-version.d.ts index 582efb4..c1aa0f5 100644 --- a/shared/toktrack-version.d.ts +++ b/shared/toktrack-version.d.ts @@ -1,6 +1,6 @@ /** Canonical npm package name used for toktrack lookups and execution. */ export const TOKTRACK_PACKAGE_NAME: 'toktrack' /** Pinned toktrack version validated by TTDash. */ -export const TOKTRACK_VERSION: '2.6.0' +export const TOKTRACK_VERSION: string /** Fully qualified toktrack package spec used by npm and bun executors. 
*/ -export const TOKTRACK_PACKAGE_SPEC: 'toktrack@2.6.0' +export const TOKTRACK_PACKAGE_SPEC: `toktrack@${string}` diff --git a/shared/toktrack-version.js b/shared/toktrack-version.js index 4f99f0c..2d25681 100644 --- a/shared/toktrack-version.js +++ b/shared/toktrack-version.js @@ -1,5 +1,16 @@ +const packageJson = require('../package.json') + const TOKTRACK_PACKAGE_NAME = 'toktrack' -const TOKTRACK_VERSION = '2.6.0' +const EXACT_SEMVER_PATTERN = + /^(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)(?:-[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?(?:\+[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?$/ +const TOKTRACK_VERSION = packageJson.dependencies?.[TOKTRACK_PACKAGE_NAME] + +if (typeof TOKTRACK_VERSION !== 'string' || !EXACT_SEMVER_PATTERN.test(TOKTRACK_VERSION)) { + throw new Error( + `package.json dependencies.${TOKTRACK_PACKAGE_NAME} must be pinned to an exact SemVer version.`, + ) +} + const TOKTRACK_PACKAGE_SPEC = `${TOKTRACK_PACKAGE_NAME}@${TOKTRACK_VERSION}` module.exports = { diff --git a/src/components/cards/MonthMetrics.tsx b/src/components/cards/MonthMetrics.tsx index 2ad9ed0..a5407ca 100644 --- a/src/components/cards/MonthMetrics.tsx +++ b/src/components/cards/MonthMetrics.tsx @@ -86,7 +86,7 @@ export function MonthMetrics({ daily, metrics }: MonthMetricsProps) { if (!topModel || cost > topModel.cost) topModel = { name, cost } } - // Days elapsed in the current month so far + // Days elapsed in the current local month so far. 
const today = new Date() const dayOfMonth = today.getDate() diff --git a/src/components/charts/CostByWeekday.tsx b/src/components/charts/CostByWeekday.tsx index 8b21f2b..f9849d5 100644 --- a/src/components/charts/CostByWeekday.tsx +++ b/src/components/charts/CostByWeekday.tsx @@ -21,32 +21,82 @@ interface CostByWeekdayProps { data: WeekdayData[] } +const WEEKEND_DAY_FALLBACKS = new Set([ + // Saturday + 'sa', + 'sam', + 'samedi', + 'sab', + 'sabado', + 'sabato', + 'sat', + 'saturday', + // Sunday + 'dim', + 'dimanche', + 'dom', + 'domingo', + 'domenica', + 'so', + 'sonntag', + 'su', + 'sun', + 'sunday', +]) + +function isWeekendEntry(entry: WeekdayData) { + if (entry.weekdayIndex !== undefined) { + return entry.weekdayIndex === 5 || entry.weekdayIndex === 6 + } + + // buildDashboardChartTransforms provides weekdayIndex; day labels are a legacy/external fallback. + const normalizedDay = entry.day + .trim() + .toLocaleLowerCase() + .normalize('NFD') + .replace(/[\u0300-\u036f]/g, '') + .replace(/\.$/, '') + return WEEKEND_DAY_FALLBACKS.has(normalizedDay) +} + /** Renders average cost by weekday. 
*/ export function CostByWeekday({ data }: CostByWeekdayProps) { const { t } = useTranslation() const [activeIndex, setActiveIndex] = useState(null) const uid = useId() const gid = (n: string) => `${uid}-${n}`.replace(/:/g, '') + const chartData = [...data].sort((left, right) => { + if (left.weekdayIndex === undefined && right.weekdayIndex === undefined) { + return 0 + } + if (left.weekdayIndex === undefined) { + return 1 + } + if (right.weekdayIndex === undefined) { + return -1 + } + return left.weekdayIndex - right.weekdayIndex + }) - const maxCost = Math.max(...data.map((d) => d.cost)) - const minCost = Math.min(...data.map((d) => d.cost)) - const peakIndex = data.findIndex((d) => d.cost === maxCost) - const lowIndex = data.findIndex((d) => d.cost === minCost) - const weekendCost = data - .filter((entry) => entry.day === 'Sa' || entry.day === 'So') + const maxCost = Math.max(...chartData.map((d) => d.cost)) + const minCost = Math.min(...chartData.map((d) => d.cost)) + const peakIndex = chartData.findIndex((d) => d.cost === maxCost) + const lowIndex = chartData.findIndex((d) => d.cost === minCost) + const weekendCost = chartData + .filter((entry) => isWeekendEntry(entry)) .reduce((sum, entry) => sum + entry.cost, 0) - const weekTotal = data.reduce((sum, entry) => sum + entry.cost, 0) + const weekTotal = chartData.reduce((sum, entry) => sum + entry.cost, 0) return ( 0 ? 
(weekendCost / weekTotal) * 100 : 0).toFixed(0)}%`, })} info={CHART_HELP.costByWeekday} - chartData={data as unknown as Record[]} + chartData={chartData as unknown as Record[]} valueKey="cost" valueFormatter={formatCurrency} > @@ -55,7 +105,7 @@ export function CostByWeekday({ data }: CostByWeekdayProps) { { if ( @@ -107,7 +157,7 @@ export function CostByWeekday({ data }: CostByWeekdayProps) { name={t('charts.costByWeekday.averageCost')} {...getBarAnimationProps(animate)} > - {data.map((_, index) => { + {chartData.map((_, index) => { let fill = `url(#${gid('weekday')})` if (activeIndex === index) fill = `url(#${gid('weekdayActive')})` else if (index === peakIndex) fill = `url(#${gid('weekdayPeak')})` diff --git a/src/components/dashboard/DashboardSections.tsx b/src/components/dashboard/DashboardSections.tsx index e7c1fa7..8df0e60 100644 --- a/src/components/dashboard/DashboardSections.tsx +++ b/src/components/dashboard/DashboardSections.tsx @@ -1,206 +1,19 @@ -import { - Fragment, - Suspense, - lazy, - useEffect, - useMemo, - useState, - type ComponentType, - type LazyExoticComponent, - type ReactNode, -} from 'react' +import { Fragment, Suspense, useEffect, useMemo, useState, type ReactNode } from 'react' import { useTranslation } from 'react-i18next' -import { PrimaryMetrics } from '../cards/PrimaryMetrics' -import { SecondaryMetrics } from '../cards/SecondaryMetrics' -import { TodayMetrics } from '../cards/TodayMetrics' -import { MonthMetrics } from '../cards/MonthMetrics' -import { HeatmapCalendar } from '../features/heatmap/HeatmapCalendar' -import { UsageInsights } from '../features/insights/UsageInsights' -import { ConcentrationRisk } from '../features/risk/ConcentrationRisk' -import { SectionHeader } from '../ui/section-header' -import { ExpandableCard } from '../ui/expandable-card' import { ChartCardSkeleton } from '../ui/skeleton' import { ErrorBoundary } from '../ui/error-boundary' import { AnimatedDashboardSection, scheduleDashboardPreloads } from 
'./DashboardMotion' +import { resolveDashboardSectionPreloadTasks } from './dashboard-section-preloading' +import { dashboardSectionPreloaders } from './sections/dashboard-section-lazy-components' import { - resolveDashboardSectionPreloadTasks, - type DashboardSectionPreloaders, -} from './dashboard-section-preloading' -import { SECTION_HELP } from '@/lib/help-content' + dashboardSectionAnchorMap, + dashboardSectionPlaceholderClassName, +} from './sections/dashboard-section-metadata' +import { renderDashboardSection } from './sections/dashboard-section-renderers' import { cn } from '@/lib/cn' import type { DashboardSectionsViewModel } from '@/types/dashboard-view-model' -import { formatCurrency, formatPercent, formatTokens, periodUnit } from '@/lib/formatters' import type { DashboardSectionId } from '@/types' -// eslint-disable-next-line @typescript-eslint/no-explicit-any -type PreloadableLazyComponent> = LazyExoticComponent & { - preload: () => Promise<{ default: T }> -} - -// eslint-disable-next-line @typescript-eslint/no-explicit-any -function lazyWithPreload>( - loader: () => Promise<{ default: T }>, -): PreloadableLazyComponent { - const Component = lazy(loader) as PreloadableLazyComponent - Component.preload = loader - return Component -} - -function preloadComponents( - ...components: Array<{ preload: () => Promise }> -): Promise { - return Promise.all(components.map((component) => component.preload())) -} - -const CostForecast = lazyWithPreload(() => - import('../features/forecast/CostForecast').then((module) => ({ - default: module.CostForecast, - })), -) -const ProviderCostForecast = lazyWithPreload(() => - import('../features/forecast/ProviderCostForecast').then((module) => ({ - default: module.ProviderCostForecast, - })), -) -const ForecastZoomDialog = lazyWithPreload(() => - import('../features/forecast/ForecastZoomDialog').then((module) => ({ - default: module.ForecastZoomDialog, - })), -) -const CostOverTime = lazyWithPreload(() => - 
import('../charts/CostOverTime').then((module) => ({ - default: module.CostOverTime, - })), -) -const CostByModel = lazyWithPreload(() => - import('../charts/CostByModel').then((module) => ({ - default: module.CostByModel, - })), -) -const CostByModelOverTime = lazyWithPreload(() => - import('../charts/CostByModelOverTime').then((module) => ({ - default: module.CostByModelOverTime, - })), -) -const CumulativeCost = lazyWithPreload(() => - import('../charts/CumulativeCost').then((module) => ({ - default: module.CumulativeCost, - })), -) -const CumulativeCostPerProvider = lazyWithPreload(() => - import('../charts/CumulativeCostPerProvider').then((module) => ({ - default: module.CumulativeCostPerProvider, - })), -) -const CostByWeekday = lazyWithPreload(() => - import('../charts/CostByWeekday').then((module) => ({ - default: module.CostByWeekday, - })), -) -const TokenEfficiency = lazyWithPreload(() => - import('../charts/TokenEfficiency').then((module) => ({ - default: module.TokenEfficiency, - })), -) -const ModelMix = lazyWithPreload(() => - import('../charts/ModelMix').then((module) => ({ - default: module.ModelMix, - })), -) -const TokensOverTime = lazyWithPreload(() => - import('../charts/TokensOverTime').then((module) => ({ - default: module.TokensOverTime, - })), -) -const TokenTypes = lazyWithPreload(() => - import('../charts/TokenTypes').then((module) => ({ - default: module.TokenTypes, - })), -) -const RequestsOverTime = lazyWithPreload(() => - import('../charts/RequestsOverTime').then((module) => ({ - default: module.RequestsOverTime, - })), -) -const RequestCacheHitRateByModel = lazyWithPreload(() => - import('../charts/RequestCacheHitRateByModel').then((module) => ({ - default: module.RequestCacheHitRateByModel, - })), -) -const CacheROI = lazyWithPreload(() => - import('../features/cache-roi/CacheROI').then((module) => ({ - default: module.CacheROI, - })), -) -const ProviderLimitsSection = lazyWithPreload(() => - 
import('../features/limits/ProviderLimitsSection').then((module) => ({ - default: module.ProviderLimitsSection, - })), -) -const RequestQuality = lazyWithPreload(() => - import('../features/request-quality/RequestQuality').then((module) => ({ - default: module.RequestQuality, - })), -) -const DistributionAnalysis = lazyWithPreload(() => - import('../charts/DistributionAnalysis').then((module) => ({ - default: module.DistributionAnalysis, - })), -) -const CorrelationAnalysis = lazyWithPreload(() => - import('../charts/CorrelationAnalysis').then((module) => ({ - default: module.CorrelationAnalysis, - })), -) -const PeriodComparison = lazyWithPreload(() => - import('../features/comparison/PeriodComparison').then((module) => ({ - default: module.PeriodComparison, - })), -) -const AnomalyDetection = lazyWithPreload(() => - import('../features/anomaly/AnomalyDetection').then((module) => ({ - default: module.AnomalyDetection, - })), -) -const ModelEfficiency = lazyWithPreload(() => - import('../tables/ModelEfficiency').then((module) => ({ - default: module.ModelEfficiency, - })), -) -const ProviderEfficiency = lazyWithPreload(() => - import('../tables/ProviderEfficiency').then((module) => ({ - default: module.ProviderEfficiency, - })), -) -const RecentDays = lazyWithPreload(() => - import('../tables/RecentDays').then((module) => ({ - default: module.RecentDays, - })), -) - -const dashboardSectionPreloaders = { - forecastCache: () => - preloadComponents(CostForecast, ProviderCostForecast, ForecastZoomDialog, CacheROI), - limits: () => preloadComponents(ProviderLimitsSection), - costAnalysis: () => - preloadComponents( - CostOverTime, - CostByModel, - CumulativeCostPerProvider, - CostByModelOverTime, - CumulativeCost, - CostByWeekday, - TokenEfficiency, - ModelMix, - ), - tokenAnalysis: () => preloadComponents(TokensOverTime, TokenTypes), - requestAnalysis: () => - preloadComponents(RequestsOverTime, RequestCacheHitRateByModel, RequestQuality), - advancedAnalysis: () => 
preloadComponents(DistributionAnalysis, CorrelationAnalysis), - comparisons: () => preloadComponents(PeriodComparison, AnomalyDetection), - tables: () => preloadComponents(ModelEfficiency, ProviderEfficiency, RecentDays), -} satisfies DashboardSectionPreloaders - interface DashboardSectionsProps { viewModel: DashboardSectionsViewModel } @@ -209,19 +22,7 @@ interface DashboardSectionsProps { export function DashboardSections({ viewModel }: DashboardSectionsProps) { const { t } = useTranslation() const [forecastZoomOpen, setForecastZoomOpen] = useState(false) - const { - layout, - overview, - forecast, - limits, - costAnalysis, - tokenAnalysis, - requestAnalysis, - advancedAnalysis, - comparisons, - tables, - interactions, - } = viewModel + const { layout, requestAnalysis } = viewModel const { sectionOrder, sectionVisibility } = layout const warmupPreloadTasks = useMemo( () => @@ -271,30 +72,6 @@ export function DashboardSections({ viewModel }: DashboardSectionsProps) { ) - const sectionPlaceholderClassName: Record = { - insights: 'min-h-[260px]', - metrics: 'min-h-[320px]', - today: 'min-h-[320px]', - currentMonth: 'min-h-[360px]', - activity: 'min-h-[360px]', - forecastCache: 'min-h-[900px]', - limits: 'min-h-[480px]', - costAnalysis: 'min-h-[1460px]', - tokenAnalysis: 'min-h-[430px]', - requestAnalysis: 'min-h-[1040px]', - advancedAnalysis: 'min-h-[760px]', - comparisons: 'min-h-[480px]', - tables: 'min-h-[1100px]', - } - const sectionAnchorMap: Partial> = { - costAnalysis: 'charts', - currentMonth: 'current-month', - forecastCache: 'forecast-cache', - tokenAnalysis: 'token-analysis', - requestAnalysis: 'request-analysis', - advancedAnalysis: 'advanced-analysis', - } - const renderAnimatedSection = ( sectionId: DashboardSectionId, children: ReactNode, @@ -303,13 +80,13 @@ export function DashboardSections({ viewModel }: DashboardSectionsProps) { onPreload, }: { eager?: boolean; onPreload?: () => void | Promise } = {}, ) => { - const sectionAnchorId = 
sectionAnchorMap[sectionId] ?? sectionId + const sectionAnchorId = dashboardSectionAnchorMap[sectionId] ?? sectionId return ( {children} @@ -317,475 +94,15 @@ export function DashboardSections({ viewModel }: DashboardSectionsProps) { ) } - const renderSection = (sectionId: DashboardSectionId) => { - switch (sectionId) { - case 'insights': - return sectionVisibility.insights - ? renderAnimatedSection( - 'insights', - , - { eager: true }, - ) - : null - case 'metrics': - return sectionVisibility.metrics - ? renderAnimatedSection( - 'metrics', - <> - - -
- entry.totalCost)} - viewMode={overview.viewMode} - /> -
- , - { eager: true }, - ) - : null - case 'today': - return sectionVisibility.today && overview.todayData - ? renderAnimatedSection( - 'today', - , - { - eager: true, - }, - ) - : null - case 'currentMonth': - return sectionVisibility.currentMonth && overview.hasCurrentMonthData - ? renderAnimatedSection( - 'currentMonth', - , - { - eager: true, - }, - ) - : null - case 'activity': - return sectionVisibility.activity - ? renderAnimatedSection( - 'activity', - <> - -
- - - -
- , - ) - : null - case 'forecastCache': - return sectionVisibility.forecastCache - ? renderAnimatedSection( - 'forecastCache', - <> - -
- {renderLazySection( - setForecastZoomOpen(true)} - />, - 'h-[360px]', - )} - {renderLazySection( - - - , - 'h-[360px]', - )} -
-
- {renderLazySection( - setForecastZoomOpen(true)} - />, - 'h-[430px]', - )} -
- - - - - - , - { - onPreload: dashboardSectionPreloaders.forecastCache, - }, - ) - : null - case 'limits': - return sectionVisibility.limits - ? renderAnimatedSection( - 'limits', - renderLazySection( - , - 'h-[420px]', - ), - { - onPreload: dashboardSectionPreloaders.limits, - }, - ) - : null - case 'costAnalysis': - return sectionVisibility.costAnalysis - ? renderAnimatedSection( - 'costAnalysis', - <> - -
-
- {renderLazySection( - , - 'h-[360px]', - )} -
- {renderLazySection(, 'h-[360px]')} -
-
- {renderLazySection( - , - 'h-[320px]', - )} - {renderLazySection( - , - 'h-[320px]', - )} -
-
- {renderLazySection( - , - 'h-[320px]', - )} - {renderLazySection( - , - 'h-[320px]', - )} -
-
- {renderLazySection( - , - 'h-[320px]', - )} - {renderLazySection(, 'h-[320px]')} -
- , - { - onPreload: dashboardSectionPreloaders.costAnalysis, - }, - ) - : null - case 'tokenAnalysis': - return sectionVisibility.tokenAnalysis - ? renderAnimatedSection( - 'tokenAnalysis', - <> - -
- {renderLazySection( - , - 'h-[320px]', - )} - {renderLazySection(, 'h-[320px]')} -
- , - { - onPreload: dashboardSectionPreloaders.tokenAnalysis, - }, - ) - : null - case 'requestAnalysis': - return sectionVisibility.requestAnalysis && requestAnalysis.metrics.hasRequestData - ? renderAnimatedSection( - 'requestAnalysis', - <> - - {renderLazySection( - , - 'h-[320px]', - )} -
- {renderLazySection( - , - 'h-[320px]', - )} -
-
- {renderLazySection( - , - 'h-[280px]', - )} -
- , - { - onPreload: dashboardSectionPreloaders.requestAnalysis, - }, - ) - : null - case 'advancedAnalysis': - return sectionVisibility.advancedAnalysis - ? renderAnimatedSection( - 'advancedAnalysis', - <> - -
- {renderLazySection( - , - 'h-[320px]', - )} - -
-
- {renderLazySection( - , - 'h-[320px]', - )} -
- , - { - onPreload: dashboardSectionPreloaders.advancedAnalysis, - }, - ) - : null - case 'comparisons': - return sectionVisibility.comparisons - ? renderAnimatedSection( - 'comparisons', - <> - -
- {renderLazySection( - - - , - 'h-[360px]', - )} - {renderLazySection( - - - , - 'h-[360px]', - )} -
- , - { - onPreload: dashboardSectionPreloaders.comparisons, - }, - ) - : null - case 'tables': - return sectionVisibility.tables - ? renderAnimatedSection( - 'tables', - <> - - {renderLazySection( - , - 'h-[320px]', - )} -
- {renderLazySection( - , - 'h-[320px]', - )} -
-
- {renderLazySection( - , - 'h-[360px]', - )} -
- , - { - onPreload: dashboardSectionPreloaders.tables, - }, - ) - : null - default: - return null - } - } + const renderSection = (sectionId: DashboardSectionId) => + renderDashboardSection(sectionId, { + viewModel, + t, + forecastZoomOpen, + setForecastZoomOpen, + renderAnimatedSection, + renderLazySection, + }) return ( <> diff --git a/src/components/dashboard/sections/dashboard-analysis-sections.tsx b/src/components/dashboard/sections/dashboard-analysis-sections.tsx new file mode 100644 index 0000000..ce50a04 --- /dev/null +++ b/src/components/dashboard/sections/dashboard-analysis-sections.tsx @@ -0,0 +1,209 @@ +import { ConcentrationRisk } from '../../features/risk/ConcentrationRisk' +import { SectionHeader } from '../../ui/section-header' +import { SECTION_HELP } from '@/lib/help-content' +import type { DashboardSectionId } from '@/types' +import { + dashboardLazySectionComponents, + dashboardSectionPreloaders, +} from './dashboard-section-lazy-components' +import type { DashboardSectionRenderer } from './dashboard-section-renderer-types' + +const { + CorrelationAnalysis, + CostByModel, + CostByModelOverTime, + CostByWeekday, + CostOverTime, + CumulativeCost, + CumulativeCostPerProvider, + DistributionAnalysis, + ModelMix, + RequestCacheHitRateByModel, + RequestQuality, + RequestsOverTime, + TokenEfficiency, + TokenTypes, + TokensOverTime, +} = dashboardLazySectionComponents + +/** Renderers for chart-heavy dashboard analysis sections. */ +export const analysisSectionRenderers = { + costAnalysis: ({ viewModel, t, renderAnimatedSection, renderLazySection }) => { + const { costAnalysis, interactions, layout } = viewModel + + return layout.sectionVisibility.costAnalysis + ? renderAnimatedSection( + 'costAnalysis', + <> + +
+
+ {renderLazySection( + , + 'h-[360px]', + )} +
+ {renderLazySection(, 'h-[360px]')} +
+
+ {renderLazySection( + , + 'h-[320px]', + )} + {renderLazySection( + , + 'h-[320px]', + )} +
+
+ {renderLazySection( + , + 'h-[320px]', + )} + {renderLazySection(, 'h-[320px]')} +
+
+ {renderLazySection(, 'h-[320px]')} + {renderLazySection(, 'h-[320px]')} +
+ , + { + onPreload: dashboardSectionPreloaders.costAnalysis, + }, + ) + : null + }, + tokenAnalysis: ({ viewModel, t, renderAnimatedSection, renderLazySection }) => { + const { tokenAnalysis, interactions, layout } = viewModel + + return layout.sectionVisibility.tokenAnalysis + ? renderAnimatedSection( + 'tokenAnalysis', + <> + +
+ {renderLazySection( + , + 'h-[320px]', + )} + {renderLazySection(, 'h-[320px]')} +
+ , + { + onPreload: dashboardSectionPreloaders.tokenAnalysis, + }, + ) + : null + }, + requestAnalysis: ({ viewModel, t, renderAnimatedSection, renderLazySection }) => { + const { requestAnalysis, interactions, layout } = viewModel + + return layout.sectionVisibility.requestAnalysis && requestAnalysis.metrics.hasRequestData + ? renderAnimatedSection( + 'requestAnalysis', + <> + + {renderLazySection( + , + 'h-[320px]', + )} +
+ {renderLazySection( + , + 'h-[320px]', + )} +
+
+ {renderLazySection( + , + 'h-[280px]', + )} +
+ , + { + onPreload: dashboardSectionPreloaders.requestAnalysis, + }, + ) + : null + }, + advancedAnalysis: ({ viewModel, t, renderAnimatedSection, renderLazySection }) => { + const { advancedAnalysis, layout } = viewModel + + return layout.sectionVisibility.advancedAnalysis + ? renderAnimatedSection( + 'advancedAnalysis', + <> + +
+ {renderLazySection( + , + 'h-[320px]', + )} + {/* Keep this card eager to preserve the existing advanced-analysis first paint. */} + +
+
+ {renderLazySection( + , + 'h-[320px]', + )} +
+ , + { + onPreload: dashboardSectionPreloaders.advancedAnalysis, + }, + ) + : null + }, +} satisfies Partial> diff --git a/src/components/dashboard/sections/dashboard-comparison-table-sections.tsx b/src/components/dashboard/sections/dashboard-comparison-table-sections.tsx new file mode 100644 index 0000000..eea3071 --- /dev/null +++ b/src/components/dashboard/sections/dashboard-comparison-table-sections.tsx @@ -0,0 +1,129 @@ +import { ExpandableCard } from '../../ui/expandable-card' +import { SectionHeader } from '../../ui/section-header' +import { SECTION_HELP } from '@/lib/help-content' +import { formatCurrency, periodUnit } from '@/lib/formatters' +import type { DashboardSectionId } from '@/types' +import { + dashboardLazySectionComponents, + dashboardSectionPreloaders, +} from './dashboard-section-lazy-components' +import type { DashboardSectionRenderer } from './dashboard-section-renderer-types' + +const { AnomalyDetection, ModelEfficiency, PeriodComparison, ProviderEfficiency, RecentDays } = + dashboardLazySectionComponents + +/** Renderers for comparison and table dashboard sections. */ +export const comparisonTableSectionRenderers = { + comparisons: ({ viewModel, t, renderAnimatedSection, renderLazySection }) => { + const { comparisons, interactions, layout } = viewModel + + return layout.sectionVisibility.comparisons + ? renderAnimatedSection( + 'comparisons', + <> + +
+ {renderLazySection( + + + , + 'h-[360px]', + )} + {renderLazySection( + + + , + 'h-[360px]', + )} +
+ , + { + onPreload: dashboardSectionPreloaders.comparisons, + }, + ) + : null + }, + tables: ({ viewModel, t, renderAnimatedSection, renderLazySection }) => { + const { tables, interactions, layout } = viewModel + + return layout.sectionVisibility.tables + ? renderAnimatedSection( + 'tables', + <> + + {renderLazySection( + , + 'h-[320px]', + )} +
+ {renderLazySection( + , + 'h-[320px]', + )} +
+
+ {renderLazySection( + , + 'h-[360px]', + )} +
+ , + { + onPreload: dashboardSectionPreloaders.tables, + }, + ) + : null + }, +} satisfies Partial> diff --git a/src/components/dashboard/sections/dashboard-forecast-sections.tsx b/src/components/dashboard/sections/dashboard-forecast-sections.tsx new file mode 100644 index 0000000..f5f67ce --- /dev/null +++ b/src/components/dashboard/sections/dashboard-forecast-sections.tsx @@ -0,0 +1,120 @@ +import { Suspense } from 'react' +import { ExpandableCard } from '../../ui/expandable-card' +import { ErrorBoundary } from '../../ui/error-boundary' +import { SectionHeader } from '../../ui/section-header' +import { SECTION_HELP } from '@/lib/help-content' +import { formatPercent, formatTokens } from '@/lib/formatters' +import type { DashboardSectionId } from '@/types' +import { + dashboardLazySectionComponents, + dashboardSectionPreloaders, +} from './dashboard-section-lazy-components' +import type { DashboardSectionRenderer } from './dashboard-section-renderer-types' + +const { CacheROI, CostForecast, ForecastZoomDialog, ProviderCostForecast, ProviderLimitsSection } = + dashboardLazySectionComponents + +/** Renderers for forecast, cache, and limit dashboard sections. */ +export const forecastSectionRenderers = { + forecastCache: ({ + viewModel, + t, + forecastZoomOpen, + setForecastZoomOpen, + renderAnimatedSection, + renderLazySection, + }) => { + const { forecast, layout } = viewModel + + return layout.sectionVisibility.forecastCache + ? renderAnimatedSection( + 'forecastCache', + <> + +
+ {renderLazySection( + setForecastZoomOpen(true)} + />, + 'h-[360px]', + )} + {renderLazySection( + + + , + 'h-[360px]', + )} +
+
+ {renderLazySection( + setForecastZoomOpen(true)} + />, + 'h-[430px]', + )} +
+ + + + + + , + { + onPreload: dashboardSectionPreloaders.forecastCache, + }, + ) + : null + }, + limits: ({ viewModel, renderAnimatedSection, renderLazySection }) => { + const { limits, layout } = viewModel + + return layout.sectionVisibility.limits + ? renderAnimatedSection( + 'limits', + renderLazySection( + , + 'h-[420px]', + ), + { + onPreload: dashboardSectionPreloaders.limits, + }, + ) + : null + }, +} satisfies Partial> diff --git a/src/components/dashboard/sections/dashboard-overview-sections.tsx b/src/components/dashboard/sections/dashboard-overview-sections.tsx new file mode 100644 index 0000000..4b5076d --- /dev/null +++ b/src/components/dashboard/sections/dashboard-overview-sections.tsx @@ -0,0 +1,128 @@ +import { PrimaryMetrics } from '../../cards/PrimaryMetrics' +import { SecondaryMetrics } from '../../cards/SecondaryMetrics' +import { TodayMetrics } from '../../cards/TodayMetrics' +import { MonthMetrics } from '../../cards/MonthMetrics' +import { HeatmapCalendar } from '../../features/heatmap/HeatmapCalendar' +import { UsageInsights } from '../../features/insights/UsageInsights' +import { SectionHeader } from '../../ui/section-header' +import { SECTION_HELP } from '@/lib/help-content' +import type { DashboardSectionId } from '@/types' +import type { DashboardSectionRenderer } from './dashboard-section-renderer-types' + +const activityDescriptionKeys = { + daily: 'dashboard.activity.dailyDescription', + monthly: 'dashboard.activity.monthlyDescription', + yearly: 'dashboard.activity.yearlyDescription', +} as const + +/** Renderers for eager overview and activity dashboard sections. */ +export const overviewSectionRenderers = { + insights: ({ viewModel, renderAnimatedSection }) => { + const { overview, layout } = viewModel + + return layout.sectionVisibility.insights + ? 
renderAnimatedSection( + 'insights', + , + { eager: true }, + ) + : null + }, + metrics: ({ viewModel, t, renderAnimatedSection }) => { + const { overview, layout } = viewModel + + return layout.sectionVisibility.metrics + ? renderAnimatedSection( + 'metrics', + <> + + +
+ +
+ , + { eager: true }, + ) + : null + }, + today: ({ viewModel, renderAnimatedSection }) => { + const { overview, layout } = viewModel + + return layout.sectionVisibility.today && overview.todayData + ? renderAnimatedSection( + 'today', + , + { + eager: true, + }, + ) + : null + }, + currentMonth: ({ viewModel, renderAnimatedSection }) => { + const { overview, layout } = viewModel + + return layout.sectionVisibility.currentMonth && overview.hasCurrentMonthData + ? renderAnimatedSection( + 'currentMonth', + , + { + eager: true, + }, + ) + : null + }, + activity: ({ viewModel, t, renderAnimatedSection }) => { + const { overview, layout } = viewModel + + return layout.sectionVisibility.activity + ? renderAnimatedSection( + 'activity', + <> + +
+ + + +
+ , + { eager: false }, + ) + : null + }, +} satisfies Partial> diff --git a/src/components/dashboard/sections/dashboard-section-lazy-components.ts b/src/components/dashboard/sections/dashboard-section-lazy-components.ts new file mode 100644 index 0000000..2288f90 --- /dev/null +++ b/src/components/dashboard/sections/dashboard-section-lazy-components.ts @@ -0,0 +1,212 @@ +import { lazy, type ComponentType, type LazyExoticComponent } from 'react' +import type { DashboardSectionPreloaders } from '../dashboard-section-preloading' + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +type PreloadableLazyComponent> = LazyExoticComponent & { + preload: () => Promise<{ default: T }> +} + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +function lazyWithPreload>( + loader: () => Promise<{ default: T }>, +): PreloadableLazyComponent { + const Component = lazy(loader) as PreloadableLazyComponent + Component.preload = loader + return Component +} + +async function preloadComponents( + ...components: { readonly [K in keyof T]: { preload: () => Promise } } +): Promise { + const results = await Promise.allSettled(components.map((component) => component.preload())) + const rejectedResult = results.find((result): result is PromiseRejectedResult => { + return result.status === 'rejected' + }) + + if (rejectedResult) { + throw rejectedResult.reason + } + + // The rejection guard above ensures results contains only fulfilled preload promises here. 
+ const fulfilledResults = results as readonly PromiseFulfilledResult[] + return fulfilledResults.map((result) => result.value) as unknown as T +} + +const CostForecast = lazyWithPreload(() => + import('../../features/forecast/CostForecast').then((module) => ({ + default: module.CostForecast, + })), +) +const ProviderCostForecast = lazyWithPreload(() => + import('../../features/forecast/ProviderCostForecast').then((module) => ({ + default: module.ProviderCostForecast, + })), +) +const ForecastZoomDialog = lazyWithPreload(() => + import('../../features/forecast/ForecastZoomDialog').then((module) => ({ + default: module.ForecastZoomDialog, + })), +) +const CostOverTime = lazyWithPreload(() => + import('../../charts/CostOverTime').then((module) => ({ + default: module.CostOverTime, + })), +) +const CostByModel = lazyWithPreload(() => + import('../../charts/CostByModel').then((module) => ({ + default: module.CostByModel, + })), +) +const CostByModelOverTime = lazyWithPreload(() => + import('../../charts/CostByModelOverTime').then((module) => ({ + default: module.CostByModelOverTime, + })), +) +const CumulativeCost = lazyWithPreload(() => + import('../../charts/CumulativeCost').then((module) => ({ + default: module.CumulativeCost, + })), +) +const CumulativeCostPerProvider = lazyWithPreload(() => + import('../../charts/CumulativeCostPerProvider').then((module) => ({ + default: module.CumulativeCostPerProvider, + })), +) +const CostByWeekday = lazyWithPreload(() => + import('../../charts/CostByWeekday').then((module) => ({ + default: module.CostByWeekday, + })), +) +const TokenEfficiency = lazyWithPreload(() => + import('../../charts/TokenEfficiency').then((module) => ({ + default: module.TokenEfficiency, + })), +) +const ModelMix = lazyWithPreload(() => + import('../../charts/ModelMix').then((module) => ({ + default: module.ModelMix, + })), +) +const TokensOverTime = lazyWithPreload(() => + import('../../charts/TokensOverTime').then((module) => ({ + default: 
module.TokensOverTime, + })), +) +const TokenTypes = lazyWithPreload(() => + import('../../charts/TokenTypes').then((module) => ({ + default: module.TokenTypes, + })), +) +const RequestsOverTime = lazyWithPreload(() => + import('../../charts/RequestsOverTime').then((module) => ({ + default: module.RequestsOverTime, + })), +) +const RequestCacheHitRateByModel = lazyWithPreload(() => + import('../../charts/RequestCacheHitRateByModel').then((module) => ({ + default: module.RequestCacheHitRateByModel, + })), +) +const CacheROI = lazyWithPreload(() => + import('../../features/cache-roi/CacheROI').then((module) => ({ + default: module.CacheROI, + })), +) +const ProviderLimitsSection = lazyWithPreload(() => + import('../../features/limits/ProviderLimitsSection').then((module) => ({ + default: module.ProviderLimitsSection, + })), +) +const RequestQuality = lazyWithPreload(() => + import('../../features/request-quality/RequestQuality').then((module) => ({ + default: module.RequestQuality, + })), +) +const DistributionAnalysis = lazyWithPreload(() => + import('../../charts/DistributionAnalysis').then((module) => ({ + default: module.DistributionAnalysis, + })), +) +const CorrelationAnalysis = lazyWithPreload(() => + import('../../charts/CorrelationAnalysis').then((module) => ({ + default: module.CorrelationAnalysis, + })), +) +const PeriodComparison = lazyWithPreload(() => + import('../../features/comparison/PeriodComparison').then((module) => ({ + default: module.PeriodComparison, + })), +) +const AnomalyDetection = lazyWithPreload(() => + import('../../features/anomaly/AnomalyDetection').then((module) => ({ + default: module.AnomalyDetection, + })), +) +const ModelEfficiency = lazyWithPreload(() => + import('../../tables/ModelEfficiency').then((module) => ({ + default: module.ModelEfficiency, + })), +) +const ProviderEfficiency = lazyWithPreload(() => + import('../../tables/ProviderEfficiency').then((module) => ({ + default: module.ProviderEfficiency, + })), +) +const 
RecentDays = lazyWithPreload(() => + import('../../tables/RecentDays').then((module) => ({ + default: module.RecentDays, + })), +) + +/** Lazy dashboard section component registry shared by family renderers and preload plans. */ +export const dashboardLazySectionComponents = { + CostForecast, + ProviderCostForecast, + ForecastZoomDialog, + CostOverTime, + CostByModel, + CostByModelOverTime, + CumulativeCost, + CumulativeCostPerProvider, + CostByWeekday, + TokenEfficiency, + ModelMix, + TokensOverTime, + TokenTypes, + RequestsOverTime, + RequestCacheHitRateByModel, + CacheROI, + ProviderLimitsSection, + RequestQuality, + DistributionAnalysis, + CorrelationAnalysis, + PeriodComparison, + AnomalyDetection, + ModelEfficiency, + ProviderEfficiency, + RecentDays, +} + +/** Preload groups aligned with the dashboard section families. */ +export const dashboardSectionPreloaders = { + forecastCache: () => + preloadComponents(CostForecast, ProviderCostForecast, ForecastZoomDialog, CacheROI), + limits: () => preloadComponents(ProviderLimitsSection), + costAnalysis: () => + preloadComponents( + CostOverTime, + CostByModel, + CumulativeCostPerProvider, + CostByModelOverTime, + CumulativeCost, + CostByWeekday, + TokenEfficiency, + ModelMix, + ), + tokenAnalysis: () => preloadComponents(TokensOverTime, TokenTypes), + requestAnalysis: () => + preloadComponents(RequestsOverTime, RequestCacheHitRateByModel, RequestQuality), + advancedAnalysis: () => preloadComponents(DistributionAnalysis, CorrelationAnalysis), + comparisons: () => preloadComponents(PeriodComparison, AnomalyDetection), + tables: () => preloadComponents(ModelEfficiency, ProviderEfficiency, RecentDays), +} satisfies DashboardSectionPreloaders diff --git a/src/components/dashboard/sections/dashboard-section-metadata.ts b/src/components/dashboard/sections/dashboard-section-metadata.ts new file mode 100644 index 0000000..067ec38 --- /dev/null +++ b/src/components/dashboard/sections/dashboard-section-metadata.ts @@ -0,0 
+1,28 @@ +import type { DashboardSectionId } from '@/types' + +/** Reserved placeholder heights used while dashboard sections lazy-render on scroll. */ +export const dashboardSectionPlaceholderClassName: Record = { + insights: 'min-h-[260px]', + metrics: 'min-h-[320px]', + today: 'min-h-[320px]', + currentMonth: 'min-h-[360px]', + activity: 'min-h-[360px]', + forecastCache: 'min-h-[900px]', + limits: 'min-h-[480px]', + costAnalysis: 'min-h-[1460px]', + tokenAnalysis: 'min-h-[430px]', + requestAnalysis: 'min-h-[1040px]', + advancedAnalysis: 'min-h-[760px]', + comparisons: 'min-h-[480px]', + tables: 'min-h-[1100px]', +} + +/** DOM anchor aliases that preserve existing dashboard deep links. */ +export const dashboardSectionAnchorMap: Partial> = { + costAnalysis: 'charts', + currentMonth: 'current-month', + forecastCache: 'forecast-cache', + tokenAnalysis: 'token-analysis', + requestAnalysis: 'request-analysis', + advancedAnalysis: 'advanced-analysis', +} diff --git a/src/components/dashboard/sections/dashboard-section-renderer-types.d.ts b/src/components/dashboard/sections/dashboard-section-renderer-types.d.ts new file mode 100644 index 0000000..c961f90 --- /dev/null +++ b/src/components/dashboard/sections/dashboard-section-renderer-types.d.ts @@ -0,0 +1,27 @@ +import type { Dispatch, ReactNode, SetStateAction } from 'react' +import type { TFunction } from 'i18next' +import type { DashboardSectionId } from '@/types' +import type { DashboardSectionsViewModel } from '@/types/dashboard-view-model' + +/** Options passed through to the animated dashboard section shell. */ +export interface DashboardSectionRenderOptions { + eager?: boolean + onPreload?: () => void | Promise +} + +/** Shared render context injected into dashboard section family renderers. 
*/ +export interface DashboardSectionRenderContext { + viewModel: DashboardSectionsViewModel + t: TFunction + forecastZoomOpen: boolean + setForecastZoomOpen: Dispatch> + renderAnimatedSection: ( + sectionId: DashboardSectionId, + children: ReactNode, + options?: DashboardSectionRenderOptions, + ) => ReactNode + renderLazySection: (content: ReactNode, className?: string) => ReactNode +} + +/** Renders one dashboard section from the shared section render context. */ +export type DashboardSectionRenderer = (context: DashboardSectionRenderContext) => ReactNode diff --git a/src/components/dashboard/sections/dashboard-section-renderers.ts b/src/components/dashboard/sections/dashboard-section-renderers.ts new file mode 100644 index 0000000..da80f6b --- /dev/null +++ b/src/components/dashboard/sections/dashboard-section-renderers.ts @@ -0,0 +1,25 @@ +import type { DashboardSectionId } from '@/types' +import { analysisSectionRenderers } from './dashboard-analysis-sections' +import { comparisonTableSectionRenderers } from './dashboard-comparison-table-sections' +import { forecastSectionRenderers } from './dashboard-forecast-sections' +import { overviewSectionRenderers } from './dashboard-overview-sections' +import type { + DashboardSectionRenderContext, + DashboardSectionRenderer, +} from './dashboard-section-renderer-types' + +/** Complete dashboard section renderer registry assembled from section family owners. */ +export const dashboardSectionRenderers = { + ...overviewSectionRenderers, + ...forecastSectionRenderers, + ...analysisSectionRenderers, + ...comparisonTableSectionRenderers, +} satisfies Record + +/** Renders a dashboard section through the family-owned renderer registry. 
*/ +export function renderDashboardSection( + sectionId: DashboardSectionId, + context: DashboardSectionRenderContext, +) { + return dashboardSectionRenderers[sectionId](context) +} diff --git a/src/components/features/command-palette/CommandPalette.commands.tsx b/src/components/features/command-palette/CommandPalette.commands.tsx new file mode 100644 index 0000000..5cb52e0 --- /dev/null +++ b/src/components/features/command-palette/CommandPalette.commands.tsx @@ -0,0 +1,567 @@ +import { + ArrowDown, + ArrowUp, + BarChart3, + Calendar, + CalendarRange, + ChartBar, + CircleHelp, + Download, + Filter, + Languages, + Layers3, + LineChart, + Moon, + RefreshCcw, + Sigma, + SlidersHorizontal, + Sun, + Table, + Trash2, + Upload, + Zap, +} from 'lucide-react' +import { DASHBOARD_SECTION_DEFINITION_MAP } from '@/lib/dashboard-preferences' +import type { DashboardCommandPaletteViewModel } from '@/types/dashboard-view-model' +import type { DashboardSectionId } from '@/types' + +/** Describes one executable command rendered by the dashboard command palette. 
*/ +export interface CommandItem { + id: string + label: string + description?: string + keywords?: string[] + aliases?: string[] + shortcut?: string + icon: React.ReactNode + action: () => void + group: string + testId?: string +} + +type Translate = (key: string, options?: Record) => string + +type CommandPaletteBuilderOptions = DashboardCommandPaletteViewModel & { + onScrollBottom: () => void + onScrollTop: () => void + t: Translate +} + +type SectionNavigationOptions = Pick< + DashboardCommandPaletteViewModel, + 'onScrollTo' | 'sectionOrder' | 'sectionVisibility' +> & { + sectionAvailability: Record + t: Translate +} + +const SECTION_COMMAND_ICON_MAP: Record = { + insights: , + metrics: , + today: , + currentMonth: , + activity: , + forecastCache: , + limits: , + costAnalysis: , + tokenAnalysis: , + requestAnalysis: , + advancedAnalysis: , + comparisons: , + tables: , +} + +const SECTION_COMMAND_KEYWORDS: Record = { + insights: ['summary', 'insight'], + metrics: ['kpi', 'zahlen'], + today: ['today', 'heute'], + currentMonth: ['monat', 'current month'], + activity: ['heatmap', 'aktivität'], + forecastCache: ['forecast', 'cache', 'roi'], + limits: ['limits', 'subscriptions', 'budget', 'anbieter limits'], + costAnalysis: ['charts', 'kostenanalyse'], + tokenAnalysis: ['tokens', 'token analyse'], + requestAnalysis: ['requests', 'request analyse', 'anfragen'], + advancedAnalysis: ['advanced analysis', 'distributions', 'risk', 'verteilungen'], + comparisons: ['anomalie', 'vergleich'], + tables: ['table', 'details'], +} + +const SECTION_COMMAND_ALIASES: Partial> = { + limits: ['limits sektion', 'subscriptions sektion'], + tokenAnalysis: ['token chart'], + requestAnalysis: ['request chart', 'request donut'], +} + +/** Normalizes command labels, aliases, and queries into comparable search text. 
*/ +export function normalizeSearchValue(value: string) { + return value + .toLowerCase() + .normalize('NFD') + .replace(/[\u0300-\u036f]/g, '') + .replace(/[^\p{L}\p{N}]+/gu, ' ') + .trim() +} + +/** Returns the searchable text corpus for a command. */ +export function getCommandSearchText(cmd: CommandItem) { + return normalizeSearchValue( + [cmd.label, cmd.description, ...(cmd.keywords ?? []), ...(cmd.aliases ?? [])] + .filter(Boolean) + .join(' '), + ) +} + +/** Scores how well a command matches a query, returning zero for non-matches. */ +export function getCommandSearchScore(cmd: CommandItem, query: string) { + if (!query) return 1 + + const normalizedQuery = normalizeSearchValue(query) + if (!normalizedQuery) return 1 + + const haystack = getCommandSearchText(cmd) + const terms = normalizedQuery.split(/\s+/).filter(Boolean) + if (terms.length === 0) return 1 + + let score = 0 + + for (const term of terms) { + if (!haystack.includes(term)) return 0 + + if (normalizeSearchValue(cmd.label) === term) { + score += 120 + continue + } + + if (normalizeSearchValue(cmd.label).startsWith(term)) { + score += 80 + continue + } + + if ((cmd.aliases ?? []).some((alias) => normalizeSearchValue(alias) === term)) { + score += 70 + continue + } + + if ((cmd.keywords ?? []).some((keyword) => normalizeSearchValue(keyword).startsWith(term))) { + score += 55 + continue + } + + if (haystack.includes(` ${term}`) || haystack.startsWith(term)) { + score += 35 + continue + } + + score += 15 + } + + return score +} + +/** Filters and ranks commands using the palette's deterministic search policy. */ +export function filterCommandItems(commands: CommandItem[], search: string) { + return commands + .map((cmd, index) => ({ cmd, score: getCommandSearchScore(cmd, search), index })) + .filter((entry) => entry.score > 0) + .sort((a, b) => b.score - a.score || a.index - b.index) + .map((entry) => entry.cmd) +} + +/** Returns the commands that can be triggered by numeric quick-select shortcuts. 
*/ +export function getVisibleCommandItems(commands: CommandItem[]) { + return commands.slice(0, 9) +} + +/** Builds the per-section availability map from the current dashboard data state. */ +export function buildSectionAvailability({ + hasMonthSection, + hasRequestSection, + hasTodaySection, +}: Pick< + DashboardCommandPaletteViewModel, + 'hasMonthSection' | 'hasRequestSection' | 'hasTodaySection' +>): Record { + return { + insights: true, + metrics: true, + today: hasTodaySection, + currentMonth: hasMonthSection, + activity: true, + forecastCache: true, + limits: true, + costAnalysis: true, + tokenAnalysis: true, + requestAnalysis: hasRequestSection, + advancedAnalysis: true, + comparisons: true, + tables: true, + } +} + +/** Builds ordered dashboard-section navigation commands. */ +export function buildSectionNavigationCommands({ + onScrollTo, + sectionAvailability, + sectionOrder, + sectionVisibility, + t, +}: SectionNavigationOptions): CommandItem[] { + return sectionOrder.flatMap((sectionId) => { + const section = DASHBOARD_SECTION_DEFINITION_MAP[sectionId] + + if (!sectionVisibility[sectionId] || !sectionAvailability[sectionId]) { + return [] + } + + const sectionLabel = t(section.labelKey) + + return [ + { + id: `section-${section.id}`, + label: t('commandPalette.commands.goToSection.label', { section: sectionLabel }), + description: t('commandPalette.commands.goToSection.description', { + section: sectionLabel, + }), + keywords: [sectionLabel, section.domId, ...SECTION_COMMAND_KEYWORDS[section.id]], + icon: SECTION_COMMAND_ICON_MAP[section.id], + action: () => onScrollTo(section.domId), + group: t('commandPalette.groups.navigation'), + testId: `command-section-${section.id}`, + ...(SECTION_COMMAND_ALIASES[section.id] + ? { aliases: SECTION_COMMAND_ALIASES[section.id] } + : {}), + }, + ] + }) +} + +/** Builds the complete command list for the current dashboard view model. 
*/ +export function buildCommandPaletteCommands({ + isDark, + availableProviders, + selectedProviders, + availableModels, + selectedModels, + reportGenerating, + onToggleTheme, + onExportCSV, + onGenerateReport, + onDelete, + onUpload, + onAutoImport, + onOpenSettings, + onScrollTo, + onScrollTop, + onScrollBottom, + onViewModeChange, + onApplyPreset, + onToggleProvider, + onToggleModel, + onClearProviders, + onClearModels, + onClearDateRange, + onResetAll, + onHelp, + onLanguageChange, + t, + ...sectionInputs +}: CommandPaletteBuilderOptions): CommandItem[] { + const sectionNavigationCommands = buildSectionNavigationCommands({ + onScrollTo, + sectionAvailability: buildSectionAvailability(sectionInputs), + sectionOrder: sectionInputs.sectionOrder, + sectionVisibility: sectionInputs.sectionVisibility, + t, + }) + + const baseCommands: CommandItem[] = [ + { + id: 'auto-import', + label: t('commandPalette.commands.autoImport.label'), + description: t('commandPalette.commands.autoImport.description'), + keywords: ['toktrack', 'import', 'load', 'sync'], + aliases: ['auto import', 'daten importieren'], + icon: , + action: onAutoImport, + group: t('commandPalette.groups.loadData'), + }, + { + id: 'upload', + label: t('commandPalette.commands.upload.label'), + description: t('commandPalette.commands.upload.description'), + keywords: ['upload', 'file', 'json', 'import'], + aliases: ['datei laden', 'json import'], + shortcut: '⌘U', + icon: , + action: onUpload, + group: t('commandPalette.groups.loadData'), + }, + { + id: 'csv', + label: t('commandPalette.commands.exportCsv.label'), + description: t('commandPalette.commands.exportCsv.description'), + keywords: ['download', 'export', 'csv'], + aliases: ['csv download', 'daten exportieren'], + shortcut: '⌘E', + icon: , + action: onExportCSV, + group: t('commandPalette.groups.exports'), + }, + { + id: 'report', + label: reportGenerating + ? 
t('commandPalette.commands.generateReport.labelLoading') + : t('commandPalette.commands.generateReport.label'), + description: t('commandPalette.commands.generateReport.description'), + keywords: ['pdf', 'report', 'bericht', 'export'], + aliases: ['report export', 'pdf export', 'bericht generieren'], + icon: , + action: onGenerateReport, + group: t('commandPalette.groups.exports'), + }, + { + id: 'settings-open', + label: t('commandPalette.commands.openSettings.label'), + description: t('commandPalette.commands.openSettings.description'), + keywords: ['settings', 'limits', 'subscription', 'anbieter limit', 'backup'], + aliases: ['settings dialog', 'einstellungen öffnen', 'provider limits'], + icon: , + action: onOpenSettings, + group: t('commandPalette.groups.maintenance'), + }, + { + id: 'delete', + label: t('commandPalette.commands.delete.label'), + description: t('commandPalette.commands.delete.description'), + keywords: ['reset data', 'clear data', 'delete'], + aliases: ['daten reset', 'alles loeschen'], + icon: , + action: onDelete, + group: t('commandPalette.groups.maintenance'), + }, + { + id: 'view-daily', + label: t('commandPalette.commands.viewDaily.label'), + description: t('commandPalette.commands.viewDaily.description'), + keywords: ['daily', 'tage', 'tag', 'tagesansicht'], + aliases: ['daily view'], + icon: , + action: () => onViewModeChange('daily'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'view-monthly', + label: t('commandPalette.commands.viewMonthly.label'), + description: t('commandPalette.commands.viewMonthly.description'), + keywords: ['monthly', 'monate', 'monat', 'monatsansicht'], + aliases: ['monthly view'], + icon: , + action: () => onViewModeChange('monthly'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'view-yearly', + label: t('commandPalette.commands.viewYearly.label'), + description: t('commandPalette.commands.viewYearly.description'), + keywords: ['yearly', 'jahre', 'jahr', 'jahresansicht'], + 
aliases: ['yearly view'], + icon: , + action: () => onViewModeChange('yearly'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'preset-7d', + label: t('commandPalette.commands.preset7d.label'), + description: t('commandPalette.commands.preset7d.description'), + keywords: ['7d', '7 tage'], + icon: , + action: () => onApplyPreset('7d'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'preset-30d', + label: t('commandPalette.commands.preset30d.label'), + description: t('commandPalette.commands.preset30d.description'), + keywords: ['30d', '30 tage'], + icon: , + action: () => onApplyPreset('30d'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'preset-month', + label: t('commandPalette.commands.presetMonth.label'), + description: t('commandPalette.commands.presetMonth.description'), + keywords: ['current month', 'monat'], + icon: , + action: () => onApplyPreset('month'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'preset-year', + label: t('commandPalette.commands.presetYear.label'), + description: t('commandPalette.commands.presetYear.description'), + keywords: ['current year', 'jahr'], + icon: , + action: () => onApplyPreset('year'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'preset-all', + label: t('commandPalette.commands.presetAll.label'), + description: t('commandPalette.commands.presetAll.description'), + keywords: ['all', 'alles'], + icon: , + action: () => onApplyPreset('all'), + group: t('commandPalette.groups.filters'), + }, + { + id: 'clear-providers', + label: t('commandPalette.commands.clearProviders.label'), + description: t('commandPalette.commands.clearProviders.description'), + keywords: ['provider', 'anbieter', 'clear'], + icon: , + action: onClearProviders, + group: t('commandPalette.groups.filters'), + }, + { + id: 'clear-models', + label: t('commandPalette.commands.clearModels.label'), + description: t('commandPalette.commands.clearModels.description'), + keywords: ['models', 
'modelle', 'clear'], + icon: , + action: onClearModels, + group: t('commandPalette.groups.filters'), + }, + { + id: 'clear-dates', + label: t('commandPalette.commands.clearDates.label'), + description: t('commandPalette.commands.clearDates.description'), + keywords: ['date', 'datum', 'range', 'clear'], + icon: , + action: onClearDateRange, + group: t('commandPalette.groups.filters'), + }, + { + id: 'reset-all', + label: t('commandPalette.commands.resetAll.label'), + description: t('commandPalette.commands.resetAll.description'), + keywords: ['reset all', 'alles zurücksetzen', 'default', 'clear filters'], + aliases: ['reset dashboard', 'alles reset', 'filter reset'], + icon: , + action: onResetAll, + group: t('commandPalette.groups.filters'), + }, + { + id: 'top', + label: t('commandPalette.commands.scrollTop.label'), + description: t('commandPalette.commands.scrollTop.description'), + keywords: ['top', 'start', 'anfang'], + shortcut: '⌘↑', + icon: , + action: onScrollTop, + group: t('commandPalette.groups.navigation'), + }, + { + id: 'bottom', + label: t('commandPalette.commands.scrollBottom.label'), + description: t('commandPalette.commands.scrollBottom.description'), + keywords: ['bottom', 'ende'], + icon: , + action: onScrollBottom, + group: t('commandPalette.groups.navigation'), + }, + { + id: 'filters', + label: t('commandPalette.commands.filters.label'), + description: t('commandPalette.commands.filters.description'), + keywords: ['filterbar', 'filter'], + icon: , + action: () => onScrollTo('filters'), + group: t('commandPalette.groups.navigation'), + }, + ...sectionNavigationCommands, + { + id: 'theme', + label: isDark + ? t('commandPalette.commands.themeLight.label') + : t('commandPalette.commands.themeDark.label'), + description: t('commandPalette.commands.themeDark.description'), + keywords: ['theme', 'dark', 'light'], + shortcut: '⌘D', + icon: isDark ? 
: , + action: onToggleTheme, + group: t('commandPalette.groups.view'), + }, + { + id: 'language-de', + label: t('commandPalette.commands.languageGerman.label'), + description: t('commandPalette.commands.languageGerman.description'), + keywords: ['language', 'sprache', 'deutsch', 'german', 'locale'], + aliases: ['switch german', 'auf deutsch', 'sprache deutsch'], + icon: , + action: () => onLanguageChange('de'), + group: t('commandPalette.groups.language'), + }, + { + id: 'language-en', + label: t('commandPalette.commands.languageEnglish.label'), + description: t('commandPalette.commands.languageEnglish.description'), + keywords: ['language', 'sprache', 'english', 'englisch', 'locale'], + aliases: ['switch english', 'auf englisch', 'sprache english'], + icon: , + action: () => onLanguageChange('en'), + group: t('commandPalette.groups.language'), + }, + { + id: 'help', + label: t('commandPalette.commands.help.label'), + description: t('commandPalette.commands.help.description'), + keywords: ['shortcut', 'hilfe'], + shortcut: '?', + icon: , + action: onHelp, + group: t('commandPalette.groups.help'), + }, + ] + + const providerCommands: CommandItem[] = availableProviders.map((provider) => { + const selected = selectedProviders.includes(provider) + return { + id: `provider-${provider}`, + label: `${selected ? t('commandPalette.commands.clearProviders.label') : t('common.provider')}: ${provider}`, + description: selected + ? 
`${t('commandPalette.commands.clearProviders.description')}: ${provider}` + : `${t('common.provider')} ${provider}`, + keywords: ['anbieter', 'provider', provider.toLowerCase()], + aliases: [`filter ${provider.toLowerCase()}`, `${provider.toLowerCase()} daten`], + icon: , + action: () => onToggleProvider(provider), + group: t('commandPalette.groups.providers'), + testId: `command-provider-${provider}`, + } + }) + + const modelCommands: CommandItem[] = availableModels.map((model) => { + const selected = selectedModels.includes(model) + return { + id: `model-${model}`, + label: `${selected ? t('commandPalette.commands.clearModels.label') : t('common.model')}: ${model}`, + description: selected + ? `${t('commandPalette.commands.clearModels.description')}: ${model}` + : `${t('common.model')} ${model}`, + keywords: ['modell', 'model', model.toLowerCase()], + aliases: [ + `filter ${model.toLowerCase()}`, + `${model.toLowerCase()} requests`, + `${model.toLowerCase()} kosten`, + ], + icon: , + action: () => onToggleModel(model), + group: t('commandPalette.groups.models'), + testId: `command-model-${model}`, + } + }) + + return [...baseCommands, ...providerCommands, ...modelCommands] +} diff --git a/src/components/features/command-palette/CommandPalette.tsx b/src/components/features/command-palette/CommandPalette.tsx index e6bf86c..7aab4e1 100644 --- a/src/components/features/command-palette/CommandPalette.tsx +++ b/src/components/features/command-palette/CommandPalette.tsx @@ -2,150 +2,17 @@ import { useEffect, useMemo, useState } from 'react' import { useTranslation } from 'react-i18next' import { Command } from 'cmdk' import { Dialog, DialogContent, DialogDescription, DialogTitle } from '@/components/ui/dialog' -import { - Download, - Trash2, - Upload, - Sun, - Moon, - Calendar, - ChartBar, - Table, - Search, - ArrowUp, - CircleHelp, - Zap, - Filter, - BarChart3, - LineChart, - Sigma, - CalendarRange, - Layers3, - ArrowDown, - RefreshCcw, - SlidersHorizontal, - 
Languages, -} from 'lucide-react' -import { DASHBOARD_SECTION_DEFINITION_MAP } from '@/lib/dashboard-preferences' +import { Search } from 'lucide-react' import type { DashboardCommandPaletteViewModel } from '@/types/dashboard-view-model' -import type { DashboardSectionId } from '@/types' +import { + buildCommandPaletteCommands, + filterCommandItems, + getVisibleCommandItems, + type CommandItem, +} from './CommandPalette.commands' type CommandPaletteProps = DashboardCommandPaletteViewModel -interface CommandItem { - id: string - label: string - description?: string - keywords?: string[] - aliases?: string[] - shortcut?: string - icon: React.ReactNode - action: () => void - group: string - testId?: string -} - -const SECTION_COMMAND_ICON_MAP: Record = { - insights: , - metrics: , - today: , - currentMonth: , - activity: , - forecastCache: , - limits: , - costAnalysis: , - tokenAnalysis: , - requestAnalysis: , - advancedAnalysis: , - comparisons: , - tables:
, -} - -const SECTION_COMMAND_KEYWORDS: Record = { - insights: ['summary', 'insight'], - metrics: ['kpi', 'zahlen'], - today: ['today', 'heute'], - currentMonth: ['monat', 'current month'], - activity: ['heatmap', 'aktivität'], - forecastCache: ['forecast', 'cache', 'roi'], - limits: ['limits', 'subscriptions', 'budget', 'anbieter limits'], - costAnalysis: ['charts', 'kostenanalyse'], - tokenAnalysis: ['tokens', 'token analyse'], - requestAnalysis: ['requests', 'request analyse', 'anfragen'], - advancedAnalysis: ['advanced analysis', 'distributions', 'risk', 'verteilungen'], - comparisons: ['anomalie', 'vergleich'], - tables: ['table', 'details'], -} - -const SECTION_COMMAND_ALIASES: Partial> = { - limits: ['limits sektion', 'subscriptions sektion'], - tokenAnalysis: ['token chart'], - requestAnalysis: ['request chart', 'request donut'], -} - -function normalizeSearchValue(value: string) { - return value - .toLowerCase() - .normalize('NFD') - .replace(/[\u0300-\u036f]/g, '') - .replace(/[^\p{L}\p{N}]+/gu, ' ') - .trim() -} - -function getCommandSearchText(cmd: CommandItem) { - return normalizeSearchValue( - [cmd.label, cmd.description, ...(cmd.keywords ?? []), ...(cmd.aliases ?? [])] - .filter(Boolean) - .join(' '), - ) -} - -function getCommandSearchScore(cmd: CommandItem, query: string) { - if (!query) return 1 - - const normalizedQuery = normalizeSearchValue(query) - if (!normalizedQuery) return 1 - - const haystack = getCommandSearchText(cmd) - const terms = normalizedQuery.split(/\s+/).filter(Boolean) - if (terms.length === 0) return 1 - - let score = 0 - - for (const term of terms) { - if (!haystack.includes(term)) return 0 - - if (normalizeSearchValue(cmd.label) === term) { - score += 120 - continue - } - - if (normalizeSearchValue(cmd.label).startsWith(term)) { - score += 80 - continue - } - - if ((cmd.aliases ?? []).some((alias) => normalizeSearchValue(alias) === term)) { - score += 70 - continue - } - - if ((cmd.keywords ?? 
[]).some((keyword) => normalizeSearchValue(keyword).startsWith(term))) { - score += 55 - continue - } - - if (haystack.includes(` ${term}`) || haystack.startsWith(term)) { - score += 35 - continue - } - - score += 15 - } - - return score -} - /** Renders the keyboard-first command palette for dashboard actions. */ export function CommandPalette({ isDark, @@ -182,25 +49,6 @@ export function CommandPalette({ const [open, setOpen] = useState(false) const [search, setSearch] = useState('') - const sectionAvailability = useMemo>( - () => ({ - insights: true, - metrics: true, - today: hasTodaySection, - currentMonth: hasMonthSection, - activity: true, - forecastCache: true, - limits: true, - costAnalysis: true, - tokenAnalysis: true, - requestAnalysis: hasRequestSection, - advancedAnalysis: true, - comparisons: true, - tables: true, - }), - [hasMonthSection, hasRequestSection, hasTodaySection], - ) - useEffect(() => { const handleKeyDown = (e: KeyboardEvent) => { if ((e.metaKey || e.ctrlKey) && e.key === 'k') { @@ -212,293 +60,56 @@ export function CommandPalette({ return () => document.removeEventListener('keydown', handleKeyDown) }, []) - const sectionNavigationCommands = useMemo( + const commands = useMemo( () => - sectionOrder.flatMap((sectionId) => { - const section = DASHBOARD_SECTION_DEFINITION_MAP[sectionId] - - if (!sectionVisibility[sectionId] || !sectionAvailability[sectionId]) { - return [] - } - - const sectionLabel = t(section.labelKey) - - return [ - { - id: `section-${section.id}`, - label: t('commandPalette.commands.goToSection.label', { section: sectionLabel }), - description: t('commandPalette.commands.goToSection.description', { - section: sectionLabel, - }), - keywords: [sectionLabel, section.domId, ...SECTION_COMMAND_KEYWORDS[section.id]], - icon: SECTION_COMMAND_ICON_MAP[section.id], - action: () => onScrollTo(section.domId), - group: t('commandPalette.groups.navigation'), - testId: `command-section-${section.id}`, - 
...(SECTION_COMMAND_ALIASES[section.id] - ? { aliases: SECTION_COMMAND_ALIASES[section.id] } - : {}), - }, - ] + buildCommandPaletteCommands({ + isDark, + availableProviders, + selectedProviders, + availableModels, + selectedModels, + hasTodaySection, + hasMonthSection, + hasRequestSection, + sectionVisibility, + sectionOrder, + reportGenerating, + onToggleTheme, + onExportCSV, + onGenerateReport, + onDelete, + onUpload, + onAutoImport, + onOpenSettings, + onScrollTo, + onScrollTop: () => window.scrollTo({ top: 0, behavior: 'smooth' }), + onScrollBottom: () => + window.scrollTo({ top: document.body.scrollHeight, behavior: 'smooth' }), + onViewModeChange, + onApplyPreset, + onToggleProvider, + onToggleModel, + onClearProviders, + onClearModels, + onClearDateRange, + onResetAll, + onHelp, + onLanguageChange, + t: (key, options) => (options === undefined ? t(key) : t(key, options)), }), - [onScrollTo, sectionAvailability, sectionOrder, sectionVisibility, t], - ) - - const baseCommands = useMemo( - () => [ - { - id: 'auto-import', - label: t('commandPalette.commands.autoImport.label'), - description: t('commandPalette.commands.autoImport.description'), - keywords: ['toktrack', 'import', 'load', 'sync'], - aliases: ['auto import', 'daten importieren'], - icon: , - action: onAutoImport, - group: t('commandPalette.groups.loadData'), - }, - { - id: 'upload', - label: t('commandPalette.commands.upload.label'), - description: t('commandPalette.commands.upload.description'), - keywords: ['upload', 'file', 'json', 'import'], - aliases: ['datei laden', 'json import'], - shortcut: '⌘U', - icon: , - action: onUpload, - group: t('commandPalette.groups.loadData'), - }, - { - id: 'csv', - label: t('commandPalette.commands.exportCsv.label'), - description: t('commandPalette.commands.exportCsv.description'), - keywords: ['download', 'export', 'csv'], - aliases: ['csv download', 'daten exportieren'], - shortcut: '⌘E', - icon: , - action: onExportCSV, - group: 
t('commandPalette.groups.exports'), - }, - { - id: 'report', - label: reportGenerating - ? t('commandPalette.commands.generateReport.labelLoading') - : t('commandPalette.commands.generateReport.label'), - description: t('commandPalette.commands.generateReport.description'), - keywords: ['pdf', 'report', 'bericht', 'export'], - aliases: ['report export', 'pdf export', 'bericht generieren'], - icon: , - action: onGenerateReport, - group: t('commandPalette.groups.exports'), - }, - { - id: 'settings-open', - label: t('commandPalette.commands.openSettings.label'), - description: t('commandPalette.commands.openSettings.description'), - keywords: ['settings', 'limits', 'subscription', 'anbieter limit', 'backup'], - aliases: ['settings dialog', 'einstellungen öffnen', 'provider limits'], - icon: , - action: onOpenSettings, - group: t('commandPalette.groups.maintenance'), - }, - { - id: 'delete', - label: t('commandPalette.commands.delete.label'), - description: t('commandPalette.commands.delete.description'), - keywords: ['reset data', 'clear data', 'delete'], - aliases: ['daten reset', 'alles loeschen'], - icon: , - action: onDelete, - group: t('commandPalette.groups.maintenance'), - }, - - { - id: 'view-daily', - label: t('commandPalette.commands.viewDaily.label'), - description: t('commandPalette.commands.viewDaily.description'), - keywords: ['daily', 'tage', 'tag', 'tagesansicht'], - aliases: ['daily view'], - icon: , - action: () => onViewModeChange('daily'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'view-monthly', - label: t('commandPalette.commands.viewMonthly.label'), - description: t('commandPalette.commands.viewMonthly.description'), - keywords: ['monthly', 'monate', 'monat', 'monatsansicht'], - aliases: ['monthly view'], - icon: , - action: () => onViewModeChange('monthly'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'view-yearly', - label: t('commandPalette.commands.viewYearly.label'), - description: 
t('commandPalette.commands.viewYearly.description'), - keywords: ['yearly', 'jahre', 'jahr', 'jahresansicht'], - aliases: ['yearly view'], - icon: , - action: () => onViewModeChange('yearly'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'preset-7d', - label: t('commandPalette.commands.preset7d.label'), - description: t('commandPalette.commands.preset7d.description'), - keywords: ['7d', '7 tage'], - icon: , - action: () => onApplyPreset('7d'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'preset-30d', - label: t('commandPalette.commands.preset30d.label'), - description: t('commandPalette.commands.preset30d.description'), - keywords: ['30d', '30 tage'], - icon: , - action: () => onApplyPreset('30d'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'preset-month', - label: t('commandPalette.commands.presetMonth.label'), - description: t('commandPalette.commands.presetMonth.description'), - keywords: ['current month', 'monat'], - icon: , - action: () => onApplyPreset('month'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'preset-year', - label: t('commandPalette.commands.presetYear.label'), - description: t('commandPalette.commands.presetYear.description'), - keywords: ['current year', 'jahr'], - icon: , - action: () => onApplyPreset('year'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'preset-all', - label: t('commandPalette.commands.presetAll.label'), - description: t('commandPalette.commands.presetAll.description'), - keywords: ['all', 'alles'], - icon: , - action: () => onApplyPreset('all'), - group: t('commandPalette.groups.filters'), - }, - { - id: 'clear-providers', - label: t('commandPalette.commands.clearProviders.label'), - description: t('commandPalette.commands.clearProviders.description'), - keywords: ['provider', 'anbieter', 'clear'], - icon: , - action: onClearProviders, - group: t('commandPalette.groups.filters'), - }, - { - id: 'clear-models', - label: 
t('commandPalette.commands.clearModels.label'), - description: t('commandPalette.commands.clearModels.description'), - keywords: ['models', 'modelle', 'clear'], - icon: , - action: onClearModels, - group: t('commandPalette.groups.filters'), - }, - { - id: 'clear-dates', - label: t('commandPalette.commands.clearDates.label'), - description: t('commandPalette.commands.clearDates.description'), - keywords: ['date', 'datum', 'range', 'clear'], - icon: , - action: onClearDateRange, - group: t('commandPalette.groups.filters'), - }, - { - id: 'reset-all', - label: t('commandPalette.commands.resetAll.label'), - description: t('commandPalette.commands.resetAll.description'), - keywords: ['reset all', 'alles zurücksetzen', 'default', 'clear filters'], - aliases: ['reset dashboard', 'alles reset', 'filter reset'], - icon: , - action: onResetAll, - group: t('commandPalette.groups.filters'), - }, - - { - id: 'top', - label: t('commandPalette.commands.scrollTop.label'), - description: t('commandPalette.commands.scrollTop.description'), - keywords: ['top', 'start', 'anfang'], - shortcut: '⌘↑', - icon: , - action: () => window.scrollTo({ top: 0, behavior: 'smooth' }), - group: t('commandPalette.groups.navigation'), - }, - { - id: 'bottom', - label: t('commandPalette.commands.scrollBottom.label'), - description: t('commandPalette.commands.scrollBottom.description'), - keywords: ['bottom', 'ende'], - icon: , - action: () => window.scrollTo({ top: document.body.scrollHeight, behavior: 'smooth' }), - group: t('commandPalette.groups.navigation'), - }, - { - id: 'filters', - label: t('commandPalette.commands.filters.label'), - description: t('commandPalette.commands.filters.description'), - keywords: ['filterbar', 'filter'], - icon: , - action: () => onScrollTo('filters'), - group: t('commandPalette.groups.navigation'), - }, - ...sectionNavigationCommands, - - { - id: 'theme', - label: isDark - ? 
t('commandPalette.commands.themeLight.label') - : t('commandPalette.commands.themeDark.label'), - description: t('commandPalette.commands.themeDark.description'), - keywords: ['theme', 'dark', 'light'], - shortcut: '⌘D', - icon: isDark ? : , - action: onToggleTheme, - group: t('commandPalette.groups.view'), - }, - { - id: 'language-de', - label: t('commandPalette.commands.languageGerman.label'), - description: t('commandPalette.commands.languageGerman.description'), - keywords: ['language', 'sprache', 'deutsch', 'german', 'locale'], - aliases: ['switch german', 'auf deutsch', 'sprache deutsch'], - icon: , - action: () => onLanguageChange('de'), - group: t('commandPalette.groups.language'), - }, - { - id: 'language-en', - label: t('commandPalette.commands.languageEnglish.label'), - description: t('commandPalette.commands.languageEnglish.description'), - keywords: ['language', 'sprache', 'english', 'englisch', 'locale'], - aliases: ['switch english', 'auf englisch', 'sprache english'], - icon: , - action: () => onLanguageChange('en'), - group: t('commandPalette.groups.language'), - }, - { - id: 'help', - label: t('commandPalette.commands.help.label'), - description: t('commandPalette.commands.help.description'), - keywords: ['shortcut', 'hilfe'], - shortcut: '?', - icon: , - action: onHelp, - group: t('commandPalette.groups.help'), - }, - ], [ isDark, + availableProviders, + selectedProviders, + availableModels, + selectedModels, + hasTodaySection, + hasMonthSection, + hasRequestSection, + sectionVisibility, + sectionOrder, + reportGenerating, + onToggleTheme, onAutoImport, onOpenSettings, onExportCSV, @@ -512,75 +123,20 @@ export function CommandPalette({ onClearDateRange, onResetAll, onScrollTo, - sectionNavigationCommands, - reportGenerating, - onToggleTheme, + onToggleProvider, + onToggleModel, onLanguageChange, onHelp, t, ], ) - const providerCommands = useMemo( - () => - availableProviders.map((provider) => { - const selected = 
selectedProviders.includes(provider) - return { - id: `provider-${provider}`, - label: `${selected ? t('commandPalette.commands.clearProviders.label') : t('common.provider')}: ${provider}`, - description: selected - ? `${t('commandPalette.commands.clearProviders.description')}: ${provider}` - : `${t('common.provider')} ${provider}`, - keywords: ['anbieter', 'provider', provider.toLowerCase()], - aliases: [`filter ${provider.toLowerCase()}`, `${provider.toLowerCase()} daten`], - icon: , - action: () => onToggleProvider(provider), - group: t('commandPalette.groups.providers'), - } - }), - [availableProviders, selectedProviders, onToggleProvider, t], - ) + const filteredCommands = useMemo(() => filterCommandItems(commands, search), [commands, search]) - const modelCommands = useMemo( - () => - availableModels.map((model) => { - const selected = selectedModels.includes(model) - return { - id: `model-${model}`, - label: `${selected ? t('commandPalette.commands.clearModels.label') : t('common.model')}: ${model}`, - description: selected - ? 
`${t('commandPalette.commands.clearModels.description')}: ${model}` - : `${t('common.model')} ${model}`, - keywords: ['modell', 'model', model.toLowerCase()], - aliases: [ - `filter ${model.toLowerCase()}`, - `${model.toLowerCase()} requests`, - `${model.toLowerCase()} kosten`, - ], - icon: , - action: () => onToggleModel(model), - group: t('commandPalette.groups.models'), - } - }), - [availableModels, selectedModels, onToggleModel, t], - ) - - const commands = useMemo( - () => [...baseCommands, ...providerCommands, ...modelCommands], - [baseCommands, providerCommands, modelCommands], - ) - - const filteredCommands = useMemo( - () => - commands - .map((cmd, index) => ({ cmd, score: getCommandSearchScore(cmd, search), index })) - .filter((entry) => entry.score > 0) - .sort((a, b) => b.score - a.score || a.index - b.index) - .map((entry) => entry.cmd), - [commands, search], + const visibleCommands = useMemo( + () => getVisibleCommandItems(filteredCommands), + [filteredCommands], ) - - const visibleCommands = useMemo(() => filteredCommands.slice(0, 9), [filteredCommands]) const groups = useMemo( () => Array.from(new Set(filteredCommands.map((c) => c.group))), [filteredCommands], diff --git a/src/components/features/settings/use-settings-modal-version-status.ts b/src/components/features/settings/use-settings-modal-version-status.ts index 302eb06..d944248 100644 --- a/src/components/features/settings/use-settings-modal-version-status.ts +++ b/src/components/features/settings/use-settings-modal-version-status.ts @@ -28,13 +28,13 @@ export function useSettingsModalVersionStatus(): SettingsVersionStatusViewModel const statusToneClass = useMemo(() => { if (state.isLoading) return 'text-muted-foreground' - if (state.lookupStatus === 'failed' || state.isLatest === false) return 'text-amber-500' + if (state.lookupStatus !== 'ok' || state.isLatest === false) return 'text-amber-500' return 'text-green-500' }, [state.isLatest, state.isLoading, state.lookupStatus]) const 
statusLabel = useMemo(() => { if (state.isLoading) return t('settings.modal.toktrackCheckingLatest') - if (state.lookupStatus === 'failed') return t('settings.modal.toktrackLatestCheckFailed') + if (state.lookupStatus !== 'ok') return t('settings.modal.toktrackLatestCheckFailed') if (state.isLatest) return t('settings.modal.toktrackLatest') return t('settings.modal.toktrackUpdateAvailable', { diff --git a/src/hooks/use-dashboard-controller-derived-state.ts b/src/hooks/use-dashboard-controller-derived-state.ts index ee0e6a6..ab1537a 100644 --- a/src/hooks/use-dashboard-controller-derived-state.ts +++ b/src/hooks/use-dashboard-controller-derived-state.ts @@ -28,6 +28,10 @@ export function useDashboardControllerDerivedState({ }: DashboardControllerDerivedStateParams): DashboardControllerDerivedState { const filters = useDashboardFilters(daily, settings.defaultFilters) const computed = useComputedMetrics(filters.filteredData, locale) + const dailyCosts = useMemo( + () => filters.filteredData.map((entry) => entry.totalCost), + [filters.filteredData], + ) const totalCalendarDays = useMemo(() => { if (!filters.dateRange || filters.viewMode !== 'daily') return 0 @@ -99,6 +103,7 @@ export function useDashboardControllerDerivedState({ hasData, filters, computed, + dailyCosts, totalCalendarDays, todayData, hasCurrentMonthData, diff --git a/src/hooks/use-dashboard-controller.ts b/src/hooks/use-dashboard-controller.ts index 35d9e9b..c0b573f 100644 --- a/src/hooks/use-dashboard-controller.ts +++ b/src/hooks/use-dashboard-controller.ts @@ -213,6 +213,7 @@ export function useDashboardControllerWithBootstrap( viewMode: derived.filters.viewMode, totalCalendarDays: derived.totalCalendarDays, filteredData: derived.filters.filteredData, + dailyCosts: derived.dailyCosts, filteredDailyData: derived.filters.filteredDailyData, todayData: derived.todayData, hasCurrentMonthData: derived.hasCurrentMonthData, diff --git a/src/lib/data-transforms.ts b/src/lib/data-transforms.ts index 
6a06722..685f6a5 100644 --- a/src/lib/data-transforms.ts +++ b/src/lib/data-transforms.ts @@ -143,7 +143,11 @@ export function buildDashboardChartTransforms( modelCostChartData: [], tokenChartData: [], requestChartData: [], - weekdayData: createWeekdayLabels(locale).map((day) => ({ day, cost: 0 })), + weekdayData: createWeekdayLabels(locale).map((day, weekdayIndex) => ({ + day, + cost: 0, + weekdayIndex, + })), } } @@ -295,7 +299,7 @@ export function buildDashboardChartTransforms( const values = weekdayCosts[index] ?? [] const average = values.length > 0 ? values.reduce((sum, value) => sum + value, 0) / values.length : 0 - return { day, cost: average } + return { day, cost: average, weekdayIndex: index } }) return { diff --git a/src/lib/toktrack-version-status.ts b/src/lib/toktrack-version-status.ts index 27c54fb..fe510cf 100644 --- a/src/lib/toktrack-version-status.ts +++ b/src/lib/toktrack-version-status.ts @@ -60,7 +60,7 @@ function normalizeToktrackVersionStatus( configuredVersion: status.configuredVersion || TOKTRACK_VERSION, latestVersion: status.latestVersion ?? null, isLatest: typeof status.isLatest === 'boolean' ? status.isLatest : null, - lookupStatus: status.lookupStatus === 'failed' ? 
'failed' : 'ok', + lookupStatus: status.lookupStatus, isLoading: false, } } diff --git a/src/types/dashboard-controller.d.ts b/src/types/dashboard-controller.d.ts index a7dee83..5526720 100644 --- a/src/types/dashboard-controller.d.ts +++ b/src/types/dashboard-controller.d.ts @@ -108,6 +108,7 @@ export interface DashboardControllerDerivedState { hasData: boolean filters: DashboardControllerFiltersState computed: DashboardControllerComputedState + dailyCosts: number[] totalCalendarDays: number todayData: DailyUsage | null hasCurrentMonthData: boolean diff --git a/src/types/dashboard-view-model.d.ts b/src/types/dashboard-view-model.d.ts index 9cc9198..de531f7 100644 --- a/src/types/dashboard-view-model.d.ts +++ b/src/types/dashboard-view-model.d.ts @@ -204,6 +204,7 @@ export interface DashboardOverviewSectionsViewModel { viewMode: ViewMode totalCalendarDays: number filteredData: DailyUsage[] + dailyCosts: number[] filteredDailyData: DailyUsage[] todayData: DailyUsage | null hasCurrentMonthData: boolean diff --git a/src/types/index.ts b/src/types/index.ts index 688b943..a8bfab8 100644 --- a/src/types/index.ts +++ b/src/types/index.ts @@ -48,6 +48,7 @@ export interface UsageImportSummary { addedDays: number unchangedDays: number conflictingDays: number + skippedDays: number totalDays: number } @@ -217,6 +218,7 @@ export interface ModelCostChartPoint extends ChartDataPoint { export interface WeekdayData { day: string cost: number + weekdayIndex?: number } /** Grades the forecast quality based on data density and volatility. 
*/ @@ -316,6 +318,6 @@ export interface ToktrackVersionStatus { configuredVersion: string latestVersion: string | null isLatest: boolean | null - lookupStatus: 'ok' | 'failed' + lookupStatus: 'ok' | 'failed' | 'malformed-output' | 'timeout' message?: string } diff --git a/tests/architecture/dashboard-sections-contract.test.ts b/tests/architecture/dashboard-sections-contract.test.ts index dd70344..b8eb89c 100644 --- a/tests/architecture/dashboard-sections-contract.test.ts +++ b/tests/architecture/dashboard-sections-contract.test.ts @@ -50,6 +50,20 @@ function findFunction(sourceFile: ts.SourceFile, name: string) { return declaration } +function getImportSpecifiers(sourceFile: ts.SourceFile) { + return sourceFile.statements.flatMap((statement) => { + if ( + ts.isImportDeclaration(statement) && + statement.moduleSpecifier && + ts.isStringLiteral(statement.moduleSpecifier) + ) { + return [statement.moduleSpecifier.text] + } + + return [] + }) +} + function getIdentifierText(name: ts.PropertyName | ts.BindingName) { if (ts.isIdentifier(name)) return name.text throw new Error(`Expected identifier, received ${ts.SyntaxKind[name.kind]}`) @@ -92,6 +106,53 @@ describe('dashboard sections contract guardrails', () => { ]) }) + it('keeps DashboardSections as a thin section orchestrator', () => { + const sourceFile = readSourceFile(dashboardSectionsPath) + const source = sourceFile.getFullText() + const imports = getImportSpecifiers(sourceFile) + + expect(source).not.toContain('switch (sectionId)') + expect(source).not.toContain('function lazyWithPreload') + expect(source).not.toContain('const dashboardSectionPreloaders =') + for (const forbiddenImport of [ + '../cards/PrimaryMetrics', + '../features/forecast/CostForecast', + '../features/heatmap/HeatmapCalendar', + '../features/risk/ConcentrationRisk', + '../charts/CostOverTime', + '../tables/ModelEfficiency', + ]) { + expect(imports).not.toContain(forbiddenImport) + } + expect(imports).toEqual( + expect.arrayContaining([ + 
'./sections/dashboard-section-lazy-components', + './sections/dashboard-section-metadata', + './sections/dashboard-section-renderers', + ]), + ) + }) + + it('keeps section renderer ownership split by section family', () => { + const rendererRegistryPath = path.resolve( + process.cwd(), + 'src/components/dashboard/sections/dashboard-section-renderers.ts', + ) + const sourceFile = readSourceFile(rendererRegistryPath) + const source = sourceFile.getFullText() + const imports = getImportSpecifiers(sourceFile) + + expect(source).not.toContain('switch (sectionId)') + expect(imports).toEqual( + expect.arrayContaining([ + './dashboard-overview-sections', + './dashboard-forecast-sections', + './dashboard-analysis-sections', + './dashboard-comparison-table-sections', + ]), + ) + }) + it('keeps DashboardSectionsViewModel split into section bundles', () => { const sourceFile = readSourceFile(dashboardViewModelPath) diff --git a/tests/architecture/import-patterns.ts b/tests/architecture/import-patterns.ts new file mode 100644 index 0000000..fc17256 --- /dev/null +++ b/tests/architecture/import-patterns.ts @@ -0,0 +1,14 @@ +export function escapeRegex(value: string) { + return value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&') +} + +export function createForbiddenImportPattern(importPath: string) { + const escapedPath = escapeRegex(importPath) + const importBoundary = importPath.endsWith('/') + ? 
escapedPath + : String.raw`${escapedPath}(?=(?:['"./]|$))` + + return new RegExp( + String.raw`(?:require\s*\(\s*['"]${importBoundary}|from\s*['"]${importBoundary}|import\s*\(\s*['"]${importBoundary})`, + ) +} diff --git a/tests/architecture/server-auto-import-runtime-boundaries.test.ts b/tests/architecture/server-auto-import-runtime-boundaries.test.ts new file mode 100644 index 0000000..cd0320e --- /dev/null +++ b/tests/architecture/server-auto-import-runtime-boundaries.test.ts @@ -0,0 +1,57 @@ +import { readFileSync } from 'node:fs' +import path from 'node:path' +import { describe, expect, it } from 'vitest' +import { createForbiddenImportPattern } from './import-patterns' + +function readRepoFile(relativePath: string) { + return readFileSync(path.resolve(process.cwd(), relativePath), 'utf8') +} + +const autoImportRuntimeServiceFiles = [ + 'server/auto-import-runtime/messages.js', + 'server/auto-import-runtime/command-runner.js', + 'server/auto-import-runtime/toktrack-runners.js', + 'server/auto-import-runtime/latest-version.js', + 'server/auto-import-runtime/import-executor.js', +] + +const forbiddenRuntimeImports = [ + '../background-runtime', + '../data-runtime', + '../http-router', + '../routes/', + '../server-lifecycle', + '../startup-runtime', +] + +describe('server auto-import runtime boundary contract', () => { + it('matches forbidden runtime imports with extensions and subpaths', () => { + expect("require('../background-runtime.js')").toMatch( + createForbiddenImportPattern('../background-runtime'), + ) + expect("from '../routes/auto-import-routes.js'").toMatch( + createForbiddenImportPattern('../routes/'), + ) + }) + + it('keeps auto-import-runtime.js as the auto-import composition facade', () => { + const source = readRepoFile('server/auto-import-runtime.js') + + expect(source).toContain("require('./auto-import-runtime/messages')") + expect(source).toContain("require('./auto-import-runtime/command-runner')") + 
expect(source).toContain("require('./auto-import-runtime/toktrack-runners')") + expect(source).toContain("require('./auto-import-runtime/latest-version')") + expect(source).toContain("require('./auto-import-runtime/import-executor')") + expect(source).toContain('function createAutoImportRuntime') + }) + + it('keeps auto-import runtime services independent from HTTP and sibling runtimes', () => { + for (const serviceFile of autoImportRuntimeServiceFiles) { + const source = readRepoFile(serviceFile) + + for (const importPath of forbiddenRuntimeImports) { + expect(source).not.toMatch(createForbiddenImportPattern(importPath)) + } + } + }) +}) diff --git a/tests/architecture/server-data-runtime-boundaries.test.ts b/tests/architecture/server-data-runtime-boundaries.test.ts new file mode 100644 index 0000000..0b1ba70 --- /dev/null +++ b/tests/architecture/server-data-runtime-boundaries.test.ts @@ -0,0 +1,51 @@ +import { readFileSync } from 'node:fs' +import path from 'node:path' +import { describe, expect, it } from 'vitest' +import { createForbiddenImportPattern } from './import-patterns' + +function readRepoFile(relativePath: string) { + return readFileSync(path.resolve(process.cwd(), relativePath), 'utf8') +} + +const dataRuntimeServiceFiles = [ + 'server/data-runtime/app-paths.js', + 'server/data-runtime/file-io.js', + 'server/data-runtime/file-locks.js', + 'server/data-runtime/import-merge.js', +] + +const forbiddenRuntimeImports = [ + '../http-router', + '../routes/', + '../auto-import-runtime', + '../background-runtime', +] + +describe('server data runtime boundary contract', () => { + it('matches forbidden runtime imports with extensions and subpaths', () => { + expect("require('../auto-import-runtime.js')").toMatch( + createForbiddenImportPattern('../auto-import-runtime'), + ) + expect("from '../routes/usage-routes.js'").toMatch(createForbiddenImportPattern('../routes/')) + }) + + it('keeps data-runtime.js as the persistence composition facade', () => { + const 
source = readRepoFile('server/data-runtime.js') + + expect(source).toContain("require('./data-runtime/app-paths')") + expect(source).toContain("require('./data-runtime/file-io')") + expect(source).toContain("require('./data-runtime/file-locks')") + expect(source).toContain("require('./data-runtime/import-merge')") + expect(source).toContain('function createDataRuntime') + }) + + it('keeps data runtime services independent from HTTP and other runtime modules', () => { + for (const serviceFile of dataRuntimeServiceFiles) { + const source = readRepoFile(serviceFile) + + for (const importPath of forbiddenRuntimeImports) { + expect(source).not.toMatch(createForbiddenImportPattern(importPath)) + } + } + }) +}) diff --git a/tests/architecture/server-http-boundaries.test.ts b/tests/architecture/server-http-boundaries.test.ts index f959c33..0f88194 100644 --- a/tests/architecture/server-http-boundaries.test.ts +++ b/tests/architecture/server-http-boundaries.test.ts @@ -5,6 +5,16 @@ function readRepoFile(relativePath: string) { return readFileSync(path.resolve(process.cwd(), relativePath), 'utf8') } +const serverRouteFiles = [ + 'server/routes/auto-import-routes.js', + 'server/routes/http-route-utils.js', + 'server/routes/report-routes.js', + 'server/routes/runtime-routes.js', + 'server/routes/settings-routes.js', + 'server/routes/static-routes.js', + 'server/routes/usage-routes.js', +] + describe('server HTTP boundary contract', () => { it('keeps request guard policy behind the HTTP utils facade', () => { const routerSource = readRepoFile('server/http-router.js') @@ -31,4 +41,28 @@ describe('server HTTP boundary contract', () => { expect(requestGuardsSource).toContain('function getHostHeaderHost') expect(requestGuardsSource).toContain('function hasJsonContentType') }) + + it('keeps the HTTP router as a route-group composition shell', () => { + const routerSource = readRepoFile('server/http-router.js') + + expect(routerSource).toContain("require('./routes/usage-routes')") + 
expect(routerSource).toContain("require('./routes/settings-routes')") + expect(routerSource).toContain("require('./routes/auto-import-routes')") + expect(routerSource).toContain("require('./routes/report-routes')") + expect(routerSource).toContain("require('./routes/static-routes')") + expect(routerSource).not.toContain("apiPath === '/settings'") + expect(routerSource).not.toContain("apiPath === '/upload'") + expect(routerSource).not.toContain("apiPath === '/report/pdf'") + }) + + it('keeps route groups dependent on injected services instead of runtime implementations', () => { + for (const routeFile of serverRouteFiles) { + const routeSource = readRepoFile(routeFile) + + expect(routeSource).not.toContain("require('../data-runtime") + expect(routeSource).not.toContain("require('../auto-import-runtime") + expect(routeSource).not.toContain("require('../background-runtime") + expect(routeSource).not.toContain("require('../http-router") + } + }) }) diff --git a/tests/architecture/server-runtime-state-contract.test.ts b/tests/architecture/server-runtime-state-contract.test.ts index b4d108f..1d31ea9 100644 --- a/tests/architecture/server-runtime-state-contract.test.ts +++ b/tests/architecture/server-runtime-state-contract.test.ts @@ -8,9 +8,13 @@ function readRepoFile(relativePath: string) { describe('server runtime state contract', () => { it('keeps auto-import stream state outside the HTTP router', () => { const routerSource = readRepoFile('server/http-router.js') + const autoImportRoutesSource = readRepoFile('server/routes/auto-import-routes.js') expect(routerSource).not.toContain('autoImportStreamRunning') - expect(routerSource).toContain('acquireAutoImportLease') + expect(routerSource).not.toContain('acquireAutoImportLease') + expect(routerSource).toContain('createAutoImportRoutes') + expect(autoImportRoutesSource).not.toContain('autoImportStreamRunning') + expect(autoImportRoutesSource).toContain('acquireAutoImportLease') }) it('keeps mutable runtime flags behind 
runtime-state services', () => { diff --git a/tests/architecture/shared-declaration-contract.test.ts b/tests/architecture/shared-declaration-contract.test.ts new file mode 100644 index 0000000..eee5a60 --- /dev/null +++ b/tests/architecture/shared-declaration-contract.test.ts @@ -0,0 +1,329 @@ +import { createRequire } from 'node:module' +import { existsSync, mkdtempSync, readFileSync, rmSync, writeFileSync } from 'node:fs' +import { tmpdir } from 'node:os' +import path from 'node:path' +import * as ts from 'typescript' + +const require = createRequire(import.meta.url) + +interface SharedContractModule { + declarationPath: string + modulePath: string +} + +const sharedContractModules: SharedContractModule[] = [ + { + modulePath: 'shared/app-settings.js', + declarationPath: 'shared/app-settings.d.ts', + }, + { + modulePath: 'shared/dashboard-preferences.js', + declarationPath: 'shared/dashboard-preferences.d.ts', + }, + { + modulePath: 'shared/toktrack-version.js', + declarationPath: 'shared/toktrack-version.d.ts', + }, +] + +function hasExportModifier(node: ts.Node) { + return ts.canHaveModifiers(node) + ? Boolean(node.modifiers?.some((modifier) => modifier.kind === ts.SyntaxKind.ExportKeyword)) + : false +} + +function unwrapExpression(expression: ts.Expression): ts.Expression { + return ts.isParenthesizedExpression(expression) + ? 
unwrapExpression(expression.expression) + : expression +} + +function readExportAssignmentName(statement: ts.ExportAssignment) { + const expression = unwrapExpression(statement.expression) + + if (ts.isIdentifier(expression)) { + return expression.text + } + + if ( + (ts.isClassExpression(expression) || ts.isFunctionExpression(expression)) && + expression.name + ) { + return expression.name.text + } + + return null +} + +function collectBindingIdentifierNames(name: ts.BindingName): string[] { + if (ts.isIdentifier(name)) { + return [name.text] + } + + return name.elements.flatMap((element) => { + if (ts.isOmittedExpression(element)) { + return [] + } + + return collectBindingIdentifierNames(element.name) + }) +} + +function readDeclarationSourceFile(filePath: string) { + return ts.createSourceFile( + filePath, + readFileSync(filePath, 'utf8'), + ts.ScriptTarget.Latest, + true, + ts.ScriptKind.TS, + ) +} + +function resolveDeclarationModulePath(importerPath: string, moduleSpecifier: string) { + if (!moduleSpecifier.startsWith('.')) { + return null + } + + const resolvedBasePath = path.resolve(path.dirname(importerPath), moduleSpecifier) + const candidates = [ + resolvedBasePath, + `${resolvedBasePath}.d.ts`, + `${resolvedBasePath}.ts`, + path.join(resolvedBasePath, 'index.d.ts'), + path.join(resolvedBasePath, 'index.ts'), + ] + + return candidates.find((candidate) => existsSync(candidate)) ?? 
null +} + +function collectStarReExportValueExports( + sourceFile: ts.SourceFile, + statement: ts.ExportDeclaration, + visitedFiles: Set, +) { + if (!statement.moduleSpecifier || !ts.isStringLiteral(statement.moduleSpecifier)) { + return [] + } + + const declarationPath = resolveDeclarationModulePath( + sourceFile.fileName, + statement.moduleSpecifier.text, + ) + if (!declarationPath) { + return [] + } + + return collectDeclarationValueExports(readDeclarationSourceFile(declarationPath), visitedFiles) +} + +function collectDeclarationValueExports( + sourceFile: ts.SourceFile, + visitedFiles = new Set(), +) { + const sourcePath = path.resolve(sourceFile.fileName) + if (visitedFiles.has(sourcePath)) { + return [] + } + visitedFiles.add(sourcePath) + + const exports = new Set() + + for (const statement of sourceFile.statements) { + if ( + hasExportModifier(statement) && + (ts.isFunctionDeclaration(statement) || + ts.isClassDeclaration(statement) || + ts.isEnumDeclaration(statement)) && + statement.name + ) { + exports.add(statement.name.text) + continue + } + + if (hasExportModifier(statement) && ts.isVariableStatement(statement)) { + for (const declaration of statement.declarationList.declarations) { + for (const exportName of collectBindingIdentifierNames(declaration.name)) { + exports.add(exportName) + } + } + continue + } + + if (ts.isExportDeclaration(statement)) { + if (statement.isTypeOnly) { + continue + } + + if (!statement.exportClause) { + for (const exportName of collectStarReExportValueExports( + sourceFile, + statement, + visitedFiles, + )) { + exports.add(exportName) + } + continue + } + + if (ts.isNamespaceExport(statement.exportClause)) { + exports.add(statement.exportClause.name.text) + continue + } + + for (const element of statement.exportClause.elements) { + if (!element.isTypeOnly) { + exports.add(element.name.text) + } + } + } + + if (ts.isExportAssignment(statement)) { + const exportName = readExportAssignmentName(statement) + if (exportName) { 
+ exports.add(exportName) + } + } + } + + return [...exports].sort() +} + +function readDeclarationValueExports(declarationPath: string) { + const absolutePath = path.resolve(process.cwd(), declarationPath) + return collectDeclarationValueExports(readDeclarationSourceFile(absolutePath)) +} + +function readRuntimeExports(modulePath: string) { + const moduleExports = require(path.resolve(process.cwd(), modulePath)) as unknown + + if (moduleExports === null || moduleExports === undefined) { + return [] + } + + if (typeof moduleExports === 'object') { + return Object.keys(moduleExports).sort() + } + + if (typeof moduleExports === 'function' && moduleExports.name) { + return [moduleExports.name] + } + + return ['default'] +} + +function createDeclarationFixture(source: string) { + return ts.createSourceFile('fixture.d.ts', source, ts.ScriptTarget.Latest, true, ts.ScriptKind.TS) +} + +describe('shared runtime declaration contracts', () => { + it('collects named exports and assignment exports from declaration files', () => { + expect( + collectDeclarationValueExports( + createDeclarationFixture(` + export const namedConst: string + export function namedFunction(): void + `), + ), + ).toEqual(['namedConst', 'namedFunction']) + + expect( + collectDeclarationValueExports( + createDeclarationFixture(` + declare const defaultContract: unknown + export default defaultContract + `), + ), + ).toEqual(['defaultContract']) + + expect( + collectDeclarationValueExports( + createDeclarationFixture(` + declare const equalsContract: unknown + export = equalsContract + `), + ), + ).toEqual(['equalsContract']) + }) + + it('collects value exports from binding pattern declarations', () => { + const sourceFile = createDeclarationFixture(` + const valueSource = {} + export const { objectValue, nested: { nestedValue } } = valueSource + export const [firstArrayValue, , { deepArrayValue }] = valueSource + `) + + expect(collectDeclarationValueExports(sourceFile)).toEqual([ + 'deepArrayValue', + 
'firstArrayValue', + 'nestedValue', + 'objectValue', + ]) + }) + + it('skips type-only re-export declarations', () => { + const sourceFile = createDeclarationFixture(` + const runtimeValue = 'runtime' + interface TypeOnlyContract {} + export { runtimeValue, type TypeOnlyContract } + export type { TypeOnlyContract as RenamedTypeOnlyContract } + `) + + expect(collectDeclarationValueExports(sourceFile)).toEqual(['runtimeValue']) + }) + + it('collects value exports from star re-export declarations', () => { + const fixtureRoot = mkdtempSync(path.join(tmpdir(), 'ttdash-declaration-fixture-')) + + try { + const reExportPath = path.join(fixtureRoot, 're-export.d.ts') + writeFileSync( + path.join(fixtureRoot, 'fixture-exports.d.ts'), + ` + export const FIRST_VALUE: string + export function secondValue(): void + export interface IgnoredTypeOnlyContract {} + `, + 'utf8', + ) + + const sourceFile = ts.createSourceFile( + reExportPath, + "export * from './fixture-exports'", + ts.ScriptTarget.Latest, + true, + ts.ScriptKind.TS, + ) + + expect(collectDeclarationValueExports(sourceFile)).toEqual(['FIRST_VALUE', 'secondValue']) + } finally { + rmSync(fixtureRoot, { recursive: true, force: true }) + } + }) + + it('reads named function default exports from CommonJS runtime modules', () => { + const fixtureRoot = mkdtempSync(path.join(tmpdir(), 'ttdash-runtime-fixture-')) + + try { + const namedFunctionPath = path.join(fixtureRoot, 'named-function.cjs') + const primitivePath = path.join(fixtureRoot, 'primitive.cjs') + const emptyPath = path.join(fixtureRoot, 'empty.cjs') + + writeFileSync(namedFunctionPath, 'module.exports = function exportedRuntime() {}', 'utf8') + writeFileSync(primitivePath, 'module.exports = 42', 'utf8') + writeFileSync(emptyPath, 'module.exports = null', 'utf8') + + expect(readRuntimeExports(namedFunctionPath)).toEqual(['exportedRuntime']) + expect(readRuntimeExports(primitivePath)).toEqual(['default']) + expect(readRuntimeExports(emptyPath)).toEqual([]) + } 
finally { + rmSync(fixtureRoot, { recursive: true, force: true }) + } + }) + + it.each(sharedContractModules)( + 'keeps $declarationPath value exports aligned with $modulePath', + ({ declarationPath, modulePath }) => { + expect(readDeclarationValueExports(declarationPath)).toEqual(readRuntimeExports(modulePath)) + }, + ) +}) diff --git a/tests/e2e/command-palette.spec.ts b/tests/e2e/command-palette.spec.ts index 9b9b62b..80ebaea 100644 --- a/tests/e2e/command-palette.spec.ts +++ b/tests/e2e/command-palette.spec.ts @@ -1,7 +1,6 @@ -import { expect, test, type Page } from '@playwright/test' +import { expect, test, type Page } from './fixtures' import { dailyViewPattern, - mockAutoImportStream, mockPdfReport, monthlyViewPattern, prepareDashboard, @@ -9,71 +8,10 @@ import { viewModeComboboxPattern, } from './helpers' -const commandPaletteTitlePattern = /^Command [Pp]alette$/ -const settingsDialogTitlePattern = /^(Settings|Einstellungen)$/ +const commandPaletteTitlePattern = /^(Command Palette|Befehlspalette)$/ const helpDialogTitlePattern = /^(Help & shortcuts|Hilfe & Tastenkürzel)$/ -const autoImportDialogTitlePattern = /^(Toktrack auto import|Toktrack Auto-Import)$/ -const closeButtonPattern = /^(Close|Schliessen)$/ -const autoImportButtonPattern = /^(Auto-Import|Auto import)$/ -const uploadFileButtonPattern = /^(Datei hochladen|Upload file)$/ -const yearlyViewPattern = /^(Jahresansicht|Yearly view)$/ -const providersActivePattern = /^(1 providers active|1 Anbieter aktiv)$/ -const modelsActivePattern = /^(1 models active|1 Modelle aktiv)$/ const dateFilterActivePattern = /^(Date filter active|Datumsfilter aktiv)$/ const preset7Pattern = /^(7D|7T)$/ -const preset30Pattern = /^(30D|30T)$/ -const presetMonthPattern = /^(Month|Monat)$/ -const presetYearPattern = /^(Year|Jahr)$/ -const presetAllPattern = /^(All|Alle)$/ - -const providerLabels = ['Anthropic', 'Google', 'OpenAI'] -const modelLabels = ['Claude Sonnet 4.5', 'Gemini 2.5 Pro', 'GPT-5.4'] -const sectionCommands 
= [ - { testId: 'command-section-insights', selector: '#insights' }, - { testId: 'command-section-metrics', selector: '#metrics' }, - { testId: 'command-section-today', selector: '#today' }, - { testId: 'command-section-currentMonth', selector: '#current-month' }, - { testId: 'command-section-activity', selector: '#activity' }, - { testId: 'command-section-forecastCache', selector: '#forecast-cache' }, - { testId: 'command-section-limits', selector: '#limits' }, - { testId: 'command-section-costAnalysis', selector: '#charts' }, - { testId: 'command-section-tokenAnalysis', selector: '#token-analysis' }, - { testId: 'command-section-requestAnalysis', selector: '#request-analysis' }, - { testId: 'command-section-advancedAnalysis', selector: '#advanced-analysis' }, - { testId: 'command-section-comparisons', selector: '#comparisons' }, - { testId: 'command-section-tables', selector: '#tables' }, -] as const - -const expectedCommandTestIds = [ - 'command-auto-import', - 'command-settings-open', - 'command-csv', - 'command-report', - 'command-upload', - 'command-delete', - 'command-view-daily', - 'command-view-monthly', - 'command-view-yearly', - 'command-preset-7d', - 'command-preset-30d', - 'command-preset-month', - 'command-preset-year', - 'command-preset-all', - 'command-clear-providers', - 'command-clear-models', - 'command-clear-dates', - 'command-reset-all', - 'command-top', - 'command-bottom', - 'command-filters', - 'command-theme', - 'command-language-de', - 'command-language-en', - 'command-help', - ...sectionCommands.map((command) => command.testId), - ...providerLabels.map((provider) => `command-provider-${provider}`), - ...modelLabels.map((model) => `command-model-${model}`), -].sort() function getPalette(page: Page) { return page.getByRole('dialog', { name: commandPaletteTitlePattern }) @@ -110,301 +48,68 @@ async function waitForSectionNearTop(page: Page, selector: string) { .toBeLessThan(220) } -async function runSectionNavigationCommands( - page: Page, - 
commands: readonly (typeof sectionCommands)[number][], -) { - for (const section of commands) { - await test.step(`${section.testId} scrolls to the expected section`, async () => { - await page.evaluate(() => window.scrollTo({ top: document.body.scrollHeight })) - await runPaletteCommand(page, section.testId) - await waitForSectionNearTop(page, section.selector) - }) - } -} - test.beforeEach(async ({ page, baseURL }) => { await prepareDashboard(page, baseURL) }) -test('renders the full command palette command set for the seeded dataset', async ({ page }) => { +test('opens from the keyboard and shows representative commands', async ({ page }) => { const palette = await openPalette(page) - const renderedCommandTestIds = await palette - .locator('[data-testid^="command-"]') - .evaluateAll((nodes) => - nodes - .map((node) => node.getAttribute('data-testid')) - .filter((value): value is string => typeof value === 'string') - .sort(), - ) - expect(renderedCommandTestIds).toEqual(expectedCommandTestIds) + await expect(palette.locator('[data-testid="command-report"]')).toBeVisible() + await expect(palette.locator('[data-testid="command-view-monthly"]')).toBeVisible() + await expect(palette.locator('[data-testid="command-section-costAnalysis"]')).toBeVisible() }) -test('executes action commands from the command palette', async ({ page }) => { - await mockAutoImportStream(page) +test('executes a report action command from the command palette', async ({ page }) => { const pdfReport = await mockPdfReport(page) + const downloadPromise = page.waitForEvent('download') - await test.step('auto import opens and completes through the command palette', async () => { - await runPaletteCommand(page, 'command-auto-import') - - const dialog = page.getByRole('dialog', { name: autoImportDialogTitlePattern }) - await expect(dialog).toBeVisible() - await expect(dialog.getByText(/5 days imported|5 Tage importiert/)).toBeVisible() - await dialog.getByRole('button', { name: closeButtonPattern 
}).last().click() - await expect(dialog).toBeHidden() - }) - - await test.step('settings command is searchable through aliases and opens the dialog', async () => { - const palette = await openPalette(page) - await palette.locator('input').fill('einstellungen offnen') - const settingsCommand = palette.locator('[data-testid="command-settings-open"]') - await expect(settingsCommand).toBeVisible() - await settingsCommand.click() - await expect(palette).toBeHidden() - - const dialog = page.getByRole('dialog', { name: settingsDialogTitlePattern }) - await expect(dialog).toBeVisible() - await page.keyboard.press('Escape') - await expect(dialog).toBeHidden() - }) - - await test.step('upload command opens the file chooser', async () => { - const fileChooserPromise = page.waitForEvent('filechooser') - const { palette, command } = await getPaletteCommand(page, 'command-upload') - await command.click() - await fileChooserPromise - await expect(palette).toBeHidden() - }) - - await test.step('CSV export command downloads the current dataset', async () => { - const downloadPromise = page.waitForEvent('download') - await runPaletteCommand(page, 'command-csv') - const download = await downloadPromise - - expect(download.suggestedFilename()).toMatch(/^ttdash-export-\d{4}-\d{2}-\d{2}\.csv$/) - const csv = await readDownloadText(download) - expect(csv).toContain('"date","totalCost","totalTokens"') - expect(csv).toContain('GPT-5.4') - }) - - await test.step('PDF report command requests the report and downloads a PDF', async () => { - const downloadPromise = page.waitForEvent('download') - await runPaletteCommand(page, 'command-report') - const download = await downloadPromise - - expect(download.suggestedFilename()).toMatch(/^ttdash-report-\d{4}-\d{2}-\d{2}\.pdf$/) - const pdf = await readDownloadText(download) - expect(pdf.startsWith('%PDF-1.4')).toBe(true) - await expect.poll(() => pdfReport.getReportRequest()?.language).toBe('de') - }) - - await test.step('delete command clears the 
local dataset', async () => { - await runPaletteCommand(page, 'command-delete') - - await expect(page.getByRole('button', { name: autoImportButtonPattern })).toBeVisible() - await expect(page.getByRole('button', { name: uploadFileButtonPattern })).toBeVisible() - await expect(page.locator('#token-analysis')).toHaveCount(0) - }) -}) - -test('executes filter and view commands from the command palette', async ({ page }) => { - const filters = page.locator('#filters') - const openAiFilter = filters.getByRole('button', { name: 'OpenAI', exact: true }) - const gptFilter = filters.getByRole('button', { name: 'GPT-5.4', exact: true }) - - await test.step('view commands switch between daily, monthly, and yearly modes', async () => { - await runPaletteCommand(page, 'command-view-monthly') - await expect(filters.getByRole('combobox', { name: viewModeComboboxPattern })).toContainText( - monthlyViewPattern, - ) - - await runPaletteCommand(page, 'command-view-yearly') - await expect(filters.getByRole('combobox', { name: viewModeComboboxPattern })).toContainText( - yearlyViewPattern, - ) - - await runPaletteCommand(page, 'command-view-daily') - await expect(filters.getByRole('combobox', { name: viewModeComboboxPattern })).toContainText( - dailyViewPattern, - ) - }) - - await test.step('preset commands apply date ranges', async () => { - await runPaletteCommand(page, 'command-preset-7d') - await expect(filters.getByRole('button', { name: preset7Pattern })).toHaveAttribute( - 'aria-pressed', - 'true', - ) - await expect(filters.getByText(dateFilterActivePattern)).toBeVisible() - - await runPaletteCommand(page, 'command-preset-30d') - await expect(filters.getByRole('button', { name: preset30Pattern })).toHaveAttribute( - 'aria-pressed', - 'true', - ) - - await runPaletteCommand(page, 'command-preset-month') - await expect(filters.getByRole('button', { name: presetMonthPattern })).toHaveAttribute( - 'aria-pressed', - 'true', - ) - - await runPaletteCommand(page, 
'command-preset-year') - await expect(filters.getByRole('button', { name: presetYearPattern })).toHaveAttribute( - 'aria-pressed', - 'true', - ) + await page.getByTestId('language-switcher-de').click() + await expect(page.locator('html')).toHaveAttribute('lang', 'de') + await runPaletteCommand(page, 'command-report') + const download = await downloadPromise - await runPaletteCommand(page, 'command-preset-all') - await expect(filters.getByRole('button', { name: presetAllPattern })).toHaveAttribute( - 'aria-pressed', - 'true', - ) - await expect(filters.getByText(dateFilterActivePattern)).toHaveCount(0) - }) - - await test.step('clear and reset commands restore default filter state', async () => { - await openAiFilter.click() - await gptFilter.click() - await runPaletteCommand(page, 'command-preset-7d') - - await expect(filters.getByText(providersActivePattern)).toBeVisible() - await expect(filters.getByText(modelsActivePattern)).toBeVisible() - await expect(filters.getByText(dateFilterActivePattern)).toBeVisible() - - await runPaletteCommand(page, 'command-clear-providers') - await expect(openAiFilter).toHaveAttribute('aria-pressed', 'false') - await expect(filters.getByText(providersActivePattern)).toHaveCount(0) - - await runPaletteCommand(page, 'command-clear-models') - await expect(gptFilter).toHaveAttribute('aria-pressed', 'false') - await expect(filters.getByText(modelsActivePattern)).toHaveCount(0) - - await runPaletteCommand(page, 'command-clear-dates') - await expect(filters.getByText(dateFilterActivePattern)).toHaveCount(0) - await expect(filters.getByRole('button', { name: presetAllPattern })).toHaveAttribute( - 'aria-pressed', - 'true', - ) - - await openAiFilter.click() - await gptFilter.click() - await runPaletteCommand(page, 'command-view-monthly') - await runPaletteCommand(page, 'command-preset-30d') - await expect(filters.getByRole('combobox', { name: viewModeComboboxPattern })).toContainText( - monthlyViewPattern, - ) - - await 
runPaletteCommand(page, 'command-reset-all') - await expect(filters.getByRole('combobox', { name: viewModeComboboxPattern })).toContainText( - dailyViewPattern, - ) - await expect(openAiFilter).toHaveAttribute('aria-pressed', 'false') - await expect(gptFilter).toHaveAttribute('aria-pressed', 'false') - await expect(filters.getByText(providersActivePattern)).toHaveCount(0) - await expect(filters.getByText(modelsActivePattern)).toHaveCount(0) - await expect(filters.getByText(dateFilterActivePattern)).toHaveCount(0) - await expect(filters.getByRole('button', { name: presetAllPattern })).toHaveAttribute( - 'aria-pressed', - 'true', - ) - }) + expect(download.suggestedFilename()).toMatch(/^ttdash-report-\d{4}-\d{2}-\d{2}\.pdf$/) + const pdf = await readDownloadText(download) + expect(pdf.startsWith('%PDF-1.4')).toBe(true) + await expect.poll(() => pdfReport.getReportRequest()?.language).toBe('de') }) -test('executes dynamic provider and model commands from the command palette', async ({ page }) => { +test('executes representative filter and quick-select commands', async ({ page }) => { const filters = page.locator('#filters') - for (const provider of providerLabels) { - await test.step(`provider command toggles ${provider}`, async () => { - const providerButton = filters.getByRole('button', { name: provider, exact: true }) + await runPaletteCommand(page, 'command-view-monthly') + await expect(filters.getByRole('combobox', { name: viewModeComboboxPattern })).toContainText( + monthlyViewPattern, + ) - await runPaletteCommand(page, `command-provider-${provider}`) - await expect(providerButton).toHaveAttribute('aria-pressed', 'true') + await runPaletteCommand(page, 'command-preset-7d') + await expect(filters.getByRole('button', { name: preset7Pattern })).toHaveAttribute( + 'aria-pressed', + 'true', + ) + await expect(filters.getByText(dateFilterActivePattern)).toBeVisible() - await runPaletteCommand(page, `command-provider-${provider}`) - await 
expect(providerButton).toHaveAttribute('aria-pressed', 'false') - }) - } - - for (const model of modelLabels) { - await test.step(`model command toggles ${model}`, async () => { - const modelButton = filters.getByRole('button', { name: model, exact: true }) - - await runPaletteCommand(page, `command-model-${model}`) - await expect(modelButton).toHaveAttribute('aria-pressed', 'true') - - await runPaletteCommand(page, `command-model-${model}`) - await expect(modelButton).toHaveAttribute('aria-pressed', 'false') - }) - } -}) - -test('executes scroll and filter navigation commands from the command palette', async ({ - page, -}) => { - await test.step('scroll commands reach the bottom and top of the dashboard', async () => { - await runPaletteCommand(page, 'command-bottom') - await expect.poll(() => page.evaluate(() => Math.round(window.scrollY))).toBeGreaterThan(400) - - await runPaletteCommand(page, 'command-top') - await expect.poll(() => page.evaluate(() => Math.round(window.scrollY))).toBeLessThan(40) - }) - - await test.step('filters command scrolls to the filter bar', async () => { - await page.evaluate(() => window.scrollTo({ top: document.body.scrollHeight })) - await runPaletteCommand(page, 'command-filters') - await waitForSectionNearTop(page, '#filters') - }) -}) - -test('executes dashboard section navigation commands from the command palette', async ({ - page, -}) => { - const dashboardCommands = sectionCommands.slice(0, 7) - - await runSectionNavigationCommands(page, dashboardCommands) - await expect(page.locator(dashboardCommands.at(-1)!.selector)).toBeVisible() -}) - -test('executes analysis section navigation commands from the command palette', async ({ page }) => { - const analysisCommands = sectionCommands.slice(7) + const palette = await openPalette(page) + await palette.locator('input').fill('daily') + await expect(palette.locator('[data-testid="command-view-daily"]')).toBeVisible() + await page.keyboard.press('1') - await 
runSectionNavigationCommands(page, analysisCommands) - await expect(page.locator(analysisCommands.at(-1)!.selector)).toBeVisible() + await expect(palette).toBeHidden() + await expect(filters.getByRole('combobox', { name: viewModeComboboxPattern })).toContainText( + dailyViewPattern, + ) }) -test('executes theme, language, help, and quick-select interactions from the command palette', async ({ - page, -}) => { - await test.step('theme command toggles the document theme', async () => { - const wasDark = await page.evaluate(() => document.documentElement.classList.contains('dark')) - await runPaletteCommand(page, 'command-theme') - await expect - .poll(async () => page.evaluate(() => document.documentElement.classList.contains('dark'))) - .toBe(!wasDark) - }) - - await test.step('quick-select runs the English language command from search result #1', async () => { - const palette = await openPalette(page) - await palette.locator('input').fill('english') - await expect(palette.locator('[data-testid="command-language-en"]')).toBeVisible() - await page.keyboard.press('1') - await expect(palette).toBeHidden() - await expect(page.locator('#filters').getByText('Filter status')).toBeVisible() - }) - - await test.step('German language command switches the UI back to German', async () => { - await runPaletteCommand(page, 'command-language-de') - await expect(page.locator('#filters').getByText('Filterstatus')).toBeVisible() - }) +test('executes representative section navigation and help commands', async ({ page }) => { + await page.evaluate(() => window.scrollTo({ top: document.body.scrollHeight })) + await runPaletteCommand(page, 'command-section-costAnalysis') + await waitForSectionNearTop(page, '#charts') - await test.step('help command opens the help panel', async () => { - await runPaletteCommand(page, 'command-help') + await runPaletteCommand(page, 'command-help') - const dialog = page.getByRole('dialog', { name: helpDialogTitlePattern }) - await 
expect(dialog).toBeVisible() - await page.keyboard.press('Escape') - await expect(dialog).toBeHidden() - }) + const dialog = page.getByRole('dialog', { name: helpDialogTitlePattern }) + await expect(dialog).toBeVisible() + await page.keyboard.press('Escape') + await expect(dialog).toBeHidden() }) diff --git a/tests/e2e/dashboard-forecast-filters.spec.ts b/tests/e2e/dashboard-forecast-filters.spec.ts index a300711..9ca0d19 100644 --- a/tests/e2e/dashboard-forecast-filters.spec.ts +++ b/tests/e2e/dashboard-forecast-filters.spec.ts @@ -1,4 +1,4 @@ -import { expect, test } from '@playwright/test' +import { expect, test } from './fixtures' import { gotoDashboard, resetAppState, uploadSampleUsage } from './helpers' const costForecastExpandPattern = @@ -97,21 +97,17 @@ test('exposes pressed filter state and supports keyboard date selection in the d const dateDialog = page.getByRole('dialog') await expect(dateDialog).toBeVisible() - const focusedDayBefore = await page.evaluate( - () => document.activeElement?.textContent?.trim() ?? '', - ) + const midMonthDay = dateDialog.locator('button[aria-pressed]').filter({ hasText: /^15$/ }) + await midMonthDay.focus() + await expect(midMonthDay).toBeFocused() + const focusedDayBefore = await midMonthDay.getAttribute('aria-label') + expect(focusedDayBefore ?? '').toMatch(/\d/) await page.keyboard.press('ArrowRight') - await page.waitForFunction( - (previous) => (document.activeElement?.textContent?.trim() ?? '') !== previous, - focusedDayBefore, - ) - - const focusedDayAfter = await page.evaluate( - () => document.activeElement?.textContent?.trim() ?? '', - ) - expect(focusedDayAfter).not.toBe(focusedDayBefore) - expect(focusedDayAfter).toMatch(/^\d+$/) + const focusedDay = dateDialog.locator('button[aria-pressed][tabindex="0"]') + await expect(focusedDay).not.toHaveAttribute('aria-label', focusedDayBefore ?? 
'') + await expect(focusedDay).toBeFocused() + await expect(focusedDay).toHaveAttribute('aria-label', /\d/) await page.keyboard.press('Enter') diff --git a/tests/e2e/dashboard-load-upload.spec.ts b/tests/e2e/dashboard-load-upload.spec.ts index 55af05b..34d1c3e 100644 --- a/tests/e2e/dashboard-load-upload.spec.ts +++ b/tests/e2e/dashboard-load-upload.spec.ts @@ -1,4 +1,4 @@ -import { expect, test } from '@playwright/test' +import { expect, test } from './fixtures' import { gotoDashboard, resetAppState, uploadSampleUsage } from './helpers' const importEntryButtonPattern = /^(Auto-Import|Auto import|Import)$/ diff --git a/tests/e2e/dashboard-reporting.spec.ts b/tests/e2e/dashboard-reporting.spec.ts index 43db4bc..f8d6e70 100644 --- a/tests/e2e/dashboard-reporting.spec.ts +++ b/tests/e2e/dashboard-reporting.spec.ts @@ -1,4 +1,4 @@ -import { expect, test } from '@playwright/test' +import { expect, test } from './fixtures' import { gotoDashboard, mockPdfReport, resetAppState, uploadSampleUsage } from './helpers' test('uses the current UI language when generating a PDF report after switching locale', async ({ diff --git a/tests/e2e/dashboard-settings-backups.spec.ts b/tests/e2e/dashboard-settings-backups.spec.ts index 7d5c277..4d91c49 100644 --- a/tests/e2e/dashboard-settings-backups.spec.ts +++ b/tests/e2e/dashboard-settings-backups.spec.ts @@ -1,7 +1,8 @@ import fsPromises from 'node:fs/promises' -import { expect, test } from '@playwright/test' +import { expect, test } from './fixtures' import { createApiAuthHeaders, + createApiUrl, createTrustedMutationHeaders, dailyViewPattern, filterStatusPattern, @@ -192,13 +193,15 @@ test('manages settings and backup imports through the settings dialog using isol await expect .poll(async () => { - const response = await page.request.get('/api/usage', { headers: createApiAuthHeaders() }) + const response = await page.request.get(createApiUrl('/api/usage', baseURL), { + headers: createApiAuthHeaders(), + }) const usage = await 
response.json() return usage.daily[0]?.date }) .toBe('2026-03-31') - const mergedUsageResponse = await page.request.get('/api/usage', { + const mergedUsageResponse = await page.request.get(createApiUrl('/api/usage', baseURL), { headers: createApiAuthHeaders(), }) expect(mergedUsageResponse.ok()).toBe(true) @@ -256,7 +259,7 @@ test('manages settings and backup imports through the settings dialog using isol expect(settingsImportResponse.ok()).toBe(true) await expect(page.getByRole('button', { name: exportSettingsButtonPattern })).toBeVisible() - const importedSettingsResponse = await page.request.get('/api/settings', { + const importedSettingsResponse = await page.request.get(createApiUrl('/api/settings', baseURL), { headers: createApiAuthHeaders(), }) expect(importedSettingsResponse.ok()).toBe(true) @@ -289,7 +292,7 @@ test('loads persisted settings on a fresh browser start and applies them immedia await resetAppState(page, baseURL) const trustedMutationHeaders = createTrustedMutationHeaders(baseURL) - const patchSettingsResponse = await page.request.patch('/api/settings', { + const patchSettingsResponse = await page.request.patch(createApiUrl('/api/settings', baseURL), { headers: trustedMutationHeaders, data: { language: 'en', @@ -317,7 +320,7 @@ test('loads persisted settings on a fresh browser start and applies them immedia }) expect(patchSettingsResponse.ok()).toBe(true) - const uploadResponse = await page.request.post('/api/upload', { + const uploadResponse = await page.request.post(createApiUrl('/api/upload', baseURL), { headers: trustedMutationHeaders, data: sampleUsage, }) diff --git a/tests/e2e/fixtures.ts b/tests/e2e/fixtures.ts new file mode 100644 index 0000000..220f3d4 --- /dev/null +++ b/tests/e2e/fixtures.ts @@ -0,0 +1,256 @@ +import fs from 'node:fs' +import path from 'node:path' +import { spawn, type ChildProcessWithoutNullStreams } from 'node:child_process' +import { test as base, expect, type Page } from '@playwright/test' + +type E2EServer = { + 
authSessionPath: string + authHeader: string + baseURL: string + bootstrapUrl: string + runtimeRoot: string +} + +type WorkerFixtures = { + e2eServer: E2EServer +} + +const startupTimeoutMs = 120_000 +const host = process.env.PLAYWRIGHT_TEST_HOST || '127.0.0.1' +const basePort = Number(process.env.PLAYWRIGHT_TEST_PORT || '3015') + +function sleep(ms: number) { + return new Promise((resolve) => setTimeout(resolve, ms)) +} + +function buildWorkerServer(workerIndex: number) { + const port = basePort + workerIndex + const authToken = `ttdash-playwright-local-auth-token-worker-${workerIndex}-port-${port}` + const authHeader = `Bearer ${authToken}` + const runtimeRoot = path.join( + process.cwd(), + '.tmp-playwright', + 'workers', + String(workerIndex), + 'app', + ) + // The session file is only a server-readiness signal; test credentials come from env. + const authSessionPath = path.join(runtimeRoot, 'config', 'session-auth.json') + + return { + authSessionPath, + authHeader, + baseURL: `http://${host}:${port}`, + bootstrapUrl: `http://${host}:${port}/?ttdash_token=${encodeURIComponent(authToken)}`, + env: { + ...process.env, + HOST: host, + NO_OPEN_BROWSER: '1', + PLAYWRIGHT_TEST_HOST: host, + PLAYWRIGHT_TEST_PORT: String(port), + PLAYWRIGHT_TEST_RUNTIME_ROOT: runtimeRoot, + PORT: String(port), + TTDASH_LOCAL_AUTH_TOKEN: authToken, + }, + runtimeRoot, + } +} + +function createOutputBuffer(serverProcess: ChildProcessWithoutNullStreams) { + let output = '' + + const append = (chunk: Buffer) => { + output += chunk.toString('utf8') + if (output.length > 16_000) { + output = output.slice(-16_000) + } + } + + serverProcess.stdout.on('data', append) + serverProcess.stderr.on('data', append) + + return () => output.trim() +} + +async function waitForServerReady( + serverProcess: ChildProcessWithoutNullStreams, + baseURL: string, + authSessionPath: string, + authHeader: string, + readOutput: () => string, +) { + let rejectSpawnError: ((error: Error) => void) | null = null + 
const spawnErrorPromise = new Promise((_, reject) => { + rejectSpawnError = reject + }) + const onSpawnError = (error: Error) => { + rejectSpawnError?.( + new Error(`Playwright server spawn error: ${error.message}\n${readOutput()}`), + ) + } + + serverProcess.once('error', onSpawnError) + + const readinessPromise = (async () => { + const deadline = Date.now() + startupTimeoutMs + let lastError = '' + + while (Date.now() < deadline) { + if (serverProcess.exitCode !== null) { + throw new Error( + `Playwright test server exited with code ${serverProcess.exitCode}.\n${readOutput()}`, + ) + } + + try { + const response = await fetch(baseURL, { + redirect: 'manual', + signal: AbortSignal.timeout(1_000), + }) + + if (response.status < 400) { + if (!fs.existsSync(authSessionPath)) { + lastError = `Waiting for server readiness signal file at ${authSessionPath}` + } else { + const apiResponse = await fetch(new URL('/api/usage', baseURL), { + headers: { Authorization: authHeader }, + signal: AbortSignal.timeout(1_000), + }) + + if (apiResponse.status === 200) { + return + } + + lastError = `GET /api/usage returned ${apiResponse.status}` + } + } + } catch (error) { + lastError = error instanceof Error ? error.message : String(error) + } + + await sleep(250) + } + + throw new Error( + `Timed out waiting for Playwright test server at ${baseURL}. 
Last error: ${lastError}\n${readOutput()}`, + ) + })() + + try { + await Promise.race([spawnErrorPromise, readinessPromise]) + } finally { + serverProcess.off('error', onSpawnError) + } +} + +async function stopServer(serverProcess: ChildProcessWithoutNullStreams) { + if (serverProcess.exitCode !== null) { + return + } + + const waitForExit = (timeoutMs: number) => { + if (serverProcess.exitCode !== null) { + return Promise.resolve(true) + } + + return new Promise((resolve) => { + const timeoutId = setTimeout(() => { + cleanup() + resolve(false) + }, timeoutMs) + + const cleanup = () => { + clearTimeout(timeoutId) + serverProcess.off('exit', onExit) + } + + const onExit = () => { + cleanup() + resolve(true) + } + + serverProcess.once('exit', onExit) + }) + } + + serverProcess.kill('SIGTERM') + + const exited = await waitForExit(5_000) + + if (!exited && serverProcess.exitCode === null) { + serverProcess.kill('SIGKILL') + await waitForExit(2_000) + } +} + +export const test = base.extend, WorkerFixtures>({ + e2eServer: [ + async ({}, runFixture, workerInfo) => { + const workerServer = buildWorkerServer(workerInfo.parallelIndex) + fs.rmSync(workerServer.runtimeRoot, { recursive: true, force: true }) + const serverProcess = spawn(process.execPath, ['scripts/start-test-server.js'], { + cwd: process.cwd(), + env: workerServer.env, + }) + const readOutput = createOutputBuffer(serverProcess) + const previousRuntimeRoot = process.env.PLAYWRIGHT_TEST_RUNTIME_ROOT + const previousAuthSessionPath = process.env.PLAYWRIGHT_TEST_AUTH_SESSION_PATH + const previousAuthorizationHeader = process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER + const previousBootstrapUrl = process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL + + process.env.PLAYWRIGHT_TEST_RUNTIME_ROOT = workerServer.runtimeRoot + process.env.PLAYWRIGHT_TEST_AUTH_SESSION_PATH = workerServer.authSessionPath + process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER = workerServer.authHeader + process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL = 
workerServer.bootstrapUrl + + try { + await waitForServerReady( + serverProcess, + workerServer.baseURL, + workerServer.authSessionPath, + workerServer.authHeader, + readOutput, + ) + await runFixture({ + authSessionPath: workerServer.authSessionPath, + authHeader: workerServer.authHeader, + baseURL: workerServer.baseURL, + bootstrapUrl: workerServer.bootstrapUrl, + runtimeRoot: workerServer.runtimeRoot, + }) + } finally { + await stopServer(serverProcess) + + if (previousRuntimeRoot === undefined) { + delete process.env.PLAYWRIGHT_TEST_RUNTIME_ROOT + } else { + process.env.PLAYWRIGHT_TEST_RUNTIME_ROOT = previousRuntimeRoot + } + + if (previousAuthSessionPath === undefined) { + delete process.env.PLAYWRIGHT_TEST_AUTH_SESSION_PATH + } else { + process.env.PLAYWRIGHT_TEST_AUTH_SESSION_PATH = previousAuthSessionPath + } + + if (previousAuthorizationHeader === undefined) { + delete process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER + } else { + process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER = previousAuthorizationHeader + } + + if (previousBootstrapUrl === undefined) { + delete process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL + } else { + process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL = previousBootstrapUrl + } + } + }, + { scope: 'worker', timeout: startupTimeoutMs }, + ], + baseURL: async ({ e2eServer }, runFixture) => { + await runFixture(e2eServer.baseURL) + }, +}) + +export { expect, type Page } diff --git a/tests/e2e/helpers.ts b/tests/e2e/helpers.ts index 77f8dd2..6cf9a14 100644 --- a/tests/e2e/helpers.ts +++ b/tests/e2e/helpers.ts @@ -1,19 +1,17 @@ import fs from 'node:fs' import fsPromises from 'node:fs/promises' import path from 'node:path' -import { expect, type Download, type Page } from '@playwright/test' +import { + expect, + type APIRequestContext, + type APIResponse, + type Download, + type Page, +} from '@playwright/test' import { TOKTRACK_VERSION } from '../../shared/toktrack-version.js' export const sampleUsagePath = path.join(process.cwd(), 'examples', 
'sample-usage.json') -const localAuthSessionPath = path.join( - process.cwd(), - '.tmp-playwright', - 'app', - 'config', - 'session-auth.json', -) - export const uploadToastPattern = /^(Datei sample-usage\.json erfolgreich geladen|File sample-usage\.json loaded successfully)$/ export const viewModeComboboxPattern = /^(Ansichtsmodus|View mode)$/ @@ -47,10 +45,93 @@ type InitScriptTarget = { addInitScript: (script: () => void) => Promise } +type ApiRequestMethod = 'DELETE' | 'GET' | 'PATCH' | 'POST' +type ApiRequestOptions = Parameters[1] + +const transientApiRequestErrorPattern = + /(socket hang up|ECONNRESET|ECONNREFUSED|EPIPE|ETIMEDOUT|Request context disposed|Target page, context or browser has been closed)/i +const apiRequestRetryAttempts = 3 +const apiRequestRetryDelayMs = 150 + export const sampleUsage = JSON.parse(fs.readFileSync(sampleUsagePath, 'utf-8')) as SampleUsage +function sleep(ms: number) { + return new Promise((resolve) => setTimeout(resolve, ms)) +} + +function isTransientApiRequestError(error: unknown) { + const message = error instanceof Error ? error.message : String(error) + return transientApiRequestErrorPattern.test(message) +} + +async function apiResponseErrorMessage(response: APIResponse, context: string) { + let body = '' + + try { + body = await response.text() + } catch (error) { + body = `Could not read response body: ${error instanceof Error ? 
error.message : String(error)}` + } + + return `${context} failed with ${response.status()} ${response.statusText()}: ${body}` +} + +async function expectApiResponseOk(response: APIResponse, context: string) { + if (response.ok()) { + return + } + + throw new Error(await apiResponseErrorMessage(response, context)) +} + +async function requestApiWithRetry( + request: APIRequestContext, + method: ApiRequestMethod, + url: string, + options: ApiRequestOptions = {}, +) { + let lastError: unknown + + for (let attempt = 1; attempt <= apiRequestRetryAttempts; attempt += 1) { + try { + return await request.fetch(url, { + ...options, + method, + }) + } catch (error) { + lastError = error + if (attempt === apiRequestRetryAttempts || !isTransientApiRequestError(error)) { + throw error + } + + await sleep(apiRequestRetryDelayMs * attempt) + } + } + + throw lastError +} + +export function createApiUrl(pathname: string, baseURL?: string) { + if (!baseURL) { + throw new Error('Playwright baseURL is required for API requests') + } + + return new URL(pathname, baseURL).toString() +} + +function getRequiredPlaywrightEnv(name: string) { + const value = process.env[name] + if (!value) { + throw new Error(`${name} is required for Playwright API authentication`) + } + return value +} + export function readLocalAuthSession() { - return JSON.parse(fs.readFileSync(localAuthSessionPath, 'utf-8')) as LocalAuthSession + return { + authorizationHeader: getRequiredPlaywrightEnv('PLAYWRIGHT_TEST_AUTHORIZATION_HEADER'), + bootstrapUrl: getRequiredPlaywrightEnv('PLAYWRIGHT_TEST_BOOTSTRAP_URL'), + } satisfies LocalAuthSession } export function createApiAuthHeaders() { @@ -72,8 +153,21 @@ export function createTrustedMutationHeaders(baseURL?: string) { export async function resetAppState(page: Page, baseURL?: string) { const trustedMutationHeaders = createTrustedMutationHeaders(baseURL) - await page.request.delete('/api/usage', { headers: trustedMutationHeaders }) - await 
page.request.delete('/api/settings', { headers: trustedMutationHeaders }) + const usageResponse = await requestApiWithRetry( + page.request, + 'DELETE', + createApiUrl('/api/usage', baseURL), + { headers: trustedMutationHeaders }, + ) + await expectApiResponseOk(usageResponse, 'DELETE /api/usage') + + const settingsResponse = await requestApiWithRetry( + page.request, + 'DELETE', + createApiUrl('/api/settings', baseURL), + { headers: trustedMutationHeaders }, + ) + await expectApiResponseOk(settingsResponse, 'DELETE /api/settings') } export async function gotoDashboard(page: Page) { @@ -117,12 +211,17 @@ export async function seedUsage( usageData: SampleUsage = buildRelativeUsageData(), ) { const trustedMutationHeaders = createTrustedMutationHeaders(baseURL) - const uploadResponse = await page.request.post('/api/upload', { - headers: trustedMutationHeaders, - data: usageData, - }) - - expect(uploadResponse.ok()).toBe(true) + const uploadResponse = await requestApiWithRetry( + page.request, + 'POST', + createApiUrl('/api/upload', baseURL), + { + headers: trustedMutationHeaders, + data: usageData, + }, + ) + + await expectApiResponseOk(uploadResponse, 'POST /api/upload') return usageData } diff --git a/tests/frontend/auto-import-modal.test.tsx b/tests/frontend/auto-import-modal.test.tsx index 74641b4..d09698b 100644 --- a/tests/frontend/auto-import-modal.test.tsx +++ b/tests/frontend/auto-import-modal.test.tsx @@ -31,7 +31,7 @@ describe('AutoImportModal', () => { expect(closeMock).toHaveBeenCalledTimes(1) expect(onOpenChange).toHaveBeenCalledWith(false) - }, 15_000) + }) it('shows the concrete auto-import error message in the status area', () => { autoImportMocks.startAutoImport.mockImplementation((callbacks) => { diff --git a/tests/frontend/chart-card.test.tsx b/tests/frontend/chart-card.test.tsx index 4a13fba..78cec1f 100644 --- a/tests/frontend/chart-card.test.tsx +++ b/tests/frontend/chart-card.test.tsx @@ -29,7 +29,7 @@ describe('ChartCard', () => { 
expect(screen.getByText('Total')).toBeInTheDocument() expect(screen.getByText('Data points')).toBeInTheDocument() - }, 15_000) + }) it('reveals the expand control for keyboard focus on desktop', () => { renderWithAppProviders( diff --git a/tests/frontend/chart-legend-integration.test.tsx b/tests/frontend/chart-legend-integration.test.tsx index ac338f2..1b25c44 100644 --- a/tests/frontend/chart-legend-integration.test.tsx +++ b/tests/frontend/chart-legend-integration.test.tsx @@ -1,117 +1,55 @@ // @vitest-environment jsdom -import { cloneElement, type ReactElement, type ReactNode } from 'react' import { render, screen } from '@testing-library/react' -import { beforeEach, describe, expect, it, vi } from 'vitest' -import { CostByModel } from '@/components/charts/CostByModel' -import { RequestsOverTime } from '@/components/charts/RequestsOverTime' -import { TokenTypes } from '@/components/charts/TokenTypes' -import { TooltipProvider } from '@/components/ui/tooltip' -import { initI18n } from '@/lib/i18n' -import { MockSvgContainer, MockSvgGroup } from '../recharts-test-utils' +import { describe, expect, it } from 'vitest' +import { ChartLegend } from '@/components/charts/ChartLegend' -let lastLegendPayload: Array<{ value: string; color: string }> = [] - -vi.mock('recharts', () => ({ - ResponsiveContainer: ({ children }: { children: ReactNode }) => ( - {children} - ), - PieChart: ({ children }: { children: ReactNode }) => ( - {children} - ), - ComposedChart: ({ children }: { children: ReactNode }) => ( - {children} - ), - Pie: ({ children, data = [] }: { children: ReactNode; data?: Array<{ name: string }> }) => { - lastLegendPayload = data.map((entry, index) => ({ - value: entry.name, - color: `hsl(${(index + 1) * 40} 70% 50%)`, - })) - return {children} - }, - Legend: ({ content }: { content?: ReactElement }) => - content ? 
( - {cloneElement(content, { payload: lastLegendPayload })} - ) : null, - Tooltip: () => null, - Cell: () => null, - Area: () => null, - Line: () => null, - XAxis: () => null, - YAxis: () => null, - CartesianGrid: () => null, -})) - -describe('Chart legend integrations', () => { - beforeEach(async () => { - lastLegendPayload = [] - vi.stubGlobal( - 'IntersectionObserver', - class { - observe() {} - unobserve() {} - disconnect() {} - }, - ) - await initI18n('en') - }) - - it('wraps CostByModel legend labels instead of relying on horizontal scroll', () => { +describe('ChartLegend', () => { + it('wraps legend labels instead of relying on horizontal scroll', () => { const { container } = render( - - - , + , ) - expect(screen.getByText(/GPT-5\.4 \(\$60(?:\.0+)?\)/)).toBeInTheDocument() - expect(screen.getByText(/Claude Sonnet 4\.5 \(\$25(?:\.0+)?\)/)).toBeInTheDocument() + expect(screen.getByText('GPT-5.4 ($60)')).toBeInTheDocument() + expect(screen.getByText('Claude Sonnet 4.5 ($25)')).toBeInTheDocument() expect(container.querySelector('.overflow-x-auto')).toBeNull() expect(container.querySelector('.flex-wrap')).not.toBeNull() }) - it('wraps TokenTypes legend labels instead of relying on horizontal scroll', () => { + it('keeps custom rendered labels in the wrapped legend layout', () => { const { container } = render( - - - , + `${entry.value} token type`} + />, ) - expect(screen.getByText('Cache Write (1.2k)')).toBeInTheDocument() - expect(screen.getByText('Cache Read (950)')).toBeInTheDocument() + expect(screen.getByText('Cache Write token type')).toBeInTheDocument() + expect(screen.getByText('Cache Read token type')).toBeInTheDocument() expect(container.querySelector('.overflow-x-auto')).toBeNull() expect(container.querySelector('.flex-wrap')).not.toBeNull() }) - it('wraps RequestsOverTime donut legend labels instead of relying on horizontal scroll', () => { - const { container } = render( - - - , + it('filters hidden entries before rendering the wrapped payload', () 
=> { + render( + entry.value !== 'Hidden model'} + />, ) - expect(screen.getByText('GPT-5.4 (80)')).toBeInTheDocument() - expect(screen.getByText('Claude Sonnet 4.5 (40)')).toBeInTheDocument() - expect(container.querySelector('.overflow-x-auto')).toBeNull() - expect(container.querySelector('.flex-wrap')).not.toBeNull() + expect(screen.getByText('Visible model')).toBeInTheDocument() + expect(screen.queryByText('Hidden model')).not.toBeInTheDocument() }) }) diff --git a/tests/frontend/command-palette-action-groups.test.tsx b/tests/frontend/command-palette-action-groups.test.tsx deleted file mode 100644 index 2679b5b..0000000 --- a/tests/frontend/command-palette-action-groups.test.tsx +++ /dev/null @@ -1,118 +0,0 @@ -// @vitest-environment jsdom - -import { fireEvent, screen, within } from '@testing-library/react' -import type { ComponentProps } from 'react' -import { beforeEach, describe, expect, it, vi } from 'vitest' -import { CommandPalette } from '@/components/features/command-palette/CommandPalette' -import { DEFAULT_APP_SETTINGS } from '@/lib/app-settings' -import { initI18n } from '@/lib/i18n' -import { renderWithAppProviders } from '../test-utils' - -type CommandPaletteProps = ComponentProps - -function buildCommandPaletteProps( - overrides: Partial = {}, -): CommandPaletteProps { - const noop = () => {} - - return { - isDark: true, - availableProviders: [], - selectedProviders: [], - availableModels: [], - selectedModels: [], - hasTodaySection: false, - hasMonthSection: false, - hasRequestSection: false, - sectionVisibility: { ...DEFAULT_APP_SETTINGS.sectionVisibility }, - sectionOrder: [...DEFAULT_APP_SETTINGS.sectionOrder], - reportGenerating: false, - onToggleTheme: noop, - onExportCSV: noop, - onGenerateReport: noop, - onDelete: noop, - onUpload: noop, - onAutoImport: noop, - onOpenSettings: noop, - onScrollTo: noop, - onViewModeChange: noop, - onApplyPreset: noop, - onToggleProvider: noop, - onToggleModel: noop, - onClearProviders: noop, - 
onClearModels: noop, - onClearDateRange: noop, - onResetAll: noop, - onHelp: noop, - onLanguageChange: noop, - ...overrides, - } -} - -function renderCommandPalette(overrides: Partial = {}) { - return renderWithAppProviders() -} - -function openCommandPalette() { - fireEvent.keyDown(document, { key: 'k', metaKey: true }) -} - -function getCommandGroup(name: string) { - const group = screen.getByText(name).closest('[cmdk-group]') - expect(group).not.toBeNull() - return group as HTMLElement -} - -describe('CommandPalette action groups', () => { - beforeEach(async () => { - Element.prototype.scrollIntoView = vi.fn() - await initI18n('en') - }) - - it('keeps action command ids stable while separating daily, export, and maintenance groups', async () => { - renderCommandPalette() - - openCommandPalette() - expect(await screen.findByRole('dialog', { name: 'Command palette' })).toBeInTheDocument() - - const loadDataGroup = getCommandGroup('Load data') - expect(within(loadDataGroup).getByTestId('command-auto-import')).toBeInTheDocument() - expect(within(loadDataGroup).getByTestId('command-upload')).toBeInTheDocument() - - const exportsGroup = getCommandGroup('Exports') - expect(within(exportsGroup).getByTestId('command-csv')).toBeInTheDocument() - expect(within(exportsGroup).getByTestId('command-report')).toBeInTheDocument() - - const maintenanceGroup = getCommandGroup('Maintenance') - expect(within(maintenanceGroup).getByTestId('command-settings-open')).toBeInTheDocument() - expect(within(maintenanceGroup).getByTestId('command-delete')).toBeInTheDocument() - - for (const commandId of [ - 'command-auto-import', - 'command-upload', - 'command-csv', - 'command-report', - 'command-settings-open', - 'command-delete', - ]) { - expect(screen.getByTestId(commandId)).toBeInTheDocument() - } - }) - - it('localizes the command group labels', async () => { - const currentLanguage = document.documentElement.lang - - try { - await initI18n('de') - renderCommandPalette() - - 
openCommandPalette() - - expect(await screen.findByText('Daten laden')).toBeInTheDocument() - expect(screen.getByText('Exporte')).toBeInTheDocument() - expect(screen.getByText('Wartung')).toBeInTheDocument() - } finally { - await initI18n(currentLanguage || 'en') - } - }) -}) diff --git a/tests/frontend/command-palette-language.test.tsx b/tests/frontend/command-palette-language.test.tsx new file mode 100644 index 0000000..95cfe8b --- /dev/null +++ b/tests/frontend/command-palette-language.test.tsx @@ -0,0 +1,84 @@ +// @vitest-environment jsdom + +import { fireEvent, screen } from '@testing-library/react' +import type { ComponentProps } from 'react' +import { afterAll, beforeAll, describe, expect, it, vi } from 'vitest' +import { CommandPalette } from '@/components/features/command-palette/CommandPalette' +import { DEFAULT_APP_SETTINGS } from '@/lib/app-settings' +import { initI18n } from '@/lib/i18n' +import { renderWithAppProviders } from '../test-utils' + +type CommandPaletteProps = ComponentProps + +function buildCommandPaletteProps( + overrides: Partial = {}, +): CommandPaletteProps { + const noop = () => {} + + return { + isDark: true, + availableProviders: [], + selectedProviders: [], + availableModels: [], + selectedModels: [], + hasTodaySection: false, + hasMonthSection: false, + hasRequestSection: false, + sectionVisibility: { ...DEFAULT_APP_SETTINGS.sectionVisibility }, + sectionOrder: [...DEFAULT_APP_SETTINGS.sectionOrder], + reportGenerating: false, + onToggleTheme: noop, + onExportCSV: noop, + onGenerateReport: noop, + onDelete: noop, + onUpload: noop, + onAutoImport: noop, + onOpenSettings: noop, + onScrollTo: noop, + onViewModeChange: noop, + onApplyPreset: noop, + onToggleProvider: noop, + onToggleModel: noop, + onClearProviders: noop, + onClearModels: noop, + onClearDateRange: noop, + onResetAll: noop, + onHelp: noop, + onLanguageChange: noop, + ...overrides, + } +} + +describe('Command palette language', () => { + let originalScrollIntoView: 
Element['scrollIntoView'] | undefined + + beforeAll(async () => { + originalScrollIntoView = Element.prototype.scrollIntoView + Element.prototype.scrollIntoView = vi.fn() + await initI18n('de') + }) + + afterAll(async () => { + if (originalScrollIntoView) { + Element.prototype.scrollIntoView = originalScrollIntoView + } else { + delete (Element.prototype as { scrollIntoView?: Element['scrollIntoView'] }).scrollIntoView + } + + await initI18n('en') + }) + + it('localizes action groups while keeping representative command ids renderable', async () => { + renderWithAppProviders() + + fireEvent.keyDown(document, { key: 'k', metaKey: true }) + + expect(await screen.findByRole('dialog', { name: 'Befehlspalette' })).toBeInTheDocument() + expect(screen.getByText('Daten laden')).toBeInTheDocument() + expect(screen.getByText('Exporte')).toBeInTheDocument() + expect(screen.getByText('Wartung')).toBeInTheDocument() + expect(screen.getByTestId('command-auto-import')).toBeInTheDocument() + expect(screen.getByTestId('command-csv')).toBeInTheDocument() + expect(screen.getByTestId('command-settings-open')).toBeInTheDocument() + }) +}) diff --git a/tests/frontend/cost-by-weekday.test.tsx b/tests/frontend/cost-by-weekday.test.tsx new file mode 100644 index 0000000..8a15a35 --- /dev/null +++ b/tests/frontend/cost-by-weekday.test.tsx @@ -0,0 +1,51 @@ +// @vitest-environment jsdom + +import { screen } from '@testing-library/react' +import { beforeAll, describe, expect, it } from 'vitest' +import { + assertLegacyWeekdayData, + legacyWeekdayData, + weekdayData, +} from './lazy-dashboard-chart-test-utils' +import { CostByWeekday } from '@/components/charts/CostByWeekday' +import { initI18n } from '@/lib/i18n' +import { renderWithTooltip } from '../test-utils' + +describe('CostByWeekday chart', () => { + beforeAll(async () => { + await initI18n('en') + }) + + it('renders peak and low bars without a browser-only chart dependency', () => { + renderWithTooltip() + + 
expect(screen.getByText('Cost by weekday')).toBeInTheDocument() + expect(screen.getByText('Peak: Tu · Low: Sa · Weekend 19%')).toBeInTheDocument() + expect(screen.getByTestId('chart-bar')).toHaveAttribute('data-name', 'Avg cost') + const chartPayload = JSON.parse( + screen.getByTestId('bar-chart').getAttribute('data-chart') ?? '[]', + ) as Array<{ day: string }> + expect(chartPayload.map((entry) => entry.day)).toEqual([ + 'Mo', + 'Tu', + 'We', + 'Th', + 'Fr', + 'Sa', + 'Su', + ]) + + const fills = screen.getAllByTestId('bar-cell').map((cell) => cell.getAttribute('data-fill')) + expect(fills).toHaveLength(7) + expect(fills.some((fill) => fill?.includes('weekdayPeak'))).toBe(true) + expect(fills.some((fill) => fill?.includes('weekdayLow'))).toBe(true) + }) + + it('uses localized day labels as a defensive weekend fallback without weekday indices', () => { + assertLegacyWeekdayData(legacyWeekdayData) + + renderWithTooltip() + + expect(screen.getByText('Peak: Mar · Low: Sáb. · Weekend 19%')).toBeInTheDocument() + }) +}) diff --git a/tests/frontend/dashboard-controller-actions.test.tsx b/tests/frontend/dashboard-controller-actions.test.tsx index 3187acb..8dd9feb 100644 --- a/tests/frontend/dashboard-controller-actions.test.tsx +++ b/tests/frontend/dashboard-controller-actions.test.tsx @@ -189,6 +189,7 @@ describe('useDashboardControllerWithBootstrap actions', () => { addedDays: 2, unchangedDays: 0, conflictingDays: 0, + skippedDays: 0, totalDays: 2, }) diff --git a/tests/frontend/dashboard-controller-test-helpers.ts b/tests/frontend/dashboard-controller-test-helpers.ts index 771c865..7e5603b 100644 --- a/tests/frontend/dashboard-controller-test-helpers.ts +++ b/tests/frontend/dashboard-controller-test-helpers.ts @@ -121,6 +121,7 @@ export function createDashboardSectionsViewModel( viewMode: 'daily', totalCalendarDays: 0, filteredData: [], + dailyCosts: [], filteredDailyData: [], todayData: null, hasCurrentMonthData: false, diff --git 
a/tests/frontend/dashboard-error-state.test.tsx b/tests/frontend/dashboard-error-state.test.tsx index 0edbbd5..0832475 100644 --- a/tests/frontend/dashboard-error-state.test.tsx +++ b/tests/frontend/dashboard-error-state.test.tsx @@ -109,6 +109,7 @@ describe('Dashboard fatal load state', () => { addedDays: 0, unchangedDays: 0, conflictingDays: 0, + skippedDays: 0, totalDays: 0, }) toktrackVersionStatusMocks.scheduleToktrackVersionStatusWarmup.mockClear() @@ -137,7 +138,7 @@ describe('Dashboard fatal load state', () => { await waitFor(() => expect(apiMocks.deleteSettings).toHaveBeenCalledTimes(1)) expect(await screen.findByText('Settings reset')).toBeInTheDocument() - }, 15_000) + }) it('renders a fatal usage error state with a delete action for corrupted stored data', async () => { const mutateAsync = vi.fn().mockResolvedValue(undefined) diff --git a/tests/frontend/dashboard-language-regressions.test.tsx b/tests/frontend/dashboard-language-regressions.test.tsx index 5298895..84015e5 100644 --- a/tests/frontend/dashboard-language-regressions.test.tsx +++ b/tests/frontend/dashboard-language-regressions.test.tsx @@ -1,7 +1,7 @@ // @vitest-environment jsdom import { fireEvent, screen } from '@testing-library/react' -import { beforeAll, describe, expect, it } from 'vitest' +import { afterAll, beforeAll, describe, expect, it, vi } from 'vitest' import { PrimaryMetrics } from '@/components/cards/PrimaryMetrics' import { ChartCard } from '@/components/charts/ChartCard' import { UsageInsights } from '@/components/features/insights/UsageInsights' @@ -46,10 +46,22 @@ const emptyMetrics: DashboardMetrics = { } describe('Dashboard language regressions', () => { + let originalScrollIntoView: Element['scrollIntoView'] | undefined + beforeAll(async () => { + originalScrollIntoView = Element.prototype.scrollIntoView + Element.prototype.scrollIntoView = vi.fn() await initI18n('de') }) + afterAll(() => { + if (originalScrollIntoView) { + Element.prototype.scrollIntoView = 
originalScrollIntoView + } else { + delete (Element.prototype as { scrollIntoView?: Element['scrollIntoView'] }).scrollIntoView + } + }) + it('localizes expanded chart actions in German', () => { renderWithAppProviders( ({ + cancelPreloads: vi.fn(), + scheduleDashboardPreloads: vi.fn(), +})) + +dashboardMotionMocks.scheduleDashboardPreloads.mockImplementation(() => ({ + cancel: dashboardMotionMocks.cancelPreloads, +})) + +vi.mock('@/components/dashboard/DashboardMotion', () => ({ + AnimatedDashboardSection: ({ + children, + eager, + id, + onPreload, + placeholderClassName, + }: { + children: ReactNode + eager?: boolean + id: string + onPreload?: () => void | Promise + placeholderClassName?: string + }) => ( +
+ {children} +
+ ), + scheduleDashboardPreloads: dashboardMotionMocks.scheduleDashboardPreloads, +})) + +vi.mock('@/components/ui/error-boundary', () => ({ + ErrorBoundary: ({ children }: { children: ReactNode }) => <>{children}, +})) + +vi.mock('@/components/ui/skeleton', () => ({ + ChartCardSkeleton: ({ className }: { className?: string }) => ( +
+ ), +})) + +vi.mock('@/components/ui/section-header', () => ({ + SectionHeader: ({ title }: { title: string }) =>

{title}

, +})) + +vi.mock('@/components/ui/expandable-card', () => ({ + ExpandableCard: ({ children, title }: { children: ReactNode; title: string }) => ( +
{children}
+ ), +})) + +vi.mock('@/components/cards/PrimaryMetrics', () => ({ + PrimaryMetrics: () =>
, +})) +vi.mock('@/components/cards/SecondaryMetrics', () => ({ + SecondaryMetrics: () =>
, +})) +vi.mock('@/components/cards/TodayMetrics', () => ({ + TodayMetrics: () =>
, +})) +vi.mock('@/components/cards/MonthMetrics', () => ({ + MonthMetrics: () =>
, +})) +vi.mock('@/components/features/heatmap/HeatmapCalendar', () => ({ + HeatmapCalendar: ({ metric }: { metric: string }) =>
, +})) +vi.mock('@/components/features/insights/UsageInsights', () => ({ + UsageInsights: () =>
, +})) +vi.mock('@/components/features/risk/ConcentrationRisk', () => ({ + ConcentrationRisk: () =>
, +})) + +vi.mock('@/components/features/forecast/CostForecast', () => ({ + CostForecast: ({ onExpand }: { onExpand: () => void }) => ( + + ), +})) +vi.mock('@/components/features/forecast/ProviderCostForecast', () => ({ + ProviderCostForecast: ({ onExpand }: { onExpand: () => void }) => ( + + ), +})) +vi.mock('@/components/features/forecast/ForecastZoomDialog', () => ({ + ForecastZoomDialog: ({ open }: { open: boolean }) => ( +
{String(open)}
+ ), +})) +vi.mock('@/components/features/cache-roi/CacheROI', () => ({ + CacheROI: () =>
, +})) +vi.mock('@/components/features/limits/ProviderLimitsSection', () => ({ + ProviderLimitsSection: () =>
, +})) +vi.mock('@/components/features/request-quality/RequestQuality', () => ({ + RequestQuality: () =>
, +})) +vi.mock('@/components/features/comparison/PeriodComparison', () => ({ + PeriodComparison: () =>
, +})) +vi.mock('@/components/features/anomaly/AnomalyDetection', () => ({ + AnomalyDetection: () =>
, +})) + +vi.mock('@/components/charts/CostOverTime', () => ({ + CostOverTime: () =>
, +})) +vi.mock('@/components/charts/CostByModel', () => ({ + CostByModel: () =>
, +})) +vi.mock('@/components/charts/CostByModelOverTime', () => ({ + CostByModelOverTime: () =>
, +})) +vi.mock('@/components/charts/CumulativeCost', () => ({ + CumulativeCost: () =>
, +})) +vi.mock('@/components/charts/CumulativeCostPerProvider', () => ({ + CumulativeCostPerProvider: () =>
, +})) +vi.mock('@/components/charts/CostByWeekday', () => ({ + CostByWeekday: () =>
, +})) +vi.mock('@/components/charts/TokenEfficiency', () => ({ + TokenEfficiency: () =>
, +})) +vi.mock('@/components/charts/ModelMix', () => ({ + ModelMix: () =>
, +})) +vi.mock('@/components/charts/TokensOverTime', () => ({ + TokensOverTime: () =>
, +})) +vi.mock('@/components/charts/TokenTypes', () => ({ + TokenTypes: () =>
, +})) +vi.mock('@/components/charts/RequestsOverTime', () => ({ + RequestsOverTime: () =>
, +})) +vi.mock('@/components/charts/RequestCacheHitRateByModel', () => ({ + RequestCacheHitRateByModel: () =>
, +})) +vi.mock('@/components/charts/DistributionAnalysis', () => ({ + DistributionAnalysis: () =>
, +})) +vi.mock('@/components/charts/CorrelationAnalysis', () => ({ + CorrelationAnalysis: () =>
, +})) + +vi.mock('@/components/tables/ModelEfficiency', () => ({ + ModelEfficiency: () =>
, +})) +vi.mock('@/components/tables/ProviderEfficiency', () => ({ + ProviderEfficiency: () =>
, +})) +vi.mock('@/components/tables/RecentDays', () => ({ + RecentDays: () =>
, +})) + +function createVisibility(overrides: Partial> = {}) { + return { + ...DEFAULT_APP_SETTINGS.sectionVisibility, + ...overrides, + } +} + +describe('DashboardSections rendering contracts', () => { + beforeEach(async () => { + await initI18n('en') + dashboardMotionMocks.cancelPreloads.mockClear() + dashboardMotionMocks.scheduleDashboardPreloads.mockClear() + }) + + it('renders configured sections in order and skips sections without required data', () => { + const viewModel = createDashboardSectionsViewModel({ + layout: { + sectionOrder: [ + 'metrics', + 'today', + 'currentMonth', + 'requestAnalysis', + 'costAnalysis', + 'tokenAnalysis', + ], + sectionVisibility: createVisibility(), + }, + overview: { + hasCurrentMonthData: false, + todayData: null, + }, + requestAnalysis: { + metrics: { + ...createDashboardSectionsViewModel().requestAnalysis.metrics, + hasRequestData: false, + }, + }, + }) + + renderWithAppProviders() + + expect(screen.getByTestId('section-metrics')).toHaveAttribute('data-eager', 'true') + expect(screen.queryByTestId('section-today')).not.toBeInTheDocument() + expect(screen.queryByTestId('section-current-month')).not.toBeInTheDocument() + expect(screen.queryByTestId('section-request-analysis')).not.toBeInTheDocument() + expect(Array.from(document.querySelectorAll('section')).map((section) => section.id)).toEqual([ + 'metrics', + 'charts', + 'token-analysis', + ]) + }) + + it('schedules warmup preloads for visible lazy sections and cancels them on unmount', () => { + const viewModel = createDashboardSectionsViewModel({ + layout: { + sectionOrder: ['forecastCache', 'requestAnalysis', 'limits'], + sectionVisibility: createVisibility(), + }, + requestAnalysis: { + metrics: { + ...createDashboardSectionsViewModel().requestAnalysis.metrics, + hasRequestData: false, + }, + }, + }) + + const { unmount } = renderWithAppProviders() + + expect(dashboardMotionMocks.scheduleDashboardPreloads).toHaveBeenCalledTimes(1) + 
expect(dashboardMotionMocks.scheduleDashboardPreloads.mock.calls[0]?.[0]).toHaveLength(2) + expect(screen.getByTestId('section-forecast-cache')).toHaveAttribute('data-preload', 'true') + expect(screen.getByTestId('section-limits')).toHaveAttribute('data-preload', 'true') + + unmount() + + expect(dashboardMotionMocks.cancelPreloads).toHaveBeenCalledTimes(1) + }) + + it('keeps the activity heatmap section viewport lazy while overview metrics render eagerly', () => { + const viewModel = createDashboardSectionsViewModel({ + layout: { + sectionOrder: ['metrics', 'activity'], + sectionVisibility: createVisibility(), + }, + }) + + renderWithAppProviders() + + expect(screen.getByTestId('section-metrics')).toHaveAttribute('data-eager', 'true') + expect(screen.getByTestId('section-activity')).toHaveAttribute('data-eager', 'false') + }) + + it('opens the forecast zoom dialog from lazy forecast cards', async () => { + const viewModel = createDashboardSectionsViewModel({ + layout: { + sectionOrder: ['forecastCache'], + sectionVisibility: createVisibility(), + }, + }) + + renderWithAppProviders() + + // Forecast cards cross a React.lazy boundary in DashboardSections, even with mocked chunks. 
+ await screen.findByTestId('cost-forecast-expand') + expect(screen.getByTestId('forecast-dialog-open')).toHaveTextContent('false') + + fireEvent.click(screen.getByTestId('cost-forecast-expand')) + + expect(screen.getByTestId('forecast-dialog-open')).toHaveTextContent('true') + }) +}) diff --git a/tests/frontend/drill-down-modal-regressions.test.tsx b/tests/frontend/drill-down-modal-regressions.test.tsx index 917496c..663ba12 100644 --- a/tests/frontend/drill-down-modal-regressions.test.tsx +++ b/tests/frontend/drill-down-modal-regressions.test.tsx @@ -51,7 +51,7 @@ describe('DrillDownModal regressions', () => { expect(screen.getAllByText('–').length).toBeGreaterThan(0) expect(screen.getByTestId('drilldown-token-distribution')).toBeInTheDocument() expect(screen.queryByTestId('drilldown-token-distribution-input')).not.toBeInTheDocument() - }, 15_000) + }) it('uses the canonical token sum instead of a stale day.totalTokens value', () => { const day: DailyUsage = { diff --git a/tests/frontend/expandable-card.test.tsx b/tests/frontend/expandable-card.test.tsx index 70ef72b..5f4b00e 100644 --- a/tests/frontend/expandable-card.test.tsx +++ b/tests/frontend/expandable-card.test.tsx @@ -29,7 +29,7 @@ describe('ExpandableCard', () => { 'Erweiterte Kartenansicht mit zusätzlichen Kennzahlen und vollständigem Inhalt.', ), ).toBeInTheDocument() - }, 15_000) + }) it('delegates expand handling to an external callback without opening its own dialog', () => { const onExpand = vi.fn() diff --git a/tests/frontend/filter-bar-presets.test.tsx b/tests/frontend/filter-bar-presets.test.tsx index a2bfe0a..0751125 100644 --- a/tests/frontend/filter-bar-presets.test.tsx +++ b/tests/frontend/filter-bar-presets.test.tsx @@ -3,7 +3,10 @@ import { screen } from '@testing-library/react' import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from 'vitest' import { FilterBar } from '@/components/layout/FilterBar' -import { resolveDashboardPresetRange } from 
'@/lib/dashboard-preferences' +import { + DASHBOARD_QUICK_DATE_PRESETS, + resolveDashboardPresetRange, +} from '@/lib/dashboard-preferences' import { initI18n } from '@/lib/i18n' import { buildFilterBarProps, renderFilterBar } from './filter-bar-test-helpers' @@ -21,6 +24,25 @@ describe('FilterBar preset and chip states', () => { vi.useRealTimers() }) + it('renders preset labels in the shared order', () => { + renderFilterBar() + + const presetLabelByKey = { + '7d': '7D', + '30d': '30D', + month: 'Month', + year: 'Year', + all: 'All', + } as const satisfies Record<(typeof DASHBOARD_QUICK_DATE_PRESETS)[number], string> + const presetLabels = DASHBOARD_QUICK_DATE_PRESETS.map((key) => presetLabelByKey[key]) + const renderedPresetLabels = screen + .getAllByRole('button') + .map((button) => button.textContent?.trim() ?? '') + .filter((label) => presetLabels.includes(label)) + + expect(renderedPresetLabels).toEqual(presetLabels) + }) + it('derives preset highlighting from the actual date range and clears it for custom ranges or month filters', () => { const sevenDayRange = resolveDashboardPresetRange('7d', new Date()) @@ -55,7 +77,7 @@ describe('FilterBar preset and chip states', () => { ) expect(screen.getByRole('button', { name: 'All' })).not.toHaveClass('bg-primary') - }, 15_000) + }) it('exposes pressed state for preset, provider, and model toggles', () => { renderFilterBar({ @@ -80,19 +102,6 @@ describe('FilterBar preset and chip states', () => { ) }) - it('renders quick presets in the shared display order', () => { - renderFilterBar() - - const presetLabels = screen - .getAllByRole('button') - .map((button) => button.textContent) - .filter((label): label is string => - ['7D', '30D', 'Month', 'Year', 'All'].includes(label ?? 
''), - ) - - expect(presetLabels).toEqual(['7D', '30D', 'Month', 'Year', 'All']) - }) - it('marks unfiltered provider and model chips as included instead of selected', () => { renderFilterBar({ availableProviders: ['Anthropic', 'OpenAI'], diff --git a/tests/frontend/header-links.test.tsx b/tests/frontend/header-links.test.tsx index 12781b7..f101a60 100644 --- a/tests/frontend/header-links.test.tsx +++ b/tests/frontend/header-links.test.tsx @@ -48,7 +48,7 @@ describe('Header external links', () => { expect(versionLink).toHaveAttribute('href', NPM_PACKAGE_URL) expect(versionLink).toHaveAttribute('target', '_blank') expect(versionLink).toHaveAttribute('rel', 'noopener noreferrer') - }, 15000) + }) it('shows npm, GitHub, and GitHub issues links in the help panel', () => { render() diff --git a/tests/frontend/info-heading.test.tsx b/tests/frontend/info-heading.test.tsx index 55913f7..12ac80d 100644 --- a/tests/frontend/info-heading.test.tsx +++ b/tests/frontend/info-heading.test.tsx @@ -64,7 +64,7 @@ describe('Info heading semantics', () => { const infoButton = screen.getByRole('button', { name: 'Show info' }) expect(infoButton).toBeInTheDocument() expect(infoButton).toHaveClass('focus-visible:ring-2') - }, 15_000) + }) it('keeps feature card titles semantically separate from the info button', () => { render( @@ -75,5 +75,5 @@ describe('Info heading semantics', () => { expect(screen.getByRole('heading', { name: 'Request quality' })).toBeInTheDocument() expect(screen.getByRole('button', { name: 'Show info' })).toBeInTheDocument() - }, 15_000) + }) }) diff --git a/tests/frontend/lazy-dashboard-chart-test-utils.tsx b/tests/frontend/lazy-dashboard-chart-test-utils.tsx new file mode 100644 index 0000000..4ab770a --- /dev/null +++ b/tests/frontend/lazy-dashboard-chart-test-utils.tsx @@ -0,0 +1,237 @@ +import type { ReactNode } from 'react' +import { expect, vi } from 'vitest' +import type { DailyUsage, TokenChartDataPoint, WeekdayData } from '@/types' +import { 
MockSvgContainer, MockSvgGroup } from '../recharts-test-utils' + +vi.mock('@/components/charts/ChartCard', () => ({ + ChartCard: ({ + children, + expandedExtra, + subtitle, + summary, + title, + }: { + children: ReactNode | ((expanded: boolean) => ReactNode) + expandedExtra?: ReactNode + subtitle?: string + summary?: ReactNode + title: string + }) => ( +
+

{title}

+ {subtitle ?

{subtitle}

: null} + {summary ?
{summary}
: null} +
+ {typeof children === 'function' ? children(false) : children} +
+ {expandedExtra ?
{expandedExtra}
: null} +
+ ), + ChartAnimationAware: ({ children }: { children: (active: boolean) => ReactNode }) => ( + <>{children(false)} + ), + ChartReveal: ({ children }: { children: ReactNode }) => <>{children}, +})) + +vi.mock('@/lib/model-color-context', () => ({ + useModelColorHelpers: () => ({ + getModelColor: (model: string) => + model.includes('Claude') ? 'rgb(251 146 60)' : 'rgb(16 185 129)', + }), +})) + +vi.mock('recharts', () => ({ + ResponsiveContainer: ({ children }: { children: ReactNode }) => ( + {children} + ), + AreaChart: ({ children, data }: { children: ReactNode; data?: unknown }) => ( + + {children} + + ), + BarChart: ({ + children, + data, + onMouseLeave, + onMouseMove, + }: { + children: ReactNode + data?: unknown + onMouseLeave?: () => void + onMouseMove?: (state: { activeTooltipIndex: number }) => void + }) => ( + onMouseLeave?.()} + onMouseMove={() => onMouseMove?.({ activeTooltipIndex: 1 })} + > + {children} + + ), + ComposedChart: ({ + children, + data, + onClick, + }: { + children: ReactNode + data?: unknown + onClick?: (state: { activePayload?: Array<{ payload?: { date?: string } }> }) => void + }) => { + const chartData = Array.isArray(data) ? 
data : [] + const firstPoint = chartData[0] as { date?: string } | undefined + return ( + onClick?.({ activePayload: [{ payload: firstPoint }] })} + > + {children} + + ) + }, + Area: ({ dataKey, fill, name }: { dataKey?: string; fill?: string; name?: string }) => ( + + ), + Bar: ({ children, dataKey, name }: { children?: ReactNode; dataKey?: string; name?: string }) => ( + + {children} + + ), + CartesianGrid: () => null, + Cell: ({ fill }: { fill?: string }) => ( + + ), + Line: ({ + dataKey, + name, + strokeDasharray, + }: { + dataKey?: string + name?: string + strokeDasharray?: string + }) => ( + + ), + Tooltip: () => null, + XAxis: () => null, + YAxis: () => null, +})) + +export function createDailyUsage( + date: string, + { + claudeCost, + gptCost, + }: { + claudeCost: number + gptCost: number + }, +): DailyUsage { + const totalCost = claudeCost + gptCost + return { + date, + inputTokens: 100, + outputTokens: 50, + cacheCreationTokens: 10, + cacheReadTokens: 40, + thinkingTokens: 5, + totalTokens: 205, + totalCost, + requestCount: 4, + modelsUsed: ['Claude Sonnet 4.5', 'GPT-5.4'], + modelBreakdowns: [ + { + modelName: 'Claude Sonnet 4.5', + inputTokens: 50, + outputTokens: 25, + cacheCreationTokens: 5, + cacheReadTokens: 20, + thinkingTokens: 2, + cost: claudeCost, + requestCount: 2, + }, + { + modelName: 'GPT-5.4', + inputTokens: 50, + outputTokens: 25, + cacheCreationTokens: 5, + cacheReadTokens: 20, + thinkingTokens: 3, + cost: gptCost, + requestCount: 2, + }, + ], + } +} + +// Intentionally unsorted to verify chart components normalize weekday order. 
+export const weekdayData: WeekdayData[] = [ + { day: 'Mo', cost: 3, weekdayIndex: 0 }, + { day: 'Sa', cost: 2, weekdayIndex: 5 }, + { day: 'Tu', cost: 9, weekdayIndex: 1 }, + { day: 'Su', cost: 5, weekdayIndex: 6 }, + { day: 'We', cost: 6, weekdayIndex: 2 }, + { day: 'Th', cost: 4, weekdayIndex: 3 }, + { day: 'Fr', cost: 7, weekdayIndex: 4 }, +] + +// Legacy shape without weekdayIndex to verify defensive fallback in CostByWeekday. +export const legacyWeekdayData: WeekdayData[] = [ + { day: 'Lun', cost: 3 }, + { day: 'Sáb.', cost: 2 }, + { day: 'Mar', cost: 9 }, + { day: 'Dom.', cost: 5 }, + { day: 'Mié', cost: 6 }, + { day: 'Jue', cost: 4 }, + { day: 'Vie', cost: 7 }, +] + +export function assertLegacyWeekdayData(data: WeekdayData[]) { + data.forEach((entry) => { + expect(entry).not.toHaveProperty('weekdayIndex') + }) +} + +export const tokenData: TokenChartDataPoint[] = [ + { + date: '2026-04-01', + Input: 100, + Output: 50, + 'Cache Write': 20, + 'Cache Read': 60, + Thinking: 10, + totalTokens: 240, + tokenMA7: 240, + inputMA7: 100, + outputMA7: 50, + cacheWriteMA7: 20, + cacheReadMA7: 60, + thinkingMA7: 10, + }, + { + date: '2026-04-02', + Input: 120, + Output: 70, + 'Cache Write': 30, + 'Cache Read': 80, + Thinking: 20, + totalTokens: 320, + tokenMA7: 280, + inputMA7: 110, + outputMA7: 60, + cacheWriteMA7: 25, + cacheReadMA7: 70, + thinkingMA7: 15, + }, +] diff --git a/tests/frontend/metric-ratio-locale.test.tsx b/tests/frontend/metric-ratio-locale.test.tsx index 7245e74..94714ca 100644 --- a/tests/frontend/metric-ratio-locale.test.tsx +++ b/tests/frontend/metric-ratio-locale.test.tsx @@ -1,7 +1,7 @@ // @vitest-environment jsdom import { render } from '@testing-library/react' -import { beforeEach, describe, expect, it, vi } from 'vitest' +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' import { MonthMetrics } from '@/components/cards/MonthMetrics' import { PrimaryMetrics } from '@/components/cards/PrimaryMetrics' import { 
TooltipProvider } from '@/components/ui/tooltip' @@ -44,6 +44,8 @@ const metrics: DashboardMetrics = { } describe('metric ratio localization', () => { + const daysElapsedInTestMonth = 30 + const testDate = new Date(2026, 3, daysElapsedInTestMonth, 12) const daily: DailyUsage[] = [ { date: '2026-04-02', @@ -74,6 +76,8 @@ describe('metric ratio localization', () => { ] beforeEach(async () => { + vi.useFakeTimers() + vi.setSystemTime(testDate) vi.stubGlobal( 'IntersectionObserver', class { @@ -85,6 +89,11 @@ describe('metric ratio localization', () => { await initI18n('de') }) + afterEach(() => { + vi.unstubAllGlobals() + vi.useRealTimers() + }) + it('formats the primary metrics I/O ratio with the active locale', () => { const ratio = new Intl.NumberFormat(getCurrentLocale(), { minimumFractionDigits: 1, @@ -125,7 +134,7 @@ describe('metric ratio localization', () => { style: 'percent', minimumFractionDigits: 0, maximumFractionDigits: 0, - }).format(2 / new Date().getDate()) + }).format(2 / daysElapsedInTestMonth) render( diff --git a/tests/frontend/model-mix.test.tsx b/tests/frontend/model-mix.test.tsx new file mode 100644 index 0000000..5692f0a --- /dev/null +++ b/tests/frontend/model-mix.test.tsx @@ -0,0 +1,39 @@ +// @vitest-environment jsdom + +import { screen } from '@testing-library/react' +import { beforeAll, describe, expect, it } from 'vitest' +import { createDailyUsage } from './lazy-dashboard-chart-test-utils' +import { ModelMix } from '@/components/charts/ModelMix' +import { initI18n } from '@/lib/i18n' +import { renderWithTooltip } from '../test-utils' + +const modelMixData = [ + createDailyUsage('2026-04-01', { claudeCost: 3, gptCost: 7 }), + createDailyUsage('2026-04-02', { claudeCost: 5, gptCost: 5 }), + createDailyUsage('2026-04-03', { claudeCost: 8, gptCost: 2 }), +] + +describe('ModelMix chart', () => { + beforeAll(async () => { + await initI18n('en') + }) + + it('skips underspecified input', () => { + const { container } = renderWithTooltip() + + 
expect(container).toBeEmptyDOMElement() + }) + + it('renders enough model history', () => { + renderWithTooltip() + + expect(screen.getByText('Model mix')).toBeInTheDocument() + expect(screen.getByText('Cost share by model over time')).toBeInTheDocument() + expect(screen.getByTestId('area-chart')).toBeInTheDocument() + const areaNames = screen + .getAllByTestId('chart-area') + .map((area) => area.getAttribute('data-name')) + expect(areaNames).toHaveLength(2) + expect(areaNames).toEqual(expect.arrayContaining(['Claude Sonnet 4.5', 'GPT-5.4'])) + }) +}) diff --git a/tests/frontend/period-comparison.test.tsx b/tests/frontend/period-comparison.test.tsx new file mode 100644 index 0000000..0183bff --- /dev/null +++ b/tests/frontend/period-comparison.test.tsx @@ -0,0 +1,50 @@ +// @vitest-environment jsdom + +import { fireEvent, screen } from '@testing-library/react' +import { beforeAll, describe, expect, it } from 'vitest' +import { createDailyUsage } from './lazy-dashboard-chart-test-utils' +import { PeriodComparison } from '@/components/features/comparison/PeriodComparison' +import { initI18n } from '@/lib/i18n' +import { renderWithTooltip } from '../test-utils' + +const DAYS_FOR_COMPARISON = 14 + +function buildComparisonData() { + return Array.from({ length: DAYS_FOR_COMPARISON }, (_, index) => { + const day = String(index + 1).padStart(2, '0') + return createDailyUsage(`2026-04-${day}`, { + claudeCost: index + 1, + gptCost: index + 2, + }) + }) +} + +describe('PeriodComparison chart', () => { + beforeAll(async () => { + await initI18n('en') + }) + + it('renders empty state when data is insufficient', () => { + renderWithTooltip() + + expect(screen.getByText('Not enough data for a comparison')).toBeInTheDocument() + expect(screen.getByText('At least 7 days required (currently: 3)')).toBeInTheDocument() + }) + + it('renders populated state with week preset by default', () => { + renderWithTooltip() + + expect(screen.getByText('Period comparison')).toBeInTheDocument() + 
expect(screen.getByText('Last week')).toBeInTheDocument() + expect(screen.getByText('This week')).toBeInTheDocument() + }) + + it('switches to month preset when Month button is clicked', () => { + renderWithTooltip() + + fireEvent.click(screen.getByRole('button', { name: 'Month' })) + + expect(screen.getByText('Last month')).toBeInTheDocument() + expect(screen.getByText('This month')).toBeInTheDocument() + }) +}) diff --git a/tests/frontend/provider-cost-forecast.test.tsx b/tests/frontend/provider-cost-forecast.test.tsx index 9fbf031..387b140 100644 --- a/tests/frontend/provider-cost-forecast.test.tsx +++ b/tests/frontend/provider-cost-forecast.test.tsx @@ -241,7 +241,7 @@ describe('ProviderCostForecast', () => { .filter((entry) => entry.getAttribute('data-key')?.endsWith('Lower')) .every((entry) => entry.getAttribute('data-legend-type') === 'none'), ).toBe(true) - }, 15_000) + }) it('only renders visible providers from the filtered dataset', () => { const openAiOnlyData: DailyUsage[] = [ diff --git a/tests/frontend/requests-over-time.test.tsx b/tests/frontend/requests-over-time.test.tsx index 40e7b3b..9dd6026 100644 --- a/tests/frontend/requests-over-time.test.tsx +++ b/tests/frontend/requests-over-time.test.tsx @@ -154,13 +154,16 @@ describe('RequestsOverTime', () => { expect(lineNames).toEqual( expect.arrayContaining([ - 'Claude Opus 4.7', + 'GPT-5.4 7-day avg', + 'Claude Sonnet 4.5 7-day avg', 'Claude Opus 4.7 7-day avg', + 'Gemini 2.5 Pro 7-day avg', + 'GPT-4.1 7-day avg', 'Claude Haiku 4.5 7-day avg', ]), ) expect(lineNames).not.toEqual( expect.arrayContaining(['Unused Model', 'Unused Model 7-day avg']), ) - }, 15_000) + }) }) diff --git a/tests/frontend/toast.test.tsx b/tests/frontend/toast.test.tsx index b80b0cc..f8167a3 100644 --- a/tests/frontend/toast.test.tsx +++ b/tests/frontend/toast.test.tsx @@ -39,7 +39,7 @@ describe('ToastProvider', () => { fireEvent.click(screen.getByRole('button', { name: 'Close' })) 
expect(screen.queryByRole('status')).not.toBeInTheDocument() - }, 15_000) + }) it('announces error toasts as alerts', () => { render( @@ -51,5 +51,5 @@ describe('ToastProvider', () => { fireEvent.click(screen.getByRole('button', { name: 'Trigger toast' })) expect(screen.getByRole('alert')).toHaveTextContent('Saved successfully') - }, 15_000) + }) }) diff --git a/tests/frontend/tokens-over-time.test.tsx b/tests/frontend/tokens-over-time.test.tsx new file mode 100644 index 0000000..a6102ab --- /dev/null +++ b/tests/frontend/tokens-over-time.test.tsx @@ -0,0 +1,32 @@ +// @vitest-environment jsdom + +import { fireEvent, screen } from '@testing-library/react' +import { beforeAll, describe, expect, it, vi } from 'vitest' +import { tokenData } from './lazy-dashboard-chart-test-utils' +import { TokensOverTime } from '@/components/charts/TokensOverTime' +import { initI18n } from '@/lib/i18n' +import { renderWithTooltip } from '../test-utils' + +describe('TokensOverTime chart', () => { + beforeAll(async () => { + await initI18n('en') + }) + + it('renders token totals, moving averages, and drilldown clicks', () => { + const onClickDay = vi.fn() + + renderWithTooltip() + + expect(screen.getByText('Tokens over time')).toBeInTheDocument() + expect(screen.getByText('Cache tokens')).toBeInTheDocument() + expect(screen.getByText('Input / Output tokens')).toBeInTheDocument() + expect(screen.getByText('Thinking tokens')).toBeInTheDocument() + expect(screen.getByText('Total tokens (all types)')).toBeInTheDocument() + expect(screen.getAllByTestId('chart-area')).toHaveLength(6) + expect(screen.getAllByTestId('chart-line')).toHaveLength(6) + + fireEvent.click(screen.getAllByTestId('composed-chart')[0]) + + expect(onClickDay).toHaveBeenCalledWith('2026-04-01') + }) +}) diff --git a/tests/integration/app-runtime.test.ts b/tests/integration/app-runtime.test.ts new file mode 100644 index 0000000..95c8c44 --- /dev/null +++ b/tests/integration/app-runtime.test.ts @@ -0,0 +1,146 @@ +import { 
EventEmitter } from 'node:events' +import { promises as fsPromises } from 'node:fs' +import { createRequire } from 'node:module' +import { tmpdir } from 'node:os' +import path from 'node:path' +import { describe, expect, it, vi } from 'vitest' + +const require = createRequire(import.meta.url) +const { createAppRuntime } = require('../../server/app-runtime.js') as { + createAppRuntime: (options: Record) => { + server: EventEmitter + } +} + +const REQUEST_TIMEOUT_MS = Number(process.env.TEST_TIMEOUT_MS || 5000) + +class MockRequest extends EventEmitter { + method = 'GET' + url: string + headers: Record + socket = { localAddress: '127.0.0.1' } + + constructor(url: string, headers: Record) { + super() + this.url = url + this.headers = headers + } +} + +class MockResponse extends EventEmitter { + status = 0 + headers: Record = {} + body = '' + headersSent = false + + writeHead(status: number, headers: Record) { + this.status = status + this.headers = headers + this.headersSent = true + } + + end(body?: string | Buffer) { + if (body !== undefined) { + this.write(body) + } + this.emit('finish') + } + + write(chunk: string | Buffer) { + this.body += Buffer.isBuffer(chunk) ? 
chunk.toString('utf8') : chunk + this.headersSent = true + return true + } +} + +async function emitServerRequest( + server: EventEmitter, + url: string, + headers: Record, +) { + const request = new MockRequest(url, headers) + const response = new MockResponse() + const finished = new Promise((resolve, reject) => { + const cleanup = () => { + clearTimeout(timeout) + response.off('error', onError) + response.off('finish', onFinish) + } + const onError = (error: Error) => { + cleanup() + reject(error) + } + const onFinish = () => { + cleanup() + resolve(response) + } + const timeout = setTimeout(() => { + response.off('error', onError) + response.off('finish', onFinish) + reject(new Error(`Timed out waiting for ${url}`)) + }, REQUEST_TIMEOUT_MS) + + response.once('error', onError) + response.once('finish', onFinish) + }) + + server.emit('request', request, response) + return finished +} + +describe('app runtime wiring', () => { + it('composes the real server runtime with isolated paths, API prefix, and auth', async () => { + const runtimeRoot = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-app-runtime-')) + const authToken = 'test-local-auth-token-with-enough-entropy' + const stdout = Object.assign(Object.create(process.stdout), { + isTTY: false, + write: vi.fn(() => true), + }) + const processObject = { + ...process, + argv: ['node', 'server.js', '--no-open'], + env: { + ...process.env, + API_PREFIX: '/custom-api', + CI: '1', + HOST: '127.0.0.1', + NO_OPEN_BROWSER: '1', + PORT: '4217', + TTDASH_CACHE_DIR: path.join(runtimeRoot, 'cache'), + TTDASH_CONFIG_DIR: path.join(runtimeRoot, 'config'), + TTDASH_DATA_DIR: path.join(runtimeRoot, 'data'), + TTDASH_INSTANCE_ID: 'app-runtime-contract', + TTDASH_LOCAL_AUTH_TOKEN: authToken, + }, + exit: vi.fn(), + kill: vi.fn(() => true), + on: vi.fn(), + pid: 42170, + platform: process.platform, + stdout, + } + + try { + const runtime = createAppRuntime({ + entrypointPath: path.join(runtimeRoot, 'server.js'), + 
processObject, + }) + + const response = await emitServerRequest(runtime.server, '/custom-api/runtime', { + authorization: `Bearer ${authToken}`, + host: '127.0.0.1:4217', + }) + + expect(response.status).toBe(200) + expect(JSON.parse(response.body)).toEqual({ + id: 'app-runtime-contract', + mode: 'foreground', + port: null, + url: null, + }) + expect(processObject.exit).not.toHaveBeenCalled() + } finally { + await fsPromises.rm(runtimeRoot, { recursive: true, force: true }) + } + }) +}) diff --git a/tests/integration/data-runtime-persistence.test.ts b/tests/integration/data-runtime-persistence.test.ts new file mode 100644 index 0000000..2e3c687 --- /dev/null +++ b/tests/integration/data-runtime-persistence.test.ts @@ -0,0 +1,76 @@ +import * as fs from 'node:fs' +import { createRequire } from 'node:module' +import { tmpdir } from 'node:os' +import path from 'node:path' +import { describe, expect, it, vi } from 'vitest' + +const require = createRequire(import.meta.url) +const fsPromises = fs.promises +const { createDataRuntime } = require('../../server/data-runtime.js') as { + createDataRuntime: (options: Record) => { + ensureAppDirs: () => void + paths: { + dataFile: string + settingsFile: string + } + readData: () => unknown + readSettings: () => unknown + readSettingsForWrite: () => { cliAutoLoadActive: boolean } + writeSettings: (settings: unknown) => Promise + } +} + +function createRuntime(runtimeRoot: string) { + return createDataRuntime({ + fs, + fsPromises, + os: { homedir: () => runtimeRoot }, + path, + processObject: { + env: { + TTDASH_CACHE_DIR: path.join(runtimeRoot, 'cache'), + TTDASH_CONFIG_DIR: path.join(runtimeRoot, 'config'), + TTDASH_DATA_DIR: path.join(runtimeRoot, 'data'), + }, + kill: vi.fn(() => true), + pid: process.pid, + platform: 'linux', + }, + normalizeIncomingData: (value: unknown) => value, + runtimeInstanceId: 'data-runtime-persistence', + appDirName: 'TTDash', + appDirNameLinux: 'ttdash', + legacyDataFile: path.join(runtimeRoot, 
'legacy-data.json'), + settingsBackupKind: 'ttdash-settings-backup', + usageBackupKind: 'ttdash-usage-backup', + isWindows: false, + secureDirMode: 0o700, + secureFileMode: 0o600, + fileMutationLockTimeoutMs: 80, + fileMutationLockStaleMs: 10, + }) +} + +describe('data runtime persistence', () => { + it('recovers default settings for writes while keeping corrupted persisted state visible', async () => { + const runtimeRoot = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-data-runtime-')) + const runtime = createRuntime(runtimeRoot) + + try { + runtime.ensureAppDirs() + await fsPromises.writeFile(runtime.paths.settingsFile, '{broken settings') + await fsPromises.writeFile(runtime.paths.dataFile, '{broken usage') + + expect(() => runtime.readSettings()).toThrow('Settings file is unreadable or corrupted.') + expect(() => runtime.readData()).toThrow('Usage data file is unreadable or corrupted.') + const recoveredSettings = runtime.readSettingsForWrite() + expect(recoveredSettings).toMatchObject({ cliAutoLoadActive: false }) + + await runtime.writeSettings(recoveredSettings) + + expect(runtime.readSettings()).toMatchObject({ cliAutoLoadActive: false }) + } finally { + await fsPromises.rm(runtimeRoot, { recursive: true, force: true }) + } + }) +}) diff --git a/tests/integration/server-api-imports.test.ts b/tests/integration/server-api-imports.test.ts index 4607324..fd726d5 100644 --- a/tests/integration/server-api-imports.test.ts +++ b/tests/integration/server-api-imports.test.ts @@ -123,6 +123,7 @@ describe('local server API imports', () => { addedDays: 1, unchangedDays: 1, conflictingDays: 1, + skippedDays: 0, totalDays: 6, }) }) diff --git a/tests/integration/server-background-concurrency.test.ts b/tests/integration/server-background-concurrency.test.ts index d6e45df..b97b682 100644 --- a/tests/integration/server-background-concurrency.test.ts +++ b/tests/integration/server-background-concurrency.test.ts @@ -4,6 +4,9 @@ import path from 'node:path' import { 
describe, expect, it } from 'vitest' import { createCliEnv, + formatBackgroundDiagnostics, + formatCliResult, + readBackgroundRegistry, runCli, stopAllBackgroundServers, waitForBackgroundRegistry, @@ -21,8 +24,22 @@ describe('local server background concurrent startup', () => { runCli(['--background', '--no-open'], { env: backgroundEnv }), ]) - expect(firstStart.code).toBe(0) - expect(secondStart.code).toBe(0) + expect( + firstStart.code, + [ + 'first concurrent background start failed', + formatCliResult(firstStart), + formatBackgroundDiagnostics(backgroundRoot), + ].join('\n\n'), + ).toBe(0) + expect( + secondStart.code, + [ + 'second concurrent background start failed', + formatCliResult(secondStart), + formatBackgroundDiagnostics(backgroundRoot), + ].join('\n\n'), + ).toBe(0) const registry = await waitForBackgroundRegistry( backgroundRoot, @@ -32,8 +49,14 @@ describe('local server background concurrent startup', () => { const [firstInstance, secondInstance] = registry await waitForUrlAvailable(firstInstance!.url) await waitForUrlAvailable(secondInstance!.url) - expect(registry).toHaveLength(2) - expect(new Set(registry.map((entry) => entry.url)).size).toBe(2) + expect( + readBackgroundRegistry(backgroundRoot), + formatBackgroundDiagnostics(backgroundRoot, 'registry changed after readiness checks'), + ).toHaveLength(2) + expect( + new Set(registry.map((entry) => entry.url)).size, + formatBackgroundDiagnostics(backgroundRoot, 'duplicate background registry URLs'), + ).toBe(2) } finally { await stopAllBackgroundServers(backgroundEnv, backgroundRoot) rmSync(backgroundRoot, { recursive: true, force: true }) diff --git a/tests/integration/server-test-helpers.ts b/tests/integration/server-test-helpers.ts index c59ff6b..e4baae1 100644 --- a/tests/integration/server-test-helpers.ts +++ b/tests/integration/server-test-helpers.ts @@ -4,6 +4,7 @@ import { existsSync, mkdirSync, mkdtempSync, + readdirSync, readFileSync, rmSync, statSync, @@ -48,6 +49,15 @@ export type 
LocalAuthSession = { createdAt: string } +export type CliResult = { + args: string[] + code: number | null + output: string + stdout: string + stderr: string + signal: NodeJS.Signals | null +} + const authHeadersByOrigin = new Map() const fetchProbeTimeoutMs = 1000 const fetchRequestTimeoutMs = 15_000 @@ -534,6 +544,18 @@ export function getCliDataDir(root: string) { return path.join(root, 'data', 'ttdash') } +export function getCliCacheDir(root: string) { + if (process.platform === 'darwin') { + return path.join(root, 'Library', 'Caches', 'TTDash') + } + + if (process.platform === 'win32') { + return path.join(root, 'AppData', 'Local', 'TTDash', 'Cache') + } + + return path.join(root, 'cache', 'ttdash') +} + export function getLocalAuthSessionPath(root: string) { return path.join(getCliConfigDir(root), 'session-auth.json') } @@ -717,6 +739,66 @@ export function tryReadBackgroundRegistry(root: string) { } } +function readBackgroundLogSnippets(root: string, entries: BackgroundRegistryEntry[]) { + const logFiles = new Set( + entries.map((entry) => entry.logFile).filter((logFile): logFile is string => Boolean(logFile)), + ) + const defaultLogDir = path.join(getCliCacheDir(root), 'background') + + try { + for (const fileName of readdirSync(defaultLogDir)) { + if (fileName.startsWith('server-') && fileName.endsWith('.log')) { + logFiles.add(path.join(defaultLogDir, fileName)) + } + } + } catch { + // Log files are optional diagnostics; missing log directories are expected + // when a background child exits before opening its log file. + } + + return [...logFiles].map((logFile) => { + let content = '' + try { + content = readFileSync(logFile, 'utf-8').trim() + } catch (error) { + content = `` + } + + return { + file: logFile, + content: content.slice(-4000), + } + }) +} + +export function formatCliResult(result: CliResult) { + return [ + `args: server.js ${result.args.join(' ')}`, + `code: ${result.code ?? 'null'}`, + `signal: ${result.signal ?? 
'null'}`, + `stdout:\n${result.stdout.trim() || ''}`, + `stderr:\n${result.stderr.trim() || ''}`, + ].join('\n') +} + +export function formatBackgroundDiagnostics(root: string, label = 'background diagnostics') { + const registryPath = path.join(getCliConfigDir(root), 'background-instances.json') + const registry = tryReadBackgroundRegistry(root) + const logs = readBackgroundLogSnippets(root, registry) + + return [ + label, + `root: ${root}`, + `registry: ${registryPath}`, + `entries:\n${JSON.stringify(registry, null, 2)}`, + `logs:\n${ + logs.length + ? logs.map((log) => `--- ${log.file} ---\n${log.content || ''}`).join('\n') + : '' + }`, + ].join('\n') +} + export function writeBackgroundRegistry(root: string, entries: BackgroundRegistryEntry[]) { const normalizedEntries = normalizeBackgroundRegistryEntries(entries) if (normalizedEntries.length !== entries.length) { @@ -752,7 +834,10 @@ export async function waitForBackgroundRegistry( } throw new Error( - `Timed out waiting for background registry state: ${JSON.stringify(lastEntries, null, 2)}`, + [ + `Timed out waiting for background registry state: ${JSON.stringify(lastEntries, null, 2)}`, + formatBackgroundDiagnostics(root), + ].join('\n\n'), ) } @@ -781,14 +866,15 @@ export async function runCli( timeoutMs = cliCommandTimeoutMs, }: { env: NodeJS.ProcessEnv; input?: string; timeoutMs?: number }, ) { - return await new Promise<{ code: number | null; output: string }>((resolve, reject) => { + return await new Promise((resolve, reject) => { const cli = spawn(process.execPath, ['server.js', ...args], { cwd: process.cwd(), env, stdio: ['pipe', 'pipe', 'pipe'], }) - let cliOutput = '' + let cliStdout = '' + let cliStderr = '' let timedOut = false let forceKillId: ReturnType | null = null const timeoutId = setTimeout(() => { @@ -809,27 +895,41 @@ export async function runCli( } cli.stdout.on('data', (chunk) => { - cliOutput += chunk.toString() + cliStdout += chunk.toString() }) cli.stderr.on('data', (chunk) => { - 
cliOutput += chunk.toString() + cliStderr += chunk.toString() }) cli.on('error', (error) => { cleanup() reject(error) }) - cli.on('close', (code) => { + cli.on('close', (code, signal) => { cleanup() + const cliOutput = cliStdout + cliStderr if (timedOut) { reject( - new Error(`Timed out waiting for CLI command: server.js ${args.join(' ')}\n${cliOutput}`), + new Error( + [ + `Timed out waiting for CLI command: server.js ${args.join(' ')}`, + `stdout:\n${cliStdout.trim() || ''}`, + `stderr:\n${cliStderr.trim() || ''}`, + ].join('\n'), + ), ) return } - resolve({ code, output: cliOutput }) + resolve({ + args, + code, + output: cliOutput, + stdout: cliStdout, + stderr: cliStderr, + signal, + }) }) if (input) { diff --git a/tests/integration/toktrack-version-contract.test.ts b/tests/integration/toktrack-version-contract.test.ts new file mode 100644 index 0000000..f1822eb --- /dev/null +++ b/tests/integration/toktrack-version-contract.test.ts @@ -0,0 +1,243 @@ +import { createRequire } from 'node:module' +import { existsSync, readdirSync, readFileSync } from 'node:fs' +import path from 'node:path' +import { fileURLToPath } from 'node:url' +import { describe, expect, it } from 'vitest' + +const require = createRequire(import.meta.url) +const packageJson = require('../../package.json') as { + dependencies?: Record +} +const packageLockJson = require('../../package-lock.json') as { + packages?: Record< + string, + { + dependencies?: Record + version?: string + } + > +} + +type BunLockJson = { + workspaces?: Record }> + packages?: Record +} + +const repoRoot = path.resolve(path.dirname(fileURLToPath(import.meta.url)), '../..') +const exactSemverPattern = + /^(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)(?:-[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?(?:\+[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?$/ +const hardcodedToktrackVersionPattern = + /(?:toktrack@\d+\.\d+\.\d+(?:[-+][0-9A-Za-z.-]+)?|TOKTRACK_(?:VERSION|PACKAGE_SPEC)\s*[:=]\s*['"`][^'"`]*\d+\.\d+\.\d+)/ +const scannedExtensions = new 
Set([ + '.bat', + '.cjs', + '.html', + '.js', + '.json', + '.md', + '.mjs', + '.sh', + '.ts', + '.tsx', + '.yaml', + '.yml', +]) +const excludedDirectories = new Set([ + '.git', + '.tmp-playwright', + 'coverage', + 'dist', + 'docs/review', + 'node_modules', + 'playwright-report', + 'test-results', +]) +const excludedFiles = new Set(['bun.lock', 'CHANGELOG.md', 'package-lock.json']) + +function getToktrackDependency() { + const version = packageJson.dependencies?.toktrack + if (typeof version !== 'string') { + throw new Error('package.json dependencies.toktrack must be defined.') + } + + return version +} + +type JsoncOutsideStringTransform = (context: { + character: string + index: number + nextCharacter: string | undefined + source: string +}) => { nextIndex?: number; text: string } | null + +function transformJsoncOutsideStrings( + source: string, + transformOutsideString: JsoncOutsideStringTransform, +) { + let result = '' + let inString = false + let isEscaped = false + + for (let index = 0; index < source.length; index += 1) { + const character = source[index] + const nextCharacter = source[index + 1] + + if (inString) { + result += character + if (isEscaped) { + isEscaped = false + } else if (character === '\\') { + isEscaped = true + } else if (character === '"') { + inString = false + } + continue + } + + if (character === '"') { + inString = true + result += character + continue + } + + const transformed = transformOutsideString({ + character, + index, + nextCharacter, + source, + }) + if (transformed) { + result += transformed.text + index = transformed.nextIndex ?? index + continue + } + + result += character + } + + return result +} + +// Deliberately dependency-free for a lockfile contract test; the scanner handles escaped +// strings, comments, and trailing commas without mutating comment-like text inside strings. 
+function stripJsoncComments(source: string) { + return transformJsoncOutsideStrings( + source, + ({ character, index, nextCharacter, source: jsoncSource }) => { + if (character === '/' && nextCharacter === '/') { + let nextIndex = index + 2 + while ( + nextIndex < jsoncSource.length && + jsoncSource[nextIndex] !== '\n' && + jsoncSource[nextIndex] !== '\r' + ) { + nextIndex += 1 + } + return { + nextIndex, + text: jsoncSource[nextIndex] ?? '', + } + } + + if (character === '/' && nextCharacter === '*') { + let nextIndex = index + 2 + while ( + nextIndex < jsoncSource.length && + !(jsoncSource[nextIndex] === '*' && jsoncSource[nextIndex + 1] === '/') + ) { + nextIndex += 1 + } + return { + nextIndex: nextIndex < jsoncSource.length ? nextIndex + 1 : nextIndex, + text: '', + } + } + + return null + }, + ) +} + +function stripJsoncTrailingCommas(source: string) { + return transformJsoncOutsideStrings(source, ({ character, index, source: jsoncSource }) => { + if (character !== ',') { + return null + } + + let nextIndex = index + 1 + while (/\s/.test(jsoncSource[nextIndex] ?? 
'')) { + nextIndex += 1 + } + + if (jsoncSource[nextIndex] === '}' || jsoncSource[nextIndex] === ']') { + return { text: '' } + } + + return null + }) +} + +function parseBunLockJsonc(source: string): BunLockJson { + return JSON.parse(stripJsoncTrailingCommas(stripJsoncComments(source))) as BunLockJson +} + +function shouldSkipPath(relativePath: string) { + if (excludedFiles.has(relativePath)) return true + + for (const directory of excludedDirectories) { + if (relativePath === directory || relativePath.startsWith(`${directory}/`)) { + return true + } + } + + return false +} + +function collectScannedFiles(directory: string, collected: string[] = []) { + for (const entry of readdirSync(directory, { withFileTypes: true })) { + const absolutePath = path.join(directory, entry.name) + const relativePath = path.relative(repoRoot, absolutePath) + + if (shouldSkipPath(relativePath)) continue + if (entry.isSymbolicLink()) continue + + if (entry.isDirectory()) { + collectScannedFiles(absolutePath, collected) + continue + } + + if (entry.isFile() && scannedExtensions.has(path.extname(entry.name))) { + collected.push(relativePath) + } + } + + return collected +} + +describe('toktrack version repository contract', () => { + it('keeps npm and bun lockfiles aligned with package.json', () => { + const toktrackVersion = getToktrackDependency() + const rootPackage = packageLockJson.packages?.[''] + const installedToktrackPackage = packageLockJson.packages?.['node_modules/toktrack'] + const bunLockPath = path.join(repoRoot, 'bun.lock') + + expect(toktrackVersion).toMatch(exactSemverPattern) + expect(rootPackage?.dependencies?.toktrack).toBe(toktrackVersion) + expect(installedToktrackPackage?.version).toBe(toktrackVersion) + + if (existsSync(bunLockPath)) { + const bunLockJson = parseBunLockJsonc(readFileSync(bunLockPath, 'utf8')) + expect(bunLockJson.workspaces?.['']?.dependencies?.toktrack).toBe(toktrackVersion) + 
expect(bunLockJson.packages?.toktrack?.[0]).toBe(`toktrack@${toktrackVersion}`) + } + }) + + it('keeps toktrack package specs free of hardcoded versions outside managed files', () => { + const offenders = collectScannedFiles(repoRoot).filter((relativePath) => { + const content = readFileSync(path.join(repoRoot, relativePath), 'utf8') + return hardcodedToktrackVersionPattern.test(content) + }) + + expect(offenders).toEqual([]) + }) +}) diff --git a/tests/unit/app-settings-contract.test.ts b/tests/unit/app-settings-contract.test.ts index 00adb52..0fd0162 100644 --- a/tests/unit/app-settings-contract.test.ts +++ b/tests/unit/app-settings-contract.test.ts @@ -1,5 +1,14 @@ import { describe, expect, it } from 'vitest' -import { DEFAULT_APP_SETTINGS, normalizeAppSettings } from '@/lib/app-settings' +import { + DEFAULT_APP_SETTINGS, + normalizeAppLanguage, + normalizeAppSettings, + normalizeAppTheme, + normalizeDataLoadSource, + normalizeReducedMotionPreference, + normalizeStoredProviderLimits, + normalizeStoredTimestamp, +} from '@/lib/app-settings' import { DEFAULT_DASHBOARD_FILTERS, getDefaultDashboardSectionOrder, @@ -68,6 +77,36 @@ describe('shared app settings contract', () => { expect(normalizeAppSettings(payload)).toEqual(normalizeSharedAppSettings(payload)) }) + it('normalizes frontend app-settings fragments through the shared wrappers', () => { + expect(normalizeAppLanguage('en')).toBe('en') + expect(normalizeAppLanguage('fr')).toBe('de') + expect(normalizeAppTheme('light')).toBe('light') + expect(normalizeAppTheme('system')).toBe('dark') + expect(normalizeReducedMotionPreference('always')).toBe('always') + expect(normalizeReducedMotionPreference('never')).toBe('never') + expect(normalizeReducedMotionPreference('sometimes')).toBe('system') + expect(normalizeDataLoadSource('file')).toBe('file') + expect(normalizeDataLoadSource('cli-auto-load')).toBe('cli-auto-load') + expect(normalizeDataLoadSource('manual')).toBeNull() + 
expect(normalizeStoredTimestamp('2026-04-01T12:34:56+02:00')).toBe('2026-04-01T10:34:56.000Z') + expect(normalizeStoredTimestamp('not a timestamp')).toBeNull() + expect( + normalizeStoredProviderLimits({ + OpenAI: { + hasSubscription: true, + subscriptionPrice: 12.345, + monthlyLimit: -1, + }, + }), + ).toEqual({ + OpenAI: { + hasSubscription: true, + subscriptionPrice: 12.35, + monthlyLimit: 0, + }, + }) + }) + it('normalizes dashboard fragments and provider limits through the shared contract', () => { const filters = { viewMode: 'weekly', diff --git a/tests/unit/auto-import-stream-routes.test.ts b/tests/unit/auto-import-stream-routes.test.ts new file mode 100644 index 0000000..549f734 --- /dev/null +++ b/tests/unit/auto-import-stream-routes.test.ts @@ -0,0 +1,95 @@ +import { describe, expect, it, vi } from 'vitest' +import { createRouter, request, requestRaw } from './http-router-test-helpers' + +describe('auto-import stream routes', () => { + it('streams auto-import success events and releases the acquired lease', async () => { + const lease = { release: vi.fn() } + const closeImport = vi.fn() + const performAutoImport = vi.fn(async ({ onCheck, onOutput, onProgress, signalOnClose }) => { + signalOnClose(closeImport) + onCheck({ tool: 'toktrack', status: 'found' }) + onProgress({ key: 'startingLocalImport', vars: {} }) + onOutput('runner warning') + return { days: 2, totalCost: 3.5 } + }) + const { router } = createRouter({ + autoImportRuntimeOverrides: { + acquireAutoImportLease: vi.fn(() => lease), + performAutoImport, + }, + }) + + const { req, res } = await requestRaw(router, '/api/auto-import/stream', 'POST') + + expect(res.status).toBe(200) + expect(res.headers['content-type']).toBe('text/event-stream') + expect(res.body).toContain('event: check') + expect(res.body).toContain('"status":"found"') + expect(res.body).toContain('event: progress') + expect(res.body).toContain('event: stderr') + expect(res.body).toContain('runner warning') + 
expect(res.body).toContain('event: success') + expect(res.body).toContain('"totalCost":3.5') + expect(res.body).toContain('event: done') + + const checkIndex = res.body.indexOf('event: check') + const progressIndex = res.body.indexOf('event: progress') + const stderrIndex = res.body.indexOf('event: stderr') + const successIndex = res.body.indexOf('event: success') + const doneIndex = res.body.indexOf('event: done') + expect(checkIndex).toBeGreaterThanOrEqual(0) + expect(progressIndex).toBeGreaterThan(checkIndex) + expect(stderrIndex).toBeGreaterThan(progressIndex) + expect(successIndex).toBeGreaterThan(stderrIndex) + expect(doneIndex).toBeGreaterThan(successIndex) + + expect(performAutoImport).toHaveBeenCalledWith( + expect.objectContaining({ + source: 'auto-import', + lease, + }), + ) + expect(req.on).toHaveBeenCalledWith('close', closeImport) + expect(lease.release).toHaveBeenCalledTimes(1) + }) + + it('maps concurrent auto-import starts to a localized conflict response', async () => { + const { router } = createRouter({ + autoImportRuntimeOverrides: { + acquireAutoImportLease: vi.fn(() => { + throw Object.assign(new Error('already running'), { messageKey: 'autoImportRunning' }) + }), + createAutoImportMessageEvent: vi.fn((key: string) => ({ key })), + formatAutoImportMessageEvent: vi.fn(() => 'An auto-import is already running.'), + }, + }) + + const { res, body } = await request(router, '/api/auto-import/stream', 'POST') + + expect(res.status).toBe(409) + expect(body).toEqual({ message: 'An auto-import is already running.' 
}) + }) + + it('streams structured auto-import errors and releases the lease after failures', async () => { + const lease = { release: vi.fn() } + const { router } = createRouter({ + autoImportRuntimeOverrides: { + acquireAutoImportLease: vi.fn(() => lease), + performAutoImport: vi.fn(async () => { + throw new Error('toktrack failed') + }), + toAutoImportErrorEvent: vi.fn((error: Error) => ({ + message: error.message, + })), + }, + }) + + const { res } = await requestRaw(router, '/api/auto-import/stream', 'POST') + + expect(res.status).toBe(200) + expect(res.body).toContain('event: error') + expect(res.body).toContain('"message":"toktrack failed"') + expect(res.body).toContain('event: done') + expect(lease.release).toHaveBeenCalledTimes(1) + }) +}) diff --git a/tests/unit/background-runtime.test.ts b/tests/unit/background-runtime.test.ts index f94cebf..08846a3 100644 --- a/tests/unit/background-runtime.test.ts +++ b/tests/unit/background-runtime.test.ts @@ -11,6 +11,7 @@ const { createBackgroundRuntime } = require('../../server/background-runtime.js' mkdirSync: (dirPath: string, options?: unknown) => void chmodSync: (filePath: string, mode: number) => void rmSync: (targetPath: string, options?: unknown) => void + statSync?: (targetPath: string) => { mtimeMs: number } openSync?: (filePath: string, flags: string, mode: number) => number fchmodSync?: (fd: number, mode: number) => void closeSync?: (fd: number) => void @@ -50,14 +51,127 @@ const { createBackgroundRuntime } = require('../../server/background-runtime.js' isProcessRunning: (pid: number) => boolean formatDateTime: (value: string) => string }) => { + fetchRuntimeIdentity: ( + url: string, + requestApiPrefix?: string, + timeoutMs?: number, + requestAuthHeader?: string | null, + ) => Promise<{ id: string; port: number } | null> pruneBackgroundInstances: () => Promise< Array<{ id: string; pid: number; port: number; url: string; startedAt: string }> > + runStopCommand: () => Promise startInBackground: () => 
Promise } } +type BackgroundRuntimeOptions = Parameters[0] + +function createTestBackgroundRuntime(overrides: Partial = {}) { + const fsMock = { + readFileSync: vi.fn(() => '[]'), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + ...overrides.fs, + } + + return createBackgroundRuntime({ + path, + processObject: { + ...process, + env: {}, + execPath: process.execPath, + } as NodeJS.Process, + fetchImpl: vi.fn(async () => ({ + ok: true, + json: async () => ({ id: 'runtime-id', port: 3101 }), + })), + spawnImpl: vi.fn(), + readlinePromises: {} as typeof readlinePromisesModule, + entrypointPath: '/tmp/server.js', + appPaths: { + configDir: '/tmp/ttdash-config', + cacheDir: '/tmp/ttdash-cache', + }, + ensureAppDirs: vi.fn(), + ensureDir: vi.fn(), + writeJsonAtomic: vi.fn(), + normalizeIsoTimestamp: (value: string) => new Date(value).toISOString(), + bindHost: '127.0.0.1', + apiPrefix: '/api', + runtimeInstance: { + id: 'runtime-id', + pid: 999, + startedAt: '2026-04-01T07:00:00.000Z', + }, + normalizedCliArgs: [], + cliOptions: { + noOpen: true, + }, + forceOpenBrowser: false, + isWindows: false, + secureDirMode: 0o700, + secureFileMode: 0o600, + backgroundStartTimeoutMs: 15_000, + backgroundInstancesLockTimeoutMs: 5_000, + backgroundInstancesLockStaleMs: 10_000, + sleep: async () => {}, + isProcessRunning: () => true, + formatDateTime: (value: string) => value, + ...overrides, + fs: fsMock, + }) +} + describe('background runtime', () => { + it('fetches runtime identity with the requested API prefix and auth header', async () => { + const fetchImpl = vi.fn(async () => ({ + ok: true, + json: async () => ({ id: 'remote-runtime', port: 3107 }), + })) + const runtime = createTestBackgroundRuntime({ + fetchImpl, + apiPrefix: '/default-api', + }) + + await expect( + runtime.fetchRuntimeIdentity( + 'http://127.0.0.1:3107', + '/custom-api/', + 250, + 'Bearer registry-token', + ), + ).resolves.toEqual({ id: 'remote-runtime', port: 3107 }) + + 
expect(fetchImpl).toHaveBeenCalledWith(new URL('http://127.0.0.1:3107/custom-api/runtime'), { + headers: { Authorization: 'Bearer registry-token' }, + signal: expect.any(AbortSignal), + }) + }) + + it('treats unavailable runtime identity responses as stale registry entries', async () => { + const fetchImpl = vi + .fn() + .mockResolvedValueOnce({ + ok: false, + json: async () => ({ id: 'ignored', port: 3101 }), + }) + .mockResolvedValueOnce({ + ok: true, + json: async () => null, + }) + .mockRejectedValueOnce(new Error('offline')) + const runtime = createTestBackgroundRuntime({ + fetchImpl, + }) + + await expect(runtime.fetchRuntimeIdentity('')).resolves.toBeNull() + await expect(runtime.fetchRuntimeIdentity('http://127.0.0.1:3101')).resolves.toBeNull() + await expect(runtime.fetchRuntimeIdentity('http://127.0.0.1:3102')).resolves.toBeNull() + await expect(runtime.fetchRuntimeIdentity('http://127.0.0.1:3103')).resolves.toBeNull() + }) + it('prunes stale instances without re-reading the registry snapshot', async () => { const registryEntries = [ { @@ -163,6 +277,412 @@ describe('background runtime', () => { ) }) + it('removes stale registry locks before reading background instances', async () => { + const dateNowSpy = vi.spyOn(Date, 'now').mockReturnValue(20_000) + const fsMock = { + readFileSync: vi.fn(() => '[]'), + mkdirSync: vi + .fn() + .mockImplementationOnce(() => { + throw Object.assign(new Error('lock exists'), { code: 'EEXIST' }) + }) + .mockImplementationOnce(() => undefined), + chmodSync: vi.fn(), + rmSync: vi.fn(), + statSync: vi.fn(() => ({ mtimeMs: 0 })), + } + const runtime = createTestBackgroundRuntime({ + fs: fsMock, + backgroundInstancesLockStaleMs: 10_000, + }) + + try { + await expect(runtime.pruneBackgroundInstances()).resolves.toEqual([]) + + expect(fsMock.statSync).toHaveBeenCalledWith( + path.join('/tmp/ttdash-config', 'background-instances.lock'), + ) + expect(fsMock.rmSync).toHaveBeenCalledWith( + path.join('/tmp/ttdash-config', 
'background-instances.lock'), + { recursive: true, force: true }, + ) + expect(fsMock.mkdirSync).toHaveBeenCalledTimes(2) + } finally { + dateNowSpy.mockRestore() + } + }) + + it('fails fast with a clear error when the registry lock cannot be acquired', async () => { + let currentTime = 0 + const dateNowSpy = vi.spyOn(Date, 'now').mockImplementation(() => currentTime) + const fsMock = { + readFileSync: vi.fn(() => '[]'), + mkdirSync: vi.fn(() => { + throw Object.assign(new Error('lock exists'), { code: 'EEXIST' }) + }), + chmodSync: vi.fn(), + rmSync: vi.fn(), + statSync: vi.fn(() => ({ mtimeMs: currentTime })), + } + const sleep = vi.fn(async (durationMs: number) => { + currentTime += durationMs + }) + const runtime = createTestBackgroundRuntime({ + fs: fsMock, + backgroundInstancesLockTimeoutMs: 100, + backgroundInstancesLockStaleMs: 1_000, + sleep, + }) + + try { + await expect(runtime.pruneBackgroundInstances()).rejects.toThrow( + 'Could not acquire background registry lock.', + ) + + expect(sleep).toHaveBeenCalledWith(50) + expect(fsMock.rmSync).not.toHaveBeenCalled() + } finally { + dateNowSpy.mockRestore() + } + }) + + it('rewrites invalid registry payloads to an empty alive snapshot', async () => { + const invalidRegistry = [ + { + id: 'missing-port', + pid: 101, + url: 'http://127.0.0.1:3101', + startedAt: '2026-04-01T08:00:00.000Z', + }, + ] + const writeJsonAtomic = vi.fn() + const runtime = createTestBackgroundRuntime({ + fs: { + readFileSync: vi.fn(() => JSON.stringify(invalidRegistry)), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + }, + writeJsonAtomic, + }) + + await expect(runtime.pruneBackgroundInstances()).resolves.toEqual([]) + expect(writeJsonAtomic).toHaveBeenCalledWith( + path.join('/tmp/ttdash-config', 'background-instances.json'), + [], + ) + }) + + it('reports permission denied when the stop command cannot signal an owned instance', async () => { + const registryEntries = [ + { + id: 'owned-instance', + pid: 101, + port: 
3101, + url: 'http://127.0.0.1:3101', + startedAt: '2026-04-01T08:00:00.000Z', + }, + ] + const fsMock = { + readFileSync: vi.fn(() => JSON.stringify(registryEntries)), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + } + const processObject = { + ...process, + env: {}, + execPath: process.execPath, + exitCode: 0, + kill: vi.fn(() => { + throw Object.assign(new Error('permission denied'), { code: 'EPERM' }) + }), + } as unknown as NodeJS.Process + const errorSpy = vi.spyOn(console, 'error').mockImplementation(() => undefined) + const runtime = createBackgroundRuntime({ + fs: fsMock, + path, + processObject, + fetchImpl: vi.fn(async () => ({ + ok: true, + json: async () => ({ id: 'owned-instance', port: 3101 }), + })), + spawnImpl: vi.fn(), + readlinePromises: {} as typeof readlinePromisesModule, + entrypointPath: '/tmp/server.js', + appPaths: { + configDir: '/tmp/ttdash-config', + cacheDir: '/tmp/ttdash-cache', + }, + ensureAppDirs: vi.fn(), + ensureDir: vi.fn(), + writeJsonAtomic: vi.fn(), + normalizeIsoTimestamp: (value: string) => new Date(value).toISOString(), + bindHost: '127.0.0.1', + apiPrefix: '/api', + runtimeInstance: { + id: 'runtime-id', + pid: 999, + startedAt: '2026-04-01T07:00:00.000Z', + }, + normalizedCliArgs: [], + cliOptions: { + noOpen: true, + }, + forceOpenBrowser: false, + isWindows: false, + secureDirMode: 0o700, + secureFileMode: 0o600, + backgroundStartTimeoutMs: 15_000, + backgroundInstancesLockTimeoutMs: 5_000, + backgroundInstancesLockStaleMs: 10_000, + sleep: async () => {}, + isProcessRunning: () => true, + formatDateTime: (value: string) => value, + }) + + try { + await runtime.runStopCommand() + + expect(processObject.kill).toHaveBeenCalledWith(101, 'SIGTERM') + expect(errorSpy).toHaveBeenCalledWith( + 'Could not stop TTDash background server (permission denied): http://127.0.0.1:3101 (PID 101)', + ) + expect(processObject.exitCode).toBe(1) + } finally { + errorSpy.mockRestore() + } + }) + + it('prints a no-op 
message when no background instances are running', async () => { + const logSpy = vi.spyOn(console, 'log').mockImplementation(() => undefined) + const runtime = createTestBackgroundRuntime() + + try { + await runtime.runStopCommand() + + expect(logSpy).toHaveBeenCalledWith('No running TTDash background servers found.') + } finally { + logSpy.mockRestore() + } + }) + + it('cancels the stop command when the multi-instance prompt is left empty', async () => { + const registryEntries = [ + { + id: 'first-instance', + pid: 101, + port: 3101, + url: 'http://127.0.0.1:3101', + startedAt: '2026-04-01T08:00:00.000Z', + }, + { + id: 'second-instance', + pid: 102, + port: 3102, + url: 'http://127.0.0.1:3102', + startedAt: '2026-04-01T09:00:00.000Z', + }, + ] + const question = vi.fn(async () => '') + const close = vi.fn() + const processObject = { + ...process, + env: {}, + execPath: process.execPath, + kill: vi.fn(), + stdin: {} as NodeJS.ReadStream, + stdout: {} as NodeJS.WriteStream, + } as unknown as NodeJS.Process + const logSpy = vi.spyOn(console, 'log').mockImplementation(() => undefined) + const runtime = createTestBackgroundRuntime({ + fs: { + readFileSync: vi.fn(() => JSON.stringify(registryEntries)), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + }, + processObject, + fetchImpl: vi.fn(async (input: URL) => { + const port = Number(input.port) + return { + ok: true, + json: async () => ({ + id: port === 3101 ? 'first-instance' : 'second-instance', + port, + }), + } + }), + readlinePromises: { + createInterface: vi.fn(() => ({ question, close })), + } as unknown as typeof readlinePromisesModule, + }) + + try { + await runtime.runStopCommand() + + expect(question).toHaveBeenCalledWith( + 'Which instance should be stopped? 
[1-2, Enter=cancel] ', + ) + expect(close).toHaveBeenCalled() + expect(logSpy).toHaveBeenCalledWith('Canceled.') + expect(processObject.kill).not.toHaveBeenCalled() + } finally { + logSpy.mockRestore() + } + }) + + it('reports a timeout and log file when a stopped instance keeps reporting the same runtime', async () => { + let currentTime = 0 + const dateNowSpy = vi.spyOn(Date, 'now').mockImplementation(() => currentTime) + const registryEntries = [ + { + id: 'owned-instance', + pid: 101, + port: 3101, + url: 'http://127.0.0.1:3101', + startedAt: '2026-04-01T08:00:00.000Z', + logFile: '/tmp/ttdash-cache/background/server-timeout.log', + }, + ] + const processObject = { + ...process, + env: {}, + execPath: process.execPath, + exitCode: 0, + kill: vi.fn(), + } as unknown as NodeJS.Process + const errorSpy = vi.spyOn(console, 'error').mockImplementation(() => undefined) + const runtime = createTestBackgroundRuntime({ + fs: { + readFileSync: vi.fn(() => JSON.stringify(registryEntries)), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + }, + processObject, + fetchImpl: vi.fn(async () => ({ + ok: true, + json: async () => ({ id: 'owned-instance', port: 3101 }), + })), + sleep: vi.fn(async (durationMs: number) => { + currentTime += durationMs + }), + }) + + try { + await runtime.runStopCommand() + + expect(processObject.kill).toHaveBeenCalledWith(101, 'SIGTERM') + expect(errorSpy).toHaveBeenCalledWith( + 'TTDash background server did not respond to SIGTERM: http://127.0.0.1:3101 (PID 101)', + ) + expect(errorSpy).toHaveBeenCalledWith( + 'Log file: /tmp/ttdash-cache/background/server-timeout.log', + ) + expect(processObject.exitCode).toBe(1) + } finally { + dateNowSpy.mockRestore() + errorSpy.mockRestore() + } + }) + + it('removes a registry entry when stop finds the owned process already gone', async () => { + const registryEntries = [ + { + id: 'owned-instance', + pid: 101, + port: 3101, + url: 'http://127.0.0.1:3101', + startedAt: 
'2026-04-01T08:00:00.000Z', + }, + ] + const writeJsonAtomic = vi.fn() + const processObject = { + ...process, + env: {}, + execPath: process.execPath, + exitCode: 0, + kill: vi.fn(() => { + throw Object.assign(new Error('already gone'), { code: 'ESRCH' }) + }), + } as unknown as NodeJS.Process + const logSpy = vi.spyOn(console, 'log').mockImplementation(() => undefined) + const runtime = createTestBackgroundRuntime({ + fs: { + readFileSync: vi.fn(() => JSON.stringify(registryEntries)), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + }, + processObject, + fetchImpl: vi.fn(async () => ({ + ok: true, + json: async () => ({ id: 'owned-instance', port: 3101 }), + })), + writeJsonAtomic, + }) + + try { + await runtime.runStopCommand() + + expect(writeJsonAtomic).toHaveBeenLastCalledWith( + path.join('/tmp/ttdash-config', 'background-instances.json'), + [], + ) + expect(logSpy).toHaveBeenCalledWith( + 'Instance was already stopped and was removed from the registry: http://127.0.0.1:3101 (PID 101)', + ) + expect(processObject.exitCode).toBe(0) + } finally { + logSpy.mockRestore() + } + }) + + it('passes the explicit browser-open decision to background children', async () => { + const spawnImpl = vi.fn(() => ({ + pid: 123, + unref: vi.fn(), + })) + const runtime = createTestBackgroundRuntime({ + fs: { + readFileSync: vi.fn(() => ''), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + openSync: vi.fn(() => 42), + fchmodSync: vi.fn(), + closeSync: vi.fn(), + }, + processObject: { + ...process, + env: {}, + execPath: '/usr/bin/node', + } as NodeJS.Process, + spawnImpl, + normalizedCliArgs: ['--background'], + cliOptions: { + noOpen: false, + }, + forceOpenBrowser: true, + isProcessRunning: () => false, + }) + + await expect(runtime.startInBackground()).rejects.toThrow( + 'Could not start TTDash as a background process.', + ) + expect(spawnImpl).toHaveBeenCalledWith( + '/usr/bin/node', + ['/tmp/server.js'], + expect.objectContaining({ + env: 
expect.objectContaining({ + TTDASH_BACKGROUND_CHILD: '1', + TTDASH_FORCE_OPEN_BROWSER: '1', + }), + }), + ) + }) + it('reports the background log path when startup fails before the log can be read', async () => { const readFileSync = vi.fn(() => { throw Object.assign(new Error('missing'), { code: 'ENOENT' }) @@ -230,6 +750,86 @@ describe('background runtime', () => { expect.stringContaining('/tmp/ttdash-cache/background/server-'), 'utf-8', ) + expect(fsMock.openSync).toHaveBeenCalledWith( + expect.stringMatching( + new RegExp(`/tmp/ttdash-cache/background/server-\\d+-${process.pid}\\.log$`), + ), + 'a', + 0o600, + ) expect(fsMock.closeSync).toHaveBeenCalledWith(42) }) + + it('reports the registered child details after a successful background start', async () => { + const registryEntries = [ + { + id: 'child-runtime', + pid: 123, + port: 3101, + url: 'http://127.0.0.1:3101', + bootstrapUrl: 'http://127.0.0.1:3101/?ttdash_token=abc', + host: '127.0.0.1', + apiPrefix: '/api', + authHeader: 'Bearer child-token', + startedAt: '2026-04-01T08:00:00.000Z', + }, + ] + const spawnImpl = vi.fn(() => ({ + pid: 123, + unref: vi.fn(), + })) + const logSpy = vi.spyOn(console, 'log').mockImplementation(() => undefined) + const runtime = createTestBackgroundRuntime({ + fs: { + readFileSync: vi.fn(() => JSON.stringify(registryEntries)), + mkdirSync: vi.fn(), + chmodSync: vi.fn(), + rmSync: vi.fn(), + openSync: vi.fn(() => 42), + fchmodSync: vi.fn(), + closeSync: vi.fn(), + }, + processObject: { + ...process, + env: {}, + execPath: '/usr/bin/node', + } as NodeJS.Process, + fetchImpl: vi.fn(async () => ({ + ok: true, + json: async () => ({ id: 'child-runtime', port: 3101 }), + })), + spawnImpl, + normalizedCliArgs: ['--background', '--no-open', '--port', '3101'], + cliOptions: { + noOpen: true, + }, + }) + + try { + await runtime.startInBackground() + + expect(spawnImpl).toHaveBeenCalledWith( + '/usr/bin/node', + ['/tmp/server.js', '--no-open', '--port', '3101'], + 
expect.objectContaining({ + detached: true, + env: expect.objectContaining({ + TTDASH_BACKGROUND_CHILD: '1', + TTDASH_FORCE_OPEN_BROWSER: '0', + }), + }), + ) + expect(logSpy).toHaveBeenCalledWith('TTDash is running in the background.') + expect(logSpy).toHaveBeenCalledWith(' URL: http://127.0.0.1:3101') + expect(logSpy).toHaveBeenCalledWith( + ' Local Auth URL: http://127.0.0.1:3101/?ttdash_token=abc', + ) + expect(logSpy).toHaveBeenCalledWith(' PID: 123') + expect(logSpy).toHaveBeenCalledWith( + expect.stringMatching(/^ Log: \/tmp\/ttdash-cache\/background\/server-/), + ) + } finally { + logSpy.mockRestore() + } + }) }) diff --git a/tests/unit/command-palette-commands.test.ts b/tests/unit/command-palette-commands.test.ts new file mode 100644 index 0000000..162a44a --- /dev/null +++ b/tests/unit/command-palette-commands.test.ts @@ -0,0 +1,268 @@ +import { describe, expect, it, vi } from 'vitest' +import { + buildCommandPaletteCommands, + filterCommandItems, + getVisibleCommandItems, + normalizeSearchValue, +} from '@/components/features/command-palette/CommandPalette.commands' +import { DEFAULT_APP_SETTINGS } from '@/lib/app-settings' +import type { DashboardSectionId } from '@/types' + +type CommandPaletteCommandOptions = Parameters[0] + +const providerLabels = ['Anthropic', 'Google', 'OpenAI'] +const modelLabels = ['Claude Sonnet 4.5', 'Gemini 2.5 Pro', 'GPT-5.4'] +const expectedCommandTestIds = [ + 'command-auto-import', + 'command-settings-open', + 'command-csv', + 'command-report', + 'command-upload', + 'command-delete', + 'command-view-daily', + 'command-view-monthly', + 'command-view-yearly', + 'command-preset-7d', + 'command-preset-30d', + 'command-preset-month', + 'command-preset-year', + 'command-preset-all', + 'command-clear-providers', + 'command-clear-models', + 'command-clear-dates', + 'command-reset-all', + 'command-top', + 'command-bottom', + 'command-filters', + 'command-theme', + 'command-language-de', + 'command-language-en', + 'command-help', + 
...DEFAULT_APP_SETTINGS.sectionOrder.map((sectionId) => `command-section-${sectionId}`), + ...providerLabels.map((provider) => `command-provider-${provider}`), + ...modelLabels.map((model) => `command-model-${model}`), +].sort() + +const translations: Record = { + 'commandPalette.commands.autoImport.label': 'Auto import', + 'commandPalette.commands.autoImport.description': 'Import data from toktrack', + 'commandPalette.commands.upload.label': 'Upload file', + 'commandPalette.commands.upload.description': 'Upload JSON usage data', + 'commandPalette.commands.exportCsv.label': 'Export CSV', + 'commandPalette.commands.exportCsv.description': 'Download CSV', + 'commandPalette.commands.generateReport.label': 'Report', + 'commandPalette.commands.generateReport.labelLoading': 'Generating report', + 'commandPalette.commands.generateReport.description': 'Download PDF report', + 'commandPalette.commands.openSettings.label': 'Settings', + 'commandPalette.commands.openSettings.description': 'Open settings', + 'commandPalette.commands.delete.label': 'Delete', + 'commandPalette.commands.delete.description': 'Delete data', + 'commandPalette.commands.viewDaily.label': 'Daily view', + 'commandPalette.commands.viewDaily.description': 'Switch to daily view', + 'commandPalette.commands.viewMonthly.label': 'Monthly view', + 'commandPalette.commands.viewMonthly.description': 'Switch to monthly view', + 'commandPalette.commands.viewYearly.label': 'Yearly view', + 'commandPalette.commands.viewYearly.description': 'Switch to yearly view', + 'commandPalette.commands.preset7d.label': '7D', + 'commandPalette.commands.preset7d.description': 'Last 7 days', + 'commandPalette.commands.preset30d.label': '30D', + 'commandPalette.commands.preset30d.description': 'Last 30 days', + 'commandPalette.commands.presetMonth.label': 'Month', + 'commandPalette.commands.presetMonth.description': 'Current month', + 'commandPalette.commands.presetYear.label': 'Year', + 
'commandPalette.commands.presetYear.description': 'Current year', + 'commandPalette.commands.presetAll.label': 'All', + 'commandPalette.commands.presetAll.description': 'All data', + 'commandPalette.commands.clearProviders.label': 'Clear providers', + 'commandPalette.commands.clearProviders.description': 'Clear provider filters', + 'commandPalette.commands.clearModels.label': 'Clear models', + 'commandPalette.commands.clearModels.description': 'Clear model filters', + 'commandPalette.commands.clearDates.label': 'Clear dates', + 'commandPalette.commands.clearDates.description': 'Clear date range', + 'commandPalette.commands.resetAll.label': 'Reset all', + 'commandPalette.commands.resetAll.description': 'Reset dashboard filters', + 'commandPalette.commands.scrollTop.label': 'Top', + 'commandPalette.commands.scrollTop.description': 'Scroll to top', + 'commandPalette.commands.scrollBottom.label': 'Bottom', + 'commandPalette.commands.scrollBottom.description': 'Scroll to bottom', + 'commandPalette.commands.filters.label': 'Filters', + 'commandPalette.commands.filters.description': 'Go to filters', + 'commandPalette.commands.themeLight.label': 'Light theme', + 'commandPalette.commands.themeDark.label': 'Dark theme', + 'commandPalette.commands.themeDark.description': 'Toggle theme', + 'commandPalette.commands.languageGerman.label': 'German', + 'commandPalette.commands.languageGerman.description': 'Switch to German', + 'commandPalette.commands.languageEnglish.label': 'English', + 'commandPalette.commands.languageEnglish.description': 'Switch to English', + 'commandPalette.commands.help.label': 'Help', + 'commandPalette.commands.help.description': 'Open help', + 'commandPalette.groups.loadData': 'Load data', + 'commandPalette.groups.exports': 'Exports', + 'commandPalette.groups.maintenance': 'Maintenance', + 'commandPalette.groups.filters': 'Filters', + 'commandPalette.groups.navigation': 'Navigation', + 'commandPalette.groups.view': 'View', + 
'commandPalette.groups.language': 'Language', + 'commandPalette.groups.help': 'Help', + 'commandPalette.groups.providers': 'Providers', + 'commandPalette.groups.models': 'Models', + 'common.provider': 'Provider', + 'common.model': 'Model', +} + +function createTranslator(dictionary: Record) { + return (key: string, options?: Record) => { + if (key === 'commandPalette.commands.goToSection.label') { + return `Go to ${String(options?.section)}` + } + + if (key === 'commandPalette.commands.goToSection.description') { + return `Scroll to ${String(options?.section)}` + } + + return dictionary[key] ?? key + } +} + +const t = createTranslator(translations) + +function buildOptions(overrides: Partial = {}) { + const noop = vi.fn() + + return { + isDark: true, + availableProviders: providerLabels, + selectedProviders: [], + availableModels: modelLabels, + selectedModels: [], + hasTodaySection: true, + hasMonthSection: true, + hasRequestSection: true, + sectionVisibility: { ...DEFAULT_APP_SETTINGS.sectionVisibility }, + sectionOrder: [...DEFAULT_APP_SETTINGS.sectionOrder], + reportGenerating: false, + onToggleTheme: noop, + onExportCSV: noop, + onGenerateReport: noop, + onDelete: noop, + onUpload: noop, + onAutoImport: noop, + onOpenSettings: noop, + onScrollTo: noop, + onScrollTop: noop, + onScrollBottom: noop, + onViewModeChange: noop, + onApplyPreset: noop, + onToggleProvider: noop, + onToggleModel: noop, + onClearProviders: noop, + onClearModels: noop, + onClearDateRange: noop, + onResetAll: noop, + onHelp: noop, + onLanguageChange: noop, + t, + ...overrides, + } satisfies CommandPaletteCommandOptions +} + +describe('command palette command builder', () => { + it('builds the complete command contract for the seeded dashboard state', () => { + const commands = buildCommandPaletteCommands(buildOptions()) + + expect(commands.map((cmd) => cmd.testId ?? 
`command-${cmd.id}`).sort()).toEqual( + expectedCommandTestIds, + ) + }) + + it('keeps section commands ordered and filtered by visibility and availability', () => { + const sectionOrder: DashboardSectionId[] = [ + 'tables', + 'requestAnalysis', + 'today', + 'costAnalysis', + ] + const commands = buildCommandPaletteCommands( + buildOptions({ + hasRequestSection: false, + hasTodaySection: false, + sectionOrder, + sectionVisibility: { + ...DEFAULT_APP_SETTINGS.sectionVisibility, + costAnalysis: false, + }, + }), + ) + + expect( + commands.filter((cmd) => cmd.id.startsWith('section-')).map((cmd) => cmd.testId), + ).toEqual(['command-section-tables']) + }) + + it('keeps action command ids stable while separating load, export, and maintenance groups', () => { + const commands = buildCommandPaletteCommands(buildOptions({ hasTodaySection: false })) + const getCommandTestIdsForGroup = (group: string) => + commands.filter((cmd) => cmd.group === group).map((cmd) => cmd.testId ?? `command-${cmd.id}`) + + expect(getCommandTestIdsForGroup('Load data')).toEqual([ + 'command-auto-import', + 'command-upload', + ]) + expect(getCommandTestIdsForGroup('Exports')).toEqual(['command-csv', 'command-report']) + expect(getCommandTestIdsForGroup('Maintenance')).toEqual([ + 'command-settings-open', + 'command-delete', + ]) + }) + + it('localizes action command groups through the command builder contract', () => { + const commands = buildCommandPaletteCommands( + buildOptions({ + t: createTranslator({ + ...translations, + 'commandPalette.groups.loadData': 'Daten laden', + 'commandPalette.groups.exports': 'Exporte', + 'commandPalette.groups.maintenance': 'Wartung', + }), + }), + ) + + expect(commands.find((cmd) => cmd.id === 'auto-import')?.group).toBe('Daten laden') + expect(commands.find((cmd) => cmd.id === 'csv')?.group).toBe('Exporte') + expect(commands.find((cmd) => cmd.id === 'settings-open')?.group).toBe('Wartung') + }) + + it('normalizes aliases and ranks matching commands for 
keyboard quick-select', () => { + const commands = buildCommandPaletteCommands(buildOptions()) + const settingsResults = filterCommandItems(commands, 'einstellungen offnen') + const visibleResults = getVisibleCommandItems(settingsResults) + + expect(normalizeSearchValue('Einstellungen öffnen')).toBe('einstellungen offnen') + expect(visibleResults[0]?.id).toBe('settings-open') + expect(filterCommandItems(commands, 'not-a-real-command')).toHaveLength(0) + }) + + it('wires dynamic provider and model commands to their callbacks', () => { + const onToggleProvider = vi.fn() + const onToggleModel = vi.fn() + const commands = buildCommandPaletteCommands( + buildOptions({ + selectedProviders: ['OpenAI'], + selectedModels: ['GPT-5.4'], + onToggleProvider, + onToggleModel, + }), + ) + + const providerCommand = commands.find((cmd) => cmd.id === 'provider-OpenAI') + const modelCommand = commands.find((cmd) => cmd.id === 'model-GPT-5.4') + + expect(providerCommand?.label).toBe('Clear providers: OpenAI') + providerCommand?.action() + expect(onToggleProvider).toHaveBeenCalledWith('OpenAI') + + expect(modelCommand?.label).toBe('Clear models: GPT-5.4') + modelCommand?.action() + expect(onToggleModel).toHaveBeenCalledWith('GPT-5.4') + }) +}) diff --git a/tests/unit/csv-export.test.ts b/tests/unit/csv-export.test.ts index cbd7bfe..bd76b30 100644 --- a/tests/unit/csv-export.test.ts +++ b/tests/unit/csv-export.test.ts @@ -1,5 +1,5 @@ -import { describe, expect, it } from 'vitest' -import { generateCSV } from '@/lib/csv-export' +import { describe, expect, it, vi } from 'vitest' +import { downloadCSV, generateCSV } from '@/lib/csv-export' import { buildCsvLine, stringifyCsvCell } from '@/lib/csv' import type { DailyUsage } from '@/types' @@ -46,4 +46,70 @@ describe('csv export helpers', () => { expect(csv).toContain('"models"') expect(csv).toContain('"GPT-4 ""test"""') }) + + it('exports only the header row when the usage dataset is empty', () => { + expect(generateCSV([])).toBe( + [ + 
'"date"', + '"totalCost"', + '"totalTokens"', + '"inputTokens"', + '"outputTokens"', + '"cacheCreationTokens"', + '"cacheReadTokens"', + '"thinkingTokens"', + '"requestCount"', + '"models"', + ].join(','), + ) + }) + + it('deduplicates normalized model names in each exported day', () => { + const day = createDay('gpt-4') + const csv = generateCSV([ + { + ...day, + modelBreakdowns: [ + { ...day.modelBreakdowns[0]!, modelName: 'gpt-4' }, + { ...day.modelBreakdowns[0]!, modelName: 'GPT-4' }, + { ...day.modelBreakdowns[0]!, modelName: 'claude sonnet 4.5' }, + ], + }, + ]) + + expect(csv).toContain('"GPT-4; Claude Sonnet 4.5"') + }) + + it('downloads the generated CSV with a local-date filename and revokes the object URL', () => { + vi.useFakeTimers() + vi.setSystemTime(new Date('2026-04-13T10:30:00Z')) + const click = vi.fn() + const anchor = { + click, + download: '', + href: '', + } + const createElement = vi.fn(() => anchor) + const createObjectURL = vi.fn(() => 'blob:ttdash-export') + const revokeObjectURL = vi.fn() + + vi.stubGlobal('document', { createElement }) + vi.stubGlobal('URL', { createObjectURL, revokeObjectURL }) + + try { + downloadCSV([createDay('gpt-4')]) + + const [blob] = createObjectURL.mock.calls[0]! 
+ expect(blob).toBeInstanceOf(Blob) + expect((blob as Blob).type).toBe('text/csv;charset=utf-8;') + expect(createElement).toHaveBeenCalledWith('a') + expect(anchor.href).toBe('blob:ttdash-export') + expect(anchor.download).toBe('ttdash-export-2026-04-13.csv') + expect(click).toHaveBeenCalledTimes(1) + expect(revokeObjectURL).toHaveBeenCalledWith('blob:ttdash-export') + } finally { + vi.useRealTimers() + vi.unstubAllGlobals() + } + }) }) diff --git a/tests/unit/dashboard-preferences.test.ts b/tests/unit/dashboard-preferences.test.ts index 67d0579..585fed1 100644 --- a/tests/unit/dashboard-preferences.test.ts +++ b/tests/unit/dashboard-preferences.test.ts @@ -1,11 +1,15 @@ import { describe, expect, it } from 'vitest' import dashboardPreferences from '../../shared/dashboard-preferences.json' import { + DEFAULT_DASHBOARD_FILTERS, DASHBOARD_DATE_PRESETS, DASHBOARD_QUICK_DATE_PRESETS, DASHBOARD_SECTION_DEFINITIONS, DASHBOARD_SECTION_DEFINITION_MAP, DASHBOARD_VIEW_MODES, + normalizeDashboardDefaultFilters, + normalizeDashboardSectionOrder, + normalizeDashboardSectionVisibility, parseDashboardPreferencesConfig, resolveDashboardActivePreset, resolveDashboardPresetRange, @@ -122,6 +126,96 @@ describe('dashboard preferences config', () => { ]) }) + it('rejects malformed custom preference shapes before publishing partial configs', () => { + expect(() => parseSharedDashboardPreferencesConfig(null)).toThrow('expected an object') + expect(() => + parseSharedDashboardPreferencesConfig( + { + datePresets: 'all', + viewModes: ['daily'], + sectionDefinitions: [{ id: 'overview', domId: 'overview', labelKey: 'overview' }], + }, + { + validDatePresets: ['all'], + validViewModes: ['daily'], + validSectionIds: ['overview'], + }, + ), + ).toThrow('"datePresets" must be an array') + expect(() => + parseSharedDashboardPreferencesConfig( + { + datePresets: ['all'], + viewModes: ['daily'], + sectionDefinitions: [null], + }, + { + validDatePresets: ['all'], + validViewModes: ['daily'], + 
validSectionIds: ['overview'], + }, + ), + ).toThrow('each "sectionDefinitions" entry must be an object') + expect(() => + parseSharedDashboardPreferencesConfig( + { + datePresets: ['all'], + viewModes: ['daily'], + sectionDefinitions: [{ id: 'overview', domId: '', labelKey: 'overview' }], + }, + { + validDatePresets: ['all'], + validViewModes: ['daily'], + validSectionIds: ['overview'], + }, + ), + ).toThrow('require a domId') + expect(() => + parseSharedDashboardPreferencesConfig( + { + datePresets: ['all'], + viewModes: ['daily'], + sectionDefinitions: [{ id: 'overview', domId: 'overview', labelKey: '' }], + }, + { + validDatePresets: ['all'], + validViewModes: ['daily'], + validSectionIds: ['overview'], + }, + ), + ).toThrow('require a labelKey') + }) + + it('normalizes persisted dashboard preference branches defensively', () => { + expect(normalizeDashboardDefaultFilters(null)).toEqual(DEFAULT_DASHBOARD_FILTERS) + expect( + normalizeDashboardDefaultFilters({ + viewMode: 'yearly', + datePreset: '30d', + providers: [' OpenAI ', 'OpenAI', '', 42], + models: [' Sonnet ', null, 'Sonnet'], + }), + ).toEqual({ + viewMode: 'yearly', + datePreset: '30d', + providers: ['OpenAI'], + models: ['Sonnet'], + }) + + const visibility = normalizeDashboardSectionVisibility({ + metrics: false, + today: 'false', + unknown: false, + }) + expect(visibility.metrics).toBe(false) + expect(visibility.today).toBe(false) + expect(visibility).not.toHaveProperty('unknown') + + const orderedSections = normalizeDashboardSectionOrder(['tables', 'metrics', 'tables', 7]) + expect(orderedSections.slice(0, 2)).toEqual(['tables', 'metrics']) + expect(new Set(orderedSections).size).toBe(DASHBOARD_SECTION_DEFINITIONS.length) + }) + it('resolves preset ranges through the same shared contract used by runtime consumers', () => { const referenceDate = new Date('2026-04-06T12:00:00Z') @@ -160,5 +254,13 @@ describe('dashboard preferences config', () => { startDate: monthRange.startDate, }), ).toBeNull() + 
expect(resolveDashboardActivePreset(null)).toBe('all') + expect( + resolveDashboardActivePreset({ + referenceDate, + startDate: '2026-04-01', + endDate: '2026-04-05', + }), + ).toBeNull() }) }) diff --git a/tests/unit/data-runtime-contract.test.ts b/tests/unit/data-runtime-contract.test.ts new file mode 100644 index 0000000..1695b99 --- /dev/null +++ b/tests/unit/data-runtime-contract.test.ts @@ -0,0 +1,385 @@ +import { promises as fsPromises } from 'node:fs' +import { createRequire } from 'node:module' +import { tmpdir } from 'node:os' +import path from 'node:path' +import { describe, expect, it, vi } from 'vitest' + +const require = createRequire(import.meta.url) +// createDataRuntime expects the real Node fs surface; these contract tests avoid host writes. +const fs = require('node:fs') +const { createDataRuntime } = require('../../server/data-runtime.js') as { + createDataRuntime: (options: Record) => { + appPaths: { + dataDir: string + configDir: string + cacheDir: string + } + paths: { + dataFile: string + settingsFile: string + npxCacheDir: string + } + ensureAppDirs: (extraDirs?: string[]) => void + extractSettingsImportPayload: (payload: unknown) => unknown + extractUsageImportPayload: (payload: unknown) => unknown + isPersistedStateError: (error: unknown, kind?: string) => boolean + mergeUsageData: ( + currentData: unknown, + importedData: { + daily: Array> + totals: Record + }, + ) => { + data: { + daily: Array> + totals: Record + } + summary: { + importedDays: number + addedDays: number + unchangedDays: number + conflictingDays: number + skippedDays: number + totalDays: number + } + } + readData: () => unknown + readSettings: () => { cliAutoLoadActive: boolean } + readSettingsForWrite: () => { cliAutoLoadActive: boolean } + writeData: (data: unknown) => Promise + writeSettings: (settings: unknown) => Promise + } +} + +function createRuntime({ + env = {}, + getCliAutoLoadActive = () => false, + homeDir = '/Users/tester', + isWindows = false, + pathModule 
= path, + platform = 'darwin', +}: { + env?: Record + getCliAutoLoadActive?: () => boolean + homeDir?: string + isWindows?: boolean + pathModule?: typeof path + platform?: string +} = {}) { + return createDataRuntime({ + fs, + fsPromises, + os: { homedir: () => homeDir }, + path: pathModule, + processObject: { + env, + kill: vi.fn(() => true), + pid: 7319, + platform, + }, + normalizeIncomingData: (value: unknown) => value, + runtimeInstanceId: 'data-runtime-contract', + appDirName: 'TTDash', + appDirNameLinux: 'ttdash', + legacyDataFile: pathModule.join(homeDir, 'legacy-data.json'), + settingsBackupKind: 'ttdash-settings-backup', + usageBackupKind: 'ttdash-usage-backup', + isWindows, + secureDirMode: 0o700, + secureFileMode: 0o600, + fileMutationLockTimeoutMs: 80, + fileMutationLockStaleMs: 10, + getCliAutoLoadActive, + }) +} + +describe('data runtime contracts', () => { + it('prefers explicit data, config, and cache directories over platform defaults', async () => { + const runtimeRoot = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-runtime-contract-')) + const cacheDir = path.join(runtimeRoot, 'cache') + const configDir = path.join(runtimeRoot, 'config') + const dataDir = path.join(runtimeRoot, 'data') + + try { + const runtime = createRuntime({ + env: { + TTDASH_CACHE_DIR: cacheDir, + TTDASH_CONFIG_DIR: configDir, + TTDASH_DATA_DIR: dataDir, + }, + getCliAutoLoadActive: () => true, + platform: 'linux', + }) + + expect(runtime.appPaths).toEqual({ + cacheDir, + configDir, + dataDir, + }) + expect(runtime.paths).toMatchObject({ + dataFile: path.join(dataDir, 'data.json'), + settingsFile: path.join(configDir, 'settings.json'), + npxCacheDir: path.join(cacheDir, 'npx-cache'), + }) + expect(runtime.readSettings()).toMatchObject({ cliAutoLoadActive: true }) + } finally { + await fsPromises.rm(runtimeRoot, { recursive: true, force: true }) + } + }) + + it('resolves macOS, Windows, and XDG platform paths without touching the host profile', () => { + const 
darwinRuntime = createRuntime({ + homeDir: '/Users/alex', + platform: 'darwin', + }) + const windowsRuntime = createRuntime({ + env: { + APPDATA: 'C:\\Users\\Alex\\AppData\\Roaming', + LOCALAPPDATA: 'C:\\Users\\Alex\\AppData\\Local', + }, + homeDir: 'C:\\Users\\Alex', + isWindows: true, + pathModule: path.win32, + platform: 'win32', + }) + const linuxRuntime = createRuntime({ + env: { + XDG_CACHE_HOME: '/home/alex/.cache-root', + XDG_CONFIG_HOME: '/home/alex/.config-root', + XDG_DATA_HOME: '/home/alex/.data-root', + }, + homeDir: '/home/alex', + platform: 'linux', + }) + + expect(darwinRuntime.appPaths).toEqual({ + dataDir: '/Users/alex/Library/Application Support/TTDash', + configDir: '/Users/alex/Library/Application Support/TTDash', + cacheDir: '/Users/alex/Library/Caches/TTDash', + }) + expect(windowsRuntime.appPaths).toEqual({ + dataDir: 'C:\\Users\\Alex\\AppData\\Local\\TTDash', + configDir: 'C:\\Users\\Alex\\AppData\\Roaming\\TTDash', + cacheDir: 'C:\\Users\\Alex\\AppData\\Local\\TTDash\\Cache', + }) + expect(linuxRuntime.appPaths).toEqual({ + dataDir: '/home/alex/.data-root/ttdash', + configDir: '/home/alex/.config-root/ttdash', + cacheDir: '/home/alex/.cache-root/ttdash', + }) + }) + + it('validates backup kinds before importing settings or usage payloads', () => { + const runtime = createRuntime() + + expect( + runtime.extractSettingsImportPayload({ + kind: 'ttdash-settings-backup', + settings: { language: 'de' }, + }), + ).toEqual({ language: 'de' }) + expect( + runtime.extractUsageImportPayload({ + data: { daily: [] }, + kind: 'ttdash-usage-backup', + }), + ).toEqual({ daily: [] }) + expect(() => + runtime.extractSettingsImportPayload({ data: {}, kind: 'ttdash-usage-backup' }), + ).toThrow('This is a data backup file, not a settings file.') + expect(() => + runtime.extractUsageImportPayload({ kind: 'ttdash-settings-backup', settings: {} }), + ).toThrow('This is a settings backup file, not a data file.') + }) + + it('merges imported usage data without 
overwriting conflicts or duplicating equivalent days', () => { + const runtime = createRuntime() + const currentDay = { + date: '2026-04-01', + inputTokens: 10, + outputTokens: 5, + cacheCreationTokens: 0, + cacheReadTokens: 2, + thinkingTokens: 1, + totalTokens: 18, + totalCost: 0.25, + requestCount: 3, + modelsUsed: ['GPT-5.4', 'Claude'], + modelBreakdowns: [ + { + modelName: 'Claude', + inputTokens: 5, + outputTokens: 3, + cacheCreationTokens: 0, + cacheReadTokens: 1, + thinkingTokens: 0, + cost: 0.1, + requestCount: 1, + }, + { + modelName: 'GPT-5.4', + inputTokens: 5, + outputTokens: 2, + cacheCreationTokens: 0, + cacheReadTokens: 1, + thinkingTokens: 1, + cost: 0.15, + requestCount: 2, + }, + ], + } + const equivalentDay = { + ...currentDay, + totalCost: 0.1 + 0.2 - 0.05, + modelsUsed: ['Claude', 'GPT-5.4'], + modelBreakdowns: [ + { + ...currentDay.modelBreakdowns[1], + cost: 0.15 + Number.EPSILON, + }, + { + ...currentDay.modelBreakdowns[0], + cost: 0.1 + Number.EPSILON, + }, + ], + } + const conflictingDay = { + ...currentDay, + totalCost: 9, + } + const newDay = { + ...currentDay, + date: '2026-04-02', + totalCost: 0.75, + totalTokens: 20, + requestCount: 4, + } + + const equivalentMerge = runtime.mergeUsageData( + { daily: [currentDay], totals: { totalCost: 0.25 } }, + { + daily: [equivalentDay, newDay], + totals: { totalCost: 1 }, + }, + ) + const conflictingMerge = runtime.mergeUsageData( + { daily: [currentDay], totals: { totalCost: 0.25 } }, + { + daily: [conflictingDay], + totals: { totalCost: 9 }, + }, + ) + + expect(equivalentMerge.summary).toEqual({ + importedDays: 2, + addedDays: 1, + unchangedDays: 1, + conflictingDays: 0, + skippedDays: 0, + totalDays: 2, + }) + expect(equivalentMerge.data.daily.map((day) => day.date)).toEqual(['2026-04-01', '2026-04-02']) + expect(equivalentMerge.data.totals).toMatchObject({ + totalCost: 1, + totalTokens: 38, + requestCount: 7, + }) + expect(conflictingMerge.summary).toMatchObject({ + addedDays: 0, + 
unchangedDays: 0, + conflictingDays: 1, + skippedDays: 0, + totalDays: 1, + }) + expect(conflictingMerge.data.daily[0]?.totalCost).toBe(0.25) + }) + + it('skips imported usage days without a usable date before writing merged data', () => { + const runtime = createRuntime() + const validDay = { + date: '2026-04-03', + inputTokens: 1, + outputTokens: 2, + cacheCreationTokens: 0, + cacheReadTokens: 0, + thinkingTokens: 0, + totalTokens: 3, + totalCost: 0.1, + requestCount: 1, + modelsUsed: ['GPT-5.4'], + modelBreakdowns: [], + } + + const merge = runtime.mergeUsageData(null, { + daily: [{ ...validDay, date: '' }, validDay], + totals: { totalCost: 9 }, + }) + + expect(merge.summary).toMatchObject({ + importedDays: 2, + addedDays: 1, + skippedDays: 1, + totalDays: 1, + }) + expect(merge.data.daily.map((day) => day.date)).toEqual(['2026-04-03']) + expect(merge.data.totals).toMatchObject({ totalCost: 0.1, totalTokens: 3 }) + }) + + it('recomputes imported totals with numeric coercion when no current usage exists', () => { + const runtime = createRuntime() + + const merge = runtime.mergeUsageData(null, { + daily: [ + { + date: '2026-04-03', + inputTokens: '5', + outputTokens: '7', + cacheCreationTokens: '2', + cacheReadTokens: '3', + thinkingTokens: '4', + totalTokens: '21', + totalCost: '1.5', + requestCount: '6', + modelsUsed: ['GPT-5.4'], + modelBreakdowns: [], + }, + ], + totals: { totalCost: 999, totalTokens: 999, requestCount: 999 }, + }) + + expect(merge.summary).toMatchObject({ + importedDays: 1, + addedDays: 1, + skippedDays: 0, + totalDays: 1, + }) + expect(merge.data.daily[0]).toMatchObject({ + inputTokens: 5, + outputTokens: 7, + cacheCreationTokens: 2, + cacheReadTokens: 3, + thinkingTokens: 4, + totalTokens: 21, + totalCost: 1.5, + requestCount: 6, + }) + expect(merge.data.totals).toEqual({ + inputTokens: 5, + outputTokens: 7, + cacheCreationTokens: 2, + cacheReadTokens: 3, + thinkingTokens: 4, + totalCost: 1.5, + totalTokens: 21, + requestCount: 6, + }) + }) 
+ + it('rejects usage imports that do not contain a daily array before merging', () => { + const runtime = createRuntime() + + expect(() => runtime.mergeUsageData(null, { totals: {} } as never)).toThrow( + 'Imported data must contain a daily array.', + ) + }) +}) diff --git a/tests/unit/data-transforms.test.ts b/tests/unit/data-transforms.test.ts index f73c89d..e891f6c 100644 --- a/tests/unit/data-transforms.test.ts +++ b/tests/unit/data-transforms.test.ts @@ -1,5 +1,5 @@ import { describe, expect, it } from 'vitest' -import { getCurrentMonthForecastData } from '@/lib/data-transforms' +import { buildDashboardChartTransforms, getCurrentMonthForecastData } from '@/lib/data-transforms' import type { DailyUsage } from '@/types' describe('getCurrentMonthForecastData', () => { @@ -142,3 +142,19 @@ describe('getCurrentMonthForecastData', () => { ).toEqual([]) }) }) + +describe('buildDashboardChartTransforms', () => { + it('includes weekday indices for empty weekday chart data', () => { + const transforms = buildDashboardChartTransforms([], 'en-US') + + expect(transforms.weekdayData).toEqual([ + { day: 'Mo', cost: 0, weekdayIndex: 0 }, + { day: 'Tu', cost: 0, weekdayIndex: 1 }, + { day: 'We', cost: 0, weekdayIndex: 2 }, + { day: 'Th', cost: 0, weekdayIndex: 3 }, + { day: 'Fr', cost: 0, weekdayIndex: 4 }, + { day: 'Sa', cost: 0, weekdayIndex: 5 }, + { day: 'Su', cost: 0, weekdayIndex: 6 }, + ]) + }) +}) diff --git a/tests/unit/e2e-auth-helpers.test.ts b/tests/unit/e2e-auth-helpers.test.ts new file mode 100644 index 0000000..88b3a63 --- /dev/null +++ b/tests/unit/e2e-auth-helpers.test.ts @@ -0,0 +1,66 @@ +import { afterEach, describe, expect, it } from 'vitest' +import { createApiAuthHeaders, gotoDashboard, readLocalAuthSession } from '../e2e/helpers' + +const originalAuthorizationHeader = process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER +const originalBootstrapUrl = process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL + +function restoreEnv() { + if (originalAuthorizationHeader === 
undefined) { + delete process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER + } else { + process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER = originalAuthorizationHeader + } + + if (originalBootstrapUrl === undefined) { + delete process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL + } else { + process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL = originalBootstrapUrl + } +} + +describe('E2E auth helpers', () => { + afterEach(() => { + restoreEnv() + }) + + it('uses fixture-provided auth environment instead of reading session files', () => { + process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER = 'Bearer test-token' + process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL = 'http://127.0.0.1:3015/?ttdash_token=test-token' + + expect(readLocalAuthSession()).toEqual({ + authorizationHeader: 'Bearer test-token', + bootstrapUrl: 'http://127.0.0.1:3015/?ttdash_token=test-token', + }) + expect(createApiAuthHeaders()).toEqual({ Authorization: 'Bearer test-token' }) + }) + + it('fails clearly when fixture auth environment is missing', () => { + delete process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER + process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL = 'http://127.0.0.1:3015/?ttdash_token=test-token' + + expect(() => createApiAuthHeaders()).toThrow( + 'PLAYWRIGHT_TEST_AUTHORIZATION_HEADER is required for Playwright API authentication', + ) + }) + + it('fails clearly when bootstrap URL is missing', () => { + process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER = 'Bearer test-token' + delete process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL + + expect(() => readLocalAuthSession()).toThrow( + 'PLAYWRIGHT_TEST_BOOTSTRAP_URL is required for Playwright API authentication', + ) + }) + + it('navigates to the fixture-provided bootstrap URL', async () => { + process.env.PLAYWRIGHT_TEST_AUTHORIZATION_HEADER = 'Bearer test-token' + process.env.PLAYWRIGHT_TEST_BOOTSTRAP_URL = 'http://127.0.0.1:3015/?ttdash_token=test-token' + const page = { + goto: async (url: string) => ({ url }), + } + + await expect(gotoDashboard(page as 
never)).resolves.toEqual({ + url: 'http://127.0.0.1:3015/?ttdash_token=test-token', + }) + }) +}) diff --git a/tests/unit/http-router-mutations.test.ts b/tests/unit/http-router-mutations.test.ts index 6c94a4b..a8128bf 100644 --- a/tests/unit/http-router-mutations.test.ts +++ b/tests/unit/http-router-mutations.test.ts @@ -1,137 +1,5 @@ -import path from 'node:path' -import { createRequire } from 'node:module' import { describe, expect, it, vi } from 'vitest' - -const require = createRequire(import.meta.url) -const { createHttpRouter } = require('../../server/http-router.js') as { - createHttpRouter: (options: Record) => { - handleServerRequest: ( - req: { url: string; method: string; headers: Record }, - res: MockResponse, - ) => Promise - } -} - -class MockResponse { - status = 0 - headers: Record = {} - body = '' - - writeHead(status: number, headers: Record) { - this.status = status - this.headers = headers - } - - end(body?: string | Buffer) { - this.body = Buffer.isBuffer(body) ? body.toString('utf8') : (body ?? '') - } -} - -function createRouter({ - autoImportRuntimeOverrides = {}, - dataRuntimeOverrides = {}, - readBody = vi.fn(async () => ({})), -}: { - autoImportRuntimeOverrides?: Record - dataRuntimeOverrides?: Record - readBody?: () => Promise -} = {}) { - const dataRuntime = { - extractSettingsImportPayload: vi.fn( - (payload: { settings?: unknown }) => payload.settings ?? payload, - ), - extractUsageImportPayload: vi.fn((payload: { data?: unknown }) => payload.data ?? payload), - isPayloadTooLargeError: vi.fn( - (error: { code?: string }) => error?.code === 'PAYLOAD_TOO_LARGE', - ), - isPersistedStateError: vi.fn(() => false), - mergeUsageData: vi.fn((_currentData, importedData) => ({ - data: importedData, - summary: { - importedDays: Array.isArray(importedData?.daily) ? importedData.daily.length : 0, - addedDays: Array.isArray(importedData?.daily) ? 
importedData.daily.length : 0, - unchangedDays: 0, - conflictingDays: 0, - totalDays: Array.isArray(importedData?.daily) ? importedData.daily.length : 0, - }, - })), - normalizeIncomingData: vi.fn((payload) => payload), - normalizeSettings: vi.fn((payload) => payload), - readData: vi.fn(() => null), - readSettings: vi.fn(() => ({ language: 'en' })), - unlinkIfExists: vi.fn(), - _updateDataLoadStateUnlocked: vi.fn(async () => undefined), - updateDataLoadState: vi.fn(async () => undefined), - updateSettings: vi.fn(async (body) => ({ ok: true, body })), - withFileMutationLock: vi.fn(async (_filePath: string, operation: () => Promise) => - operation(), - ), - withSettingsAndDataMutationLock: vi.fn(async (operation: () => Promise) => - operation(), - ), - writeData: vi.fn(async () => undefined), - writeSettings: vi.fn(async () => undefined), - paths: { - dataFile: '/data/data.json', - settingsFile: '/data/settings.json', - }, - ...dataRuntimeOverrides, - } - - const router = createHttpRouter({ - fs: { promises: { readFile: vi.fn() } }, - path, - staticRoot: '/app/dist', - securityHeaders: { 'X-Test-Security': '1' }, - httpUtils: { - json: (res: MockResponse, status: number, payload: unknown) => { - res.writeHead(status, { 'Content-Type': 'application/json; charset=utf-8' }) - res.end(JSON.stringify(payload)) - }, - readBody, - resolveApiPath: (pathname: string) => - pathname === '/api' - ? '/' - : pathname.startsWith('/api/') - ? 
pathname.slice('/api'.length) - : null, - sendBuffer: vi.fn(), - validateMutationRequest: vi.fn(() => null), - validateRequestHost: vi.fn(() => null), - }, - remoteAuth: { - resolveBootstrapResponse: () => null, - validateApiRequest: () => null, - }, - dataRuntime, - autoImportRuntime: { - lookupLatestToktrackVersion: vi.fn(async () => ({ - configuredVersion: '1.0.0', - latestVersion: '1.0.0', - isLatest: true, - lookupStatus: 'ok', - })), - ...autoImportRuntimeOverrides, - }, - generatePdfReport: vi.fn(), - getRuntimeSnapshot: vi.fn(), - }) - - return { dataRuntime, readBody, router } -} - -async function request( - router: ReturnType['router'], - url: string, - method: string, -) { - const res = new MockResponse() - await router.handleServerRequest( - { url, method, headers: { 'content-type': 'application/json' } }, - res, - ) - return { res, body: JSON.parse(res.body) } -} +import { createRouter, createValidUsageData, request } from './http-router-test-helpers' describe('HTTP router mutation errors', () => { it('keeps malformed settings requests as client errors', async () => { @@ -282,7 +150,7 @@ describe('HTTP router mutation errors', () => { }) it('returns server errors for upload write failures', async () => { - const usageData = { daily: [{ date: '2026-04-27' }], totals: { totalCost: 1 } } + const usageData = createValidUsageData() const { router } = createRouter({ readBody: vi.fn(async () => usageData), dataRuntimeOverrides: { @@ -298,8 +166,78 @@ describe('HTTP router mutation errors', () => { expect(body).toEqual({ message: 'Server error' }) }) + it('rejects normalized upload payloads that are not valid usage data', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ daily: [{ date: 42 }], totals: {} })), + }) + + const { res, body } = await request(router, '/api/upload', 'POST') + + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid JSON' }) + 
expect(dataRuntime.writeData).not.toHaveBeenCalled() + }) + + it('rejects normalized upload payloads with partial totals', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ + daily: [{ date: '2026-04-27' }], + totals: {}, + })), + }) + + const { res, body } = await request(router, '/api/upload', 'POST') + + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid JSON' }) + expect(dataRuntime.writeData).not.toHaveBeenCalled() + }) + + it('rejects normalized upload payloads missing detailed totals', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ + daily: [{ date: '2026-04-27' }], + totals: { + totalCost: 1, + totalTokens: 100, + requestCount: 2, + }, + })), + }) + + const { res, body } = await request(router, '/api/upload', 'POST') + + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid JSON' }) + expect(dataRuntime.writeData).not.toHaveBeenCalled() + }) + + it('rejects normalized upload payloads with non-finite totals', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ + daily: [{ date: '2026-04-27' }], + totals: { + inputTokens: 60, + outputTokens: 20, + cacheCreationTokens: 5, + cacheReadTokens: 10, + thinkingTokens: 5, + totalCost: Number.POSITIVE_INFINITY, + totalTokens: 10, + requestCount: 1, + }, + })), + }) + + const { res, body } = await request(router, '/api/upload', 'POST') + + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid JSON' }) + expect(dataRuntime.writeData).not.toHaveBeenCalled() + }) + it('returns server errors for usage import write failures', async () => { - const usageData = { daily: [{ date: '2026-04-27' }], totals: { totalCost: 1 } } + const usageData = createValidUsageData() const { router } = createRouter({ readBody: vi.fn(async () => ({ data: usageData })), dataRuntimeOverrides: { @@ -315,21 +253,79 @@ describe('HTTP router mutation errors', 
() => { expect(body).toEqual({ message: 'Server error' }) }) - it('returns service unavailable when toktrack version lookup throws', async () => { - const { router } = createRouter({ - autoImportRuntimeOverrides: { - lookupLatestToktrackVersion: vi.fn(async () => { - throw new Error('registry unavailable') - }), - }, + it('rejects normalized usage imports that are not valid usage data', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ data: { daily: [{ date: 42 }], totals: {} } })), }) - const { res, body } = await request(router, '/api/toktrack/version-status', 'GET') + const { res, body } = await request(router, '/api/usage/import', 'POST') - expect(res.status).toBe(503) - expect(body).toEqual({ - message: 'Service Unavailable', - detail: 'registry unavailable', + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid usage backup file' }) + expect(dataRuntime.writeData).not.toHaveBeenCalled() + }) + + it('rejects normalized usage imports with partial totals', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ + data: { + daily: [{ date: '2026-04-27' }], + totals: {}, + }, + })), }) + + const { res, body } = await request(router, '/api/usage/import', 'POST') + + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid usage backup file' }) + expect(dataRuntime.writeData).not.toHaveBeenCalled() + }) + + it('rejects normalized usage imports missing detailed totals', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ + data: { + daily: [{ date: '2026-04-27' }], + totals: { + totalCost: 1, + totalTokens: 100, + requestCount: 2, + }, + }, + })), + }) + + const { res, body } = await request(router, '/api/usage/import', 'POST') + + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid usage backup file' }) + expect(dataRuntime.writeData).not.toHaveBeenCalled() + }) + + it('rejects 
normalized usage imports with non-finite totals', async () => { + const { dataRuntime, router } = createRouter({ + readBody: vi.fn(async () => ({ + data: { + daily: [{ date: '2026-04-27' }], + totals: { + inputTokens: 60, + outputTokens: 20, + cacheCreationTokens: 5, + cacheReadTokens: Number.NaN, + thinkingTokens: 5, + totalCost: 1, + totalTokens: 100, + requestCount: 2, + }, + }, + })), + }) + + const { res, body } = await request(router, '/api/usage/import', 'POST') + + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid usage backup file' }) + expect(dataRuntime.writeData).not.toHaveBeenCalled() }) }) diff --git a/tests/unit/http-router-static.test.ts b/tests/unit/http-router-static.test.ts index 12e3b6c..63d14d1 100644 --- a/tests/unit/http-router-static.test.ts +++ b/tests/unit/http-router-static.test.ts @@ -1,91 +1,25 @@ -import path from 'node:path' -import { createRequire } from 'node:module' -import { describe, expect, it, vi } from 'vitest' - -const require = createRequire(import.meta.url) -const { createHttpRouter } = require('../../server/http-router.js') as { - createHttpRouter: (options: Record) => { - handleServerRequest: ( - req: { url: string; method: string; headers: Record }, - res: MockResponse, - ) => Promise - } -} - -class MockResponse { - status = 0 - headers: Record = {} - body = '' - - writeHead(status: number, headers: Record) { - this.status = status - this.headers = headers - } - - end(body?: string | Buffer) { - this.body = Buffer.isBuffer(body) ? body.toString('utf8') : (body ?? 
'') - } -} - -function createRouter(readFile: (filePath: string) => Promise) { - return createHttpRouter({ - fs: { - promises: { - readFile, - }, - }, - path, - staticRoot: '/app/dist', - securityHeaders: { 'X-Test-Security': '1' }, - httpUtils: { - json: (res: MockResponse, status: number, payload: unknown) => { - res.writeHead(status, { 'Content-Type': 'application/json; charset=utf-8' }) - res.end(JSON.stringify(payload)) - }, - readBody: vi.fn(), - resolveApiPath: () => null, - sendBuffer: vi.fn(), - validateMutationRequest: vi.fn(), - validateRequestHost: () => null, - }, - remoteAuth: { - resolveBootstrapResponse: () => null, - validateApiRequest: () => null, - }, - dataRuntime: { - extractSettingsImportPayload: vi.fn(), - extractUsageImportPayload: vi.fn(), - isPayloadTooLargeError: vi.fn(), - isPersistedStateError: vi.fn(), - mergeUsageData: vi.fn(), - readData: vi.fn(), - readSettings: vi.fn(), - unlinkIfExists: vi.fn(), - updateDataLoadState: vi.fn(), - updateSettings: vi.fn(), - withFileMutationLock: vi.fn(), - withSettingsAndDataMutationLock: vi.fn(), - writeData: vi.fn(), - writeSettings: vi.fn(), - normalizeSettings: vi.fn(), - paths: { - dataFile: '/data/data.json', - settingsFile: '/data/settings.json', - }, - }, - autoImportRuntime: {}, - generatePdfReport: vi.fn(), - getRuntimeSnapshot: vi.fn(), - }) -} +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' +import { MockResponse, createRouter } from './http-router-test-helpers' describe('HTTP router static file handling', () => { + let consoleError: ReturnType + + beforeEach(() => { + consoleError = vi.spyOn(console, 'error').mockImplementation(() => undefined) + }) + + afterEach(() => { + vi.restoreAllMocks() + }) + it('falls back to index.html when a static file is not found', async () => { - const router = createRouter(async (filePath) => { - if (filePath.endsWith('index.html')) { - return Buffer.from('') - } - throw Object.assign(new Error('missing'), { code: 'ENOENT' }) + 
const { router } = createRouter({ + readFile: async (filePath) => { + if (filePath.endsWith('index.html')) { + return Buffer.from('') + } + throw Object.assign(new Error('missing'), { code: 'ENOENT' }) + }, }) const res = new MockResponse() @@ -93,44 +27,144 @@ describe('HTTP router static file handling', () => { expect(res.status).toBe(200) expect(res.body).toContain('') + expect(consoleError).not.toHaveBeenCalled() + }) + + it('logs failed SPA fallback reads before returning a server error', async () => { + const fallbackError = Object.assign(new Error('index missing'), { code: 'ENOENT' }) + const { router } = createRouter({ + readFile: async (filePath) => { + if (filePath.endsWith('index.html')) { + throw fallbackError + } + throw Object.assign(new Error('missing'), { code: 'ENOENT' }) + }, + }) + const res = new MockResponse() + + await router.handleServerRequest({ url: '/dashboard', method: 'GET', headers: {} }, res) + + expect(res.status).toBe(500) + expect(res.headers['x-test-security']).toBe('1') + expect(JSON.parse(res.body)).toEqual({ message: 'Internal Server Error' }) + expect(consoleError).toHaveBeenCalledWith( + 'SPA fallback read failed', + expect.objectContaining({ + error: fallbackError, + indexPath: '/app/dist/index.html', + safePath: '/dashboard', + staticRoot: '/app/dist', + }), + ) + }) + + it('preserves base security headers when preparing HTML responses', async () => { + const { router } = createRouter({ + readFile: async (filePath) => { + if (filePath.endsWith('index.html')) { + return Buffer.from('') + } + throw Object.assign(new Error('missing'), { code: 'ENOENT' }) + }, + prepareHtmlResponse: (html) => ({ + body: html.replace('', ''), + headers: { + 'Content-Security-Policy': "default-src 'self'; script-src 'nonce-test'", + }, + }), + }) + const res = new MockResponse() + + await router.handleServerRequest({ url: '/dashboard', method: 'GET', headers: {} }, res) + + expect(res.status).toBe(200) + 
expect(res.headers['x-test-security']).toBe('1') + expect(res.headers['content-security-policy']).toBe( + "default-src 'self'; script-src 'nonce-test'", + ) + expect(res.body).toContain('ttdash-csp-nonce') + }) + + it('returns a bad request with static security headers for null-byte paths', async () => { + const readFile = vi.fn(async () => Buffer.from('')) + const { router } = createRouter({ readFile }) + const res = new MockResponse() + + await router.handleServerRequest({ url: '/assets/app%00.js', method: 'GET', headers: {} }, res) + + expect(res.status).toBe(400) + expect(res.headers['x-test-security']).toBe('1') + expect(JSON.parse(res.body)).toEqual({ message: 'Invalid request path' }) + expect(readFile).not.toHaveBeenCalled() + }) + + it('returns access denied with static security headers for escaped paths', async () => { + const readFile = vi.fn(async () => Buffer.from('')) + const { router } = createRouter({ readFile }) + const res = new MockResponse() + + await router.handleServerRequest({ url: '/..%2Fpackage.json', method: 'GET', headers: {} }, res) + + expect(res.status).toBe(403) + expect(res.headers['x-test-security']).toBe('1') + expect(JSON.parse(res.body)).toEqual({ message: 'Access denied' }) + expect(readFile).not.toHaveBeenCalled() }) it('returns not found for missing static assets instead of serving the SPA shell', async () => { - const router = createRouter(async (filePath) => { - if (filePath.endsWith('index.html')) { - return Buffer.from('') - } - throw Object.assign(new Error('missing'), { code: 'ENOENT' }) + const { router } = createRouter({ + readFile: async (filePath) => { + if (filePath.endsWith('index.html')) { + return Buffer.from('') + } + throw Object.assign(new Error('missing'), { code: 'ENOENT' }) + }, }) const res = new MockResponse() await router.handleServerRequest({ url: '/assets/app.js', method: 'GET', headers: {} }, res) expect(res.status).toBe(404) + expect(res.headers['x-test-security']).toBe('1') 
expect(JSON.parse(res.body)).toEqual({ message: 'Not Found' }) }) it('rejects directory reads instead of treating directories as static files', async () => { - const router = createRouter(async () => { - throw Object.assign(new Error('directory'), { code: 'EISDIR' }) + const { router } = createRouter({ + readFile: async () => { + throw Object.assign(new Error('directory'), { code: 'EISDIR' }) + }, }) const res = new MockResponse() await router.handleServerRequest({ url: '/assets', method: 'GET', headers: {} }, res) expect(res.status).toBe(403) + expect(res.headers['x-test-security']).toBe('1') expect(JSON.parse(res.body)).toEqual({ message: 'Access denied' }) }) it('returns a bad request for invalid static read paths', async () => { - const router = createRouter(async () => { - throw Object.assign(new Error('invalid'), { code: 'ERR_INVALID_ARG_VALUE' }) + const { router } = createRouter({ + readFile: async () => { + throw Object.assign(new Error('invalid'), { code: 'ERR_INVALID_ARG_VALUE' }) + }, }) const res = new MockResponse() await router.handleServerRequest({ url: '/asset.js', method: 'GET', headers: {} }, res) expect(res.status).toBe(400) + expect(res.headers['x-test-security']).toBe('1') expect(JSON.parse(res.body)).toEqual({ message: 'Invalid request path' }) + expect(consoleError).toHaveBeenCalledWith( + 'Static file read failed', + expect.objectContaining({ + error: expect.objectContaining({ code: 'ERR_INVALID_ARG_VALUE' }), + reqPath: '/app/dist/asset.js', + safePath: '/asset.js', + staticRoot: '/app/dist', + }), + ) }) }) diff --git a/tests/unit/http-router-test-helpers.ts b/tests/unit/http-router-test-helpers.ts new file mode 100644 index 0000000..18a1918 --- /dev/null +++ b/tests/unit/http-router-test-helpers.ts @@ -0,0 +1,248 @@ +import path from 'node:path' +import { createRequire } from 'node:module' +import { vi } from 'vitest' + +const require = createRequire(import.meta.url) +const { createHttpRouter } = require('../../server/http-router.js') as 
{ + createHttpRouter: (options: Record) => { + handleServerRequest: ( + req: { + url: string + method: string + headers: Record + on?: (event: string, listener: () => void) => unknown + }, + res: MockResponse, + ) => Promise + } +} + +type HeaderValue = string | number | string[] + +export class MockResponse { + status = 0 + headers: Record = {} + body = '' + + setHeader(name: string, value: HeaderValue) { + const key = name.toLowerCase() + const previous = this.headers[key] + + if (previous === undefined) { + this.headers[key] = value + return + } + + const previousValues = Array.isArray(previous) ? previous : [String(previous)] + const nextValues = Array.isArray(value) ? value.map(String) : [String(value)] + this.headers[key] = [...previousValues, ...nextValues] + } + + writeHead(status: number, headers: Record) { + this.status = status + const normalizedHeaders = Object.fromEntries( + Object.entries(headers).map(([key, value]) => [key.toLowerCase(), value]), + ) + this.headers = { ...this.headers, ...normalizedHeaders } + } + + getHeader(name: string) { + return this.headers[name.toLowerCase()] + } + + write(body: string | Buffer) { + this.body += Buffer.isBuffer(body) ? body.toString('utf8') : body + } + + end(body?: string | Buffer) { + if (body !== undefined) { + this.body += Buffer.isBuffer(body) ? 
body.toString('utf8') : body + } + } +} + +export function createValidUsageData() { + return { + daily: [{ date: '2026-04-27' }], + totals: { + inputTokens: 60, + outputTokens: 20, + cacheCreationTokens: 5, + cacheReadTokens: 10, + thinkingTokens: 5, + totalCost: 1, + totalTokens: 100, + requestCount: 2, + }, + } +} + +export function createRouter({ + autoImportRuntimeOverrides = {}, + dataRuntimeOverrides = {}, + generatePdfReport = vi.fn(async () => ({ + buffer: Buffer.from(''), + filename: 'test.pdf', + })), + getRuntimeSnapshot = vi.fn(() => ({ + id: 'runtime-1', + mode: 'foreground', + port: 3000, + url: 'http://127.0.0.1:3000', + })), + httpUtilsOverrides = {}, + prepareHtmlResponse, + readBody = vi.fn(async () => ({})), + readFile = vi.fn(), + remoteAuthOverrides = {}, + securityHeaders = { 'X-Test-Security': '1' }, + staticRoot = '/app/dist', +}: { + autoImportRuntimeOverrides?: Record + dataRuntimeOverrides?: Record + generatePdfReport?: () => Promise<{ buffer: Buffer; filename: string }> + getRuntimeSnapshot?: () => unknown + httpUtilsOverrides?: Record + prepareHtmlResponse?: (html: string) => { body: string; headers: Record } + readBody?: () => Promise + readFile?: (filePath: string) => Promise + remoteAuthOverrides?: Record + securityHeaders?: Record + staticRoot?: string +} = {}) { + const dataRuntime = { + extractSettingsImportPayload: vi.fn( + (payload: { settings?: unknown }) => payload.settings ?? payload, + ), + extractUsageImportPayload: vi.fn((payload: { data?: unknown }) => payload.data ?? payload), + isPayloadTooLargeError: vi.fn( + (error: { code?: string }) => error?.code === 'PAYLOAD_TOO_LARGE', + ), + isPersistedStateError: vi.fn(() => false), + mergeUsageData: vi.fn((_currentData, importedData) => ({ + data: importedData, + summary: { + importedDays: Array.isArray(importedData?.daily) ? importedData.daily.length : 0, + addedDays: Array.isArray(importedData?.daily) ? 
importedData.daily.length : 0, + unchangedDays: 0, + conflictingDays: 0, + skippedDays: 0, + totalDays: Array.isArray(importedData?.daily) ? importedData.daily.length : 0, + }, + })), + normalizeIncomingData: vi.fn((payload) => payload), + normalizeSettings: vi.fn((payload) => payload), + readData: vi.fn(() => null), + readSettings: vi.fn(() => ({ language: 'en' })), + unlinkIfExists: vi.fn(), + _updateDataLoadStateUnlocked: vi.fn(async () => undefined), + updateDataLoadState: vi.fn(async () => undefined), + updateSettings: vi.fn(async (body) => ({ ok: true, body })), + withFileMutationLock: vi.fn(async (_filePath: string, operation: () => Promise) => + operation(), + ), + withSettingsAndDataMutationLock: vi.fn(async (operation: () => Promise) => + operation(), + ), + writeData: vi.fn(async () => undefined), + writeSettings: vi.fn(async () => undefined), + paths: { + dataFile: '/data/data.json', + settingsFile: '/data/settings.json', + }, + ...dataRuntimeOverrides, + } + + const router = createHttpRouter({ + fs: { promises: { readFile } }, + path, + prepareHtmlResponse, + staticRoot, + securityHeaders, + httpUtils: { + json: ( + res: MockResponse, + status: number, + payload: unknown, + headers: Record = {}, + ) => { + res.writeHead(status, { + 'Content-Type': 'application/json; charset=utf-8', + ...headers, + }) + res.end(JSON.stringify(payload)) + }, + readBody, + resolveApiPath: (pathname: string) => + pathname === '/api' + ? '/' + : pathname.startsWith('/api/') + ? 
pathname.slice('/api'.length) + : null, + sendBuffer: ( + res: MockResponse, + status: number, + headers: Record, + buffer: Buffer, + ) => { + res.writeHead(status, headers) + res.end(buffer) + }, + validateMutationRequest: vi.fn(() => null), + validateRequestHost: vi.fn(() => null), + ...httpUtilsOverrides, + }, + remoteAuth: { + resolveBootstrapResponse: () => null, + validateApiRequest: () => null, + ...remoteAuthOverrides, + }, + dataRuntime, + autoImportRuntime: { + lookupLatestToktrackVersion: vi.fn(async () => ({ + configuredVersion: '1.0.0', + latestVersion: '1.0.0', + isLatest: true, + lookupStatus: 'ok', + })), + ...autoImportRuntimeOverrides, + }, + generatePdfReport, + getRuntimeSnapshot, + }) + + return { dataRuntime, generatePdfReport, getRuntimeSnapshot, readBody, router } +} + +export async function requestRaw( + router: ReturnType['router'], + url: string, + method: string, + headers: Record = { 'content-type': 'application/json' }, +) { + const res = new MockResponse() + const req = { + url, + method, + headers, + on: vi.fn(), + } + await router.handleServerRequest(req, res) + return { req, res } +} + +export async function request( + router: ReturnType['router'], + url: string, + method: string, + headers: Record = { 'content-type': 'application/json' }, +) { + const { res } = await requestRaw(router, url, method, headers) + try { + return { res, body: JSON.parse(res.body) } + } catch { + throw new Error( + `Failed to parse JSON response (status=${res.status}): ${res.body.slice(0, 200)}`, + ) + } +} diff --git a/tests/unit/playwright-config.test.ts b/tests/unit/playwright-config.test.ts index 553015c..d0d1bd0 100644 --- a/tests/unit/playwright-config.test.ts +++ b/tests/unit/playwright-config.test.ts @@ -1,7 +1,19 @@ +import { readdir, readFile } from 'node:fs/promises' +import path from 'node:path' import { afterEach, describe, expect, it, vi } from 'vitest' import packageJson from '../../package.json' type PlaywrightConfig = { + fullyParallel?: 
boolean + reporter?: unknown + testDir?: string + timeout?: number + use?: { + screenshot?: string + trace?: string + video?: string + } + webServer?: unknown workers?: number } @@ -19,6 +31,18 @@ async function importPlaywrightConfig(ci: string | undefined): Promise filename.endsWith('.spec.ts')) + + return Promise.all( + filenames.sort().map(async (filename) => ({ + filename, + source: await readFile(path.join(e2eDir, filename), 'utf8'), + })), + ) +} + describe('playwright config', () => { afterEach(() => { if (originalCi === undefined) { @@ -28,14 +52,79 @@ describe('playwright config', () => { } }) - it('keeps single-worker mode for CI only', async () => { - await expect(importPlaywrightConfig(undefined)).resolves.toMatchObject({ + it('uses worker-scoped servers instead of the global Playwright web server', async () => { + const localConfig = await importPlaywrightConfig(undefined) + + expect(localConfig).toMatchObject({ + fullyParallel: true, + testDir: './tests/e2e', + timeout: 30_000, + use: { + screenshot: 'only-on-failure', + trace: 'on-first-retry', + video: 'retain-on-failure', + }, workers: undefined, }) - await expect(importPlaywrightConfig('true')).resolves.toMatchObject({ - workers: 1, + expect(localConfig.webServer).toBeUndefined() + + const ciConfig = await importPlaywrightConfig('true') + + expect(ciConfig).toMatchObject({ + fullyParallel: true, + workers: 2, }) - expect(packageJson.scripts['test:e2e:ci']).toBe('playwright test --workers=1') + expect(ciConfig.webServer).toBeUndefined() + expect(packageJson.scripts['test:e2e:ci']).toBe('playwright test --workers=2') expect(packageJson.scripts['test:e2e:ci']).not.toContain('CI=1') + }, 10_000) + + it('keeps Playwright reports in stable job-scoped output paths', async () => { + const localConfig = await importPlaywrightConfig(undefined) + + expect(localConfig.reporter).toEqual([ + ['list'], + ['html', { outputFolder: 'playwright-report', open: 'never' }], + ['junit', { outputFile: 
'test-results/playwright.junit.xml' }], + ]) + }) + + it('keeps browser specs on the shared worker-isolated fixture', async () => { + const specs = await readE2ESpecs() + + expect(specs.map((spec) => spec.filename)).toEqual([ + 'command-palette.spec.ts', + 'dashboard-forecast-filters.spec.ts', + 'dashboard-load-upload.spec.ts', + 'dashboard-reporting.spec.ts', + 'dashboard-settings-backups.spec.ts', + ]) + + for (const spec of specs) { + expect(spec.source).toContain("from './fixtures'") + expect(spec.source).not.toContain("from '@playwright/test'") + } + }) + + it('keeps each browser spec isolated through the shared state-reset helpers', async () => { + const specs = await readE2ESpecs() + + for (const spec of specs) { + expect( + spec.source.includes('resetAppState(') || spec.source.includes('prepareDashboard('), + `${spec.filename} must reset or prepare isolated app state`, + ).toBe(true) + } + }) + + it('keeps Playwright as a representative smoke suite instead of a contract matrix', async () => { + const specs = await readE2ESpecs() + const totalTests = specs.reduce((count, spec) => { + return count + (spec.source.match(/\btest\(/g) ?? 
[]).length + }, 0) + + expect(totalTests).toBe(11) + expect(packageJson.scripts['test:e2e:parallel']).toBe('npm run build:app && playwright test') + expect(packageJson.scripts['test:e2e']).toBe('npm run test:e2e:parallel') }) }) diff --git a/tests/unit/process-utils.test.ts b/tests/unit/process-utils.test.ts index 4643651..2153185 100644 --- a/tests/unit/process-utils.test.ts +++ b/tests/unit/process-utils.test.ts @@ -1,9 +1,11 @@ import { createRequire } from 'node:module' -import { describe, expect, it } from 'vitest' +import { describe, expect, it, vi } from 'vitest' const require = createRequire(import.meta.url) -const { formatDateTime } = require('../../server/process-utils.js') as { +const { formatDateTime, isProcessRunning, sleep } = require('../../server/process-utils.js') as { formatDateTime: (value: string | number | Date, locale?: string) => string + isProcessRunning: (pid: number, processObject?: Pick) => boolean + sleep: (durationMs: number) => Promise } describe('process utilities', () => { @@ -11,4 +13,44 @@ describe('process utilities', () => { expect(formatDateTime('2026-04-27T12:00:00Z', 'en-US')).toContain('4/27/26') expect(formatDateTime('not-a-date')).toBe('') }) + + it('detects running, missing, and permission-protected processes', () => { + expect(isProcessRunning(0)).toBe(false) + expect(isProcessRunning(-1)).toBe(false) + expect(isProcessRunning(123, { kill: vi.fn() })).toBe(true) + expect( + isProcessRunning(123, { + kill: vi.fn(() => { + throw Object.assign(new Error('missing'), { code: 'ESRCH' }) + }), + }), + ).toBe(false) + expect( + isProcessRunning(123, { + kill: vi.fn(() => { + throw Object.assign(new Error('permission denied'), { code: 'EPERM' }) + }), + }), + ).toBe(true) + }) + + it('resolves sleep after the requested timer duration', async () => { + vi.useFakeTimers() + + try { + let settled = false + const sleepPromise = sleep(250).then(() => { + settled = true + }) + + await vi.advanceTimersByTimeAsync(249) + 
expect(settled).toBe(false) + + await vi.advanceTimersByTimeAsync(1) + await sleepPromise + expect(settled).toBe(true) + } finally { + vi.useRealTimers() + } + }) }) diff --git a/tests/unit/report-pdf-routes.test.ts b/tests/unit/report-pdf-routes.test.ts new file mode 100644 index 0000000..5766f4b --- /dev/null +++ b/tests/unit/report-pdf-routes.test.ts @@ -0,0 +1,97 @@ +import { describe, expect, it, vi } from 'vitest' +import { createRouter, createValidUsageData, request, requestRaw } from './http-router-test-helpers' + +describe('PDF report routes', () => { + it('rejects PDF reports without usage data before reading the request body', async () => { + const readBody = vi.fn(async () => ({ title: 'Usage' })) + const { router } = createRouter({ + dataRuntimeOverrides: { + readData: vi.fn(() => null), + }, + readBody, + }) + + const { res, body } = await request(router, '/api/report/pdf', 'POST') + + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'No data available for the report.' 
}) + expect(readBody).not.toHaveBeenCalled() + }) + + it('maps invalid PDF request bodies to 400', async () => { + const usageData = createValidUsageData() + const { router } = createRouter({ + dataRuntimeOverrides: { + readData: vi.fn(() => usageData), + }, + readBody: vi.fn(async () => { + throw new Error('broken report JSON') + }), + }) + + const { res, body } = await request(router, '/api/report/pdf', 'POST') + + expect(res.status).toBe(400) + expect(body).toEqual({ message: 'Invalid report request' }) + }) + + it('maps oversized PDF request bodies to 413', async () => { + const usageData = createValidUsageData() + const { router } = createRouter({ + dataRuntimeOverrides: { + readData: vi.fn(() => usageData), + }, + readBody: vi.fn(async () => { + throw Object.assign(new Error('too large'), { code: 'PAYLOAD_TOO_LARGE' }) + }), + }) + + const { res, body } = await request(router, '/api/report/pdf', 'POST') + + expect(res.status).toBe(413) + expect(body).toEqual({ message: 'Report request too large' }) + }) + + it('maps missing Typst PDF generator failures to 503', async () => { + const usageData = createValidUsageData() + const { router } = createRouter({ + dataRuntimeOverrides: { + readData: vi.fn(() => usageData), + }, + generatePdfReport: vi.fn(async () => { + throw Object.assign(new Error('Typst not found'), { code: 'TYPST_MISSING' }) + }), + readBody: vi.fn(async () => ({ locale: 'de-CH' })), + }) + + const { res, body } = await request(router, '/api/report/pdf', 'POST') + + expect(res.status).toBe(503) + expect(body).toEqual({ message: 'Typst not found' }) + }) + + it('sends generated PDF buffers with attachment headers', async () => { + const usageData = createValidUsageData() + const generatePdfReport = vi.fn(async () => ({ + buffer: Buffer.from('%PDF-1.4'), + filename: 'ttdash-report.pdf', + })) + const { router } = createRouter({ + dataRuntimeOverrides: { + readData: vi.fn(() => usageData), + }, + generatePdfReport, + readBody: vi.fn(async () => ({ 
locale: 'de-CH' })), + }) + + const { res } = await requestRaw(router, '/api/report/pdf', 'POST') + + expect(res.status).toBe(200) + expect(res.headers['content-type']).toBe('application/pdf') + expect(res.headers['content-disposition']).toBe( + 'attachment; filename="ttdash-report.pdf"; filename*=UTF-8\'\'ttdash-report.pdf', + ) + expect(res.body).toBe('%PDF-1.4') + expect(generatePdfReport).toHaveBeenCalledWith(usageData.daily, { locale: 'de-CH' }) + }) +}) diff --git a/tests/unit/report-test-timings.test.ts b/tests/unit/report-test-timings.test.ts new file mode 100644 index 0000000..b11ee84 --- /dev/null +++ b/tests/unit/report-test-timings.test.ts @@ -0,0 +1,110 @@ +import { createRequire } from 'node:module' +import { describe, expect, it } from 'vitest' + +const require = createRequire(import.meta.url) + +type TimingRow = { + name: string + suite?: string + time: number +} + +type TimingReport = { + slowCases: TimingRow[] + slowSuites: TimingRow[] + suiteFailures: TimingRow[] + suiteWarnings: TimingRow[] + testFailures: TimingRow[] + testWarnings: TimingRow[] +} + +type TimingScript = { + buildTimingReport: ( + junit: { cases: TimingRow[]; suites: TimingRow[] }, + options?: { + maxSuiteSeconds?: number + maxTestSeconds?: number + warnSuiteSeconds?: number + warnTestSeconds?: number + }, + ) => TimingReport + parseArgs: (argv: string[]) => { + junitPath: string + maxSuiteSeconds: number | null + maxTestSeconds: number | null + warnSuiteSeconds: number + warnTestSeconds: number + } + parseJUnit: (xml: string) => { cases: TimingRow[]; suites: TimingRow[] } +} + +const timingScript = require('../../scripts/report-test-timings.js') as TimingScript + +describe('test timing report budgets', () => { + it('parses Vitest JUnit suites and test cases', () => { + const report = timingScript.parseJUnit(` + + + + + + + `) + + expect(report.suites).toEqual([{ name: 'unit', time: 1.25 }]) + expect(report.cases).toEqual([ + { suite: 'tests/unit/example.test.ts', name: 'fast 
path', time: 0.04 }, + { suite: 'tests/unit/example.test.ts', name: 'slow path', time: 0.71 }, + ]) + }) + + it('separates warning budgets from hard timing failures', () => { + const report = timingScript.buildTimingReport( + { + suites: [ + { name: 'fast suite', time: 0.2 }, + { name: 'slow suite', time: 3 }, + { name: 'failed suite budget', time: 21 }, + ], + cases: [ + { suite: 'a.test.ts', name: 'fast case', time: 0.1 }, + { suite: 'b.test.ts', name: 'slow case', time: 0.8 }, + { suite: 'c.test.ts', name: 'failed case budget', time: 12.5 }, + ], + }, + { + warnSuiteSeconds: 2, + warnTestSeconds: 0.5, + maxSuiteSeconds: 20, + maxTestSeconds: 12, + }, + ) + + expect(report.suiteWarnings.map((suite) => suite.name)).toEqual([ + 'failed suite budget', + 'slow suite', + ]) + expect(report.testWarnings.map((testCase) => testCase.name)).toEqual([ + 'failed case budget', + 'slow case', + ]) + expect(report.suiteFailures.map((suite) => suite.name)).toEqual(['failed suite budget']) + expect(report.testFailures.map((testCase) => testCase.name)).toEqual(['failed case budget']) + }) + + it('parses configurable timing budgets from CLI arguments', () => { + const options = timingScript.parseArgs([ + './test-results/custom.junit.xml', + '--warn-suite-seconds=1.5', + '--warn-test-seconds=0.25', + '--max-suite-seconds=20', + '--max-test-seconds=12', + ]) + + expect(options.junitPath.replaceAll('\\', '/')).toMatch(/test-results\/custom\.junit\.xml$/) + expect(options.warnSuiteSeconds).toBe(1.5) + expect(options.warnTestSeconds).toBe(0.25) + expect(options.maxSuiteSeconds).toBe(20) + expect(options.maxTestSeconds).toBe(12) + }) +}) diff --git a/tests/unit/run-parallel-gate.test.ts b/tests/unit/run-parallel-gate.test.ts new file mode 100644 index 0000000..2a2a463 --- /dev/null +++ b/tests/unit/run-parallel-gate.test.ts @@ -0,0 +1,182 @@ +import { createRequire } from 'node:module' +import { EventEmitter } from 'node:events' +import { afterEach, describe, expect, it, vi } from 
'vitest' + +const require = createRequire(import.meta.url) + +type GateTask = { + args: string[] + command: string + name: string + outputPaths: string[] +} + +type ParallelGateScript = { + createParallelGatePlan: (options?: { e2e?: boolean }) => GateTask[][] + findParallelOutputCollisions: ( + plan: GateTask[][], + ) => { group: number; outputPath: string; tasks: string[] }[] + parseArgs: (argv: string[]) => { dryRun: boolean; e2e: boolean; help: boolean } + run: ( + argv: string[], + streams: { + stdout: { write: (message: string) => void } + stderr: { write: (message: string) => void } + }, + spawnImpl: (command: string, args: string[], options: unknown) => EventEmitter, + ) => Promise +} + +class FakeChild extends EventEmitter { + stdout = new EventEmitter() + stderr = new EventEmitter() +} + +const parallelGate = require('../../scripts/run-parallel-gate.js') as ParallelGateScript + +describe('parallel verification gate', () => { + afterEach(() => { + vi.restoreAllMocks() + }) + + it('keeps independent work parallel and isolates high-contention test suites', () => { + const plan = parallelGate.createParallelGatePlan({ e2e: true }) + + expect(plan.map((group) => group.map((task) => task.name))).toEqual([ + ['static', 'integration', 'build'], + ['unit'], + ['frontend'], + ['architecture'], + ['integration-background'], + ['package-smoke', 'e2e'], + ]) + expect(plan[2]?.find((task) => task.name === 'frontend')?.args).toEqual([ + 'run', + 'test:vitest:frontend', + ]) + expect(plan[5]?.find((task) => task.name === 'e2e')?.args).toEqual(['run', 'test:e2e:ci']) + }) + + it('declares output paths without collisions inside parallel groups', () => { + const plan = parallelGate.createParallelGatePlan({ e2e: true }) + + expect(parallelGate.findParallelOutputCollisions(plan)).toEqual([]) + expect(plan[0]?.find((task) => task.name === 'integration')?.outputPaths).toEqual([ + 'test-results/vitest-integration.junit.xml', + ]) + expect(plan[1]?.find((task) => task.name === 
'unit')?.outputPaths).toEqual([ + 'test-results/vitest-unit.junit.xml', + ]) + expect(plan[2]?.find((task) => task.name === 'frontend')?.outputPaths).toEqual([ + 'test-results/vitest-frontend.junit.xml', + ]) + expect(plan[5]?.find((task) => task.name === 'e2e')?.outputPaths).toEqual([ + 'playwright-report/', + 'test-results/', + ]) + }) + + it('detects duplicate report outputs within a parallel group', () => { + const duplicatePlan = [ + [ + { + args: ['run', 'left'], + command: 'npm', + name: 'left', + outputPaths: ['test-results/shared.junit.xml'], + }, + { + args: ['run', 'right'], + command: 'npm', + name: 'right', + outputPaths: ['test-results/shared.junit.xml'], + }, + ], + ] + + expect(parallelGate.findParallelOutputCollisions(duplicatePlan)).toEqual([ + { + group: 1, + outputPath: 'test-results/shared.junit.xml', + tasks: ['left', 'right'], + }, + ]) + }) + + it('parses dry-run and e2e options', () => { + expect(parallelGate.parseArgs(['--dry-run', '--e2e'])).toEqual({ + dryRun: true, + e2e: true, + help: false, + }) + }) + + it('rejects unknown options before spawning any gate tasks', async () => { + const stdout = { write: vi.fn() } + const stderr = { write: vi.fn() } + const spawnImpl = vi.fn() + + const status = await parallelGate.run(['--unknown'], { stdout, stderr }, spawnImpl) + + expect(status).toBe(1) + expect(spawnImpl).not.toHaveBeenCalled() + expect(stderr.write).toHaveBeenCalledWith('Unknown option: --unknown\n') + }) + + it('prints the task graph without spawning commands in dry-run mode', async () => { + const stdout = { write: vi.fn() } + const stderr = { write: vi.fn() } + const spawnImpl = vi.fn() + + const status = await parallelGate.run(['--dry-run', '--e2e'], { stdout, stderr }, spawnImpl) + + expect(status).toBe(0) + expect(spawnImpl).not.toHaveBeenCalled() + expect(stdout.write.mock.calls.join('\n')).toContain('group 1') + expect(stdout.write.mock.calls.join('\n')).toContain('frontend: npm run test:vitest:frontend') + 
expect(stdout.write.mock.calls.join('\n')).toContain( + 'outputs: test-results/vitest-frontend.junit.xml', + ) + expect(stdout.write.mock.calls.join('\n')).toContain('e2e: npm run test:e2e:ci') + }) + + it('stops after the first failing group', async () => { + const stdout = { write: vi.fn() } + const stderr = { write: vi.fn() } + const expectedFirstGroupSize = parallelGate.createParallelGatePlan({ e2e: false })[0].length + const spawnImpl = vi.fn(() => { + const child = new FakeChild() + queueMicrotask(() => child.emit('close', 1)) + return child + }) + + const status = await parallelGate.run([], { stdout, stderr }, spawnImpl) + + expect(status).toBe(1) + expect(spawnImpl).toHaveBeenCalledTimes(expectedFirstGroupSize) + }) + + it('prints task durations and a timing summary', async () => { + let now = 1_000 + vi.spyOn(Date, 'now').mockImplementation(() => now) + const stdout = { write: vi.fn() } + const stderr = { write: vi.fn() } + const spawnImpl = vi.fn(() => { + const child = new FakeChild() + queueMicrotask(() => { + now += 1_250 + child.emit('close', 0) + }) + return child + }) + + const status = await parallelGate.run([], { stdout, stderr }, spawnImpl) + const output = stdout.write.mock.calls.join('\n') + + expect(status).toBe(0) + expect(output).toContain('[static] exited with 0 after 1.25s') + expect(output).toContain('parallel gate timing summary') + expect(output).toContain('static: status=0 duration=1.25s') + expect(output).toContain('package-smoke: status=0 duration=1.25s') + }) +}) diff --git a/tests/unit/run-vitest-project-timings.test.ts b/tests/unit/run-vitest-project-timings.test.ts new file mode 100644 index 0000000..39884f6 --- /dev/null +++ b/tests/unit/run-vitest-project-timings.test.ts @@ -0,0 +1,205 @@ +import { createRequire } from 'node:module' +import fs from 'node:fs' +import path from 'node:path' +import { describe, expect, it, vi } from 'vitest' + +const require = createRequire(import.meta.url) + +type ProjectRun = { + iteration: 
number + project: string + reportPath: string +} + +type RunnerOptions = { + dryRun: boolean + maxSuiteSeconds: number + maxTestSeconds: number + projects: string[] + repeat: number + reportDir: string +} + +type RunnerScript = { + buildBudgetCommand: ( + run: ProjectRun, + options: Pick, + ) => { command: string; args: string[] } + buildProjectRuns: ( + options: Pick, + ) => ProjectRun[] + buildVitestCommand: (run: ProjectRun, platform?: string) => { command: string; args: string[] } + getNpxCommand: (platform?: string) => string + getReportPath: (project: string, iteration: number, repeat: number, reportDir: string) => string + median: (values: number[]) => number + parseArgs: (argv: string[]) => RunnerOptions + run: ( + argv: string[], + streams: { + stdout: { write: (message: string) => void } + stderr: { write: (message: string) => void } + }, + spawnSyncImpl: ( + command: string, + args: string[], + options: unknown, + ) => { error?: Error; status: number | null }, + ) => number +} + +const timingRunner = require('../../scripts/run-vitest-project-timings.js') as RunnerScript + +describe('Vitest project timing runner', () => { + it('parses repeatable project timing options', () => { + const options = timingRunner.parseArgs([ + '--projects=unit,frontend', + '--repeat=3', + '--report-dir=.cache/test-timings', + '--max-suite-seconds=15', + '--max-test-seconds=8', + ]) + + expect(options.projects).toEqual(['unit', 'frontend']) + expect(options.repeat).toBe(3) + expect(options.reportDir.replaceAll('\\', '/')).toMatch(/\.cache\/test-timings$/) + expect(options.maxSuiteSeconds).toBe(15) + expect(options.maxTestSeconds).toBe(8) + }) + + it('uses unique JUnit reports for each project and benchmark repeat', () => { + const reportDir = path.resolve('test-results') + const runs = timingRunner.buildProjectRuns({ + projects: ['unit', 'frontend'], + repeat: 2, + reportDir, + }) + + expect(runs).toEqual([ + { + iteration: 1, + project: 'unit', + reportPath: 
path.join(reportDir, 'vitest-unit.timing-run-1.junit.xml'), + }, + { + iteration: 1, + project: 'frontend', + reportPath: path.join(reportDir, 'vitest-frontend.timing-run-1.junit.xml'), + }, + { + iteration: 2, + project: 'unit', + reportPath: path.join(reportDir, 'vitest-unit.timing-run-2.junit.xml'), + }, + { + iteration: 2, + project: 'frontend', + reportPath: path.join(reportDir, 'vitest-frontend.timing-run-2.junit.xml'), + }, + ]) + + expect(timingRunner.getReportPath('unit', 1, 1, reportDir)).toBe( + path.join(reportDir, 'vitest-unit.timing.junit.xml'), + ) + }) + + it('builds Vitest and budget commands with isolated report paths', () => { + const projectRun = { + iteration: 1, + project: 'frontend', + reportPath: path.resolve('test-results/vitest-frontend.timing.junit.xml'), + } + + expect(timingRunner.buildVitestCommand(projectRun)).toEqual({ + command: timingRunner.getNpxCommand(), + args: [ + 'vitest', + 'run', + '--project', + 'frontend', + '--reporter=dot', + '--reporter=junit', + `--outputFile.junit=${projectRun.reportPath}`, + ], + }) + expect(timingRunner.buildVitestCommand(projectRun, 'win32').command).toBe('npx.cmd') + expect(timingRunner.buildVitestCommand(projectRun, 'linux').command).toBe('npx') + + const budgetCommand = timingRunner.buildBudgetCommand(projectRun, { + maxSuiteSeconds: 20, + maxTestSeconds: 12, + }) + + expect(budgetCommand.command).toBe(process.execPath) + expect(budgetCommand.args).toContain(projectRun.reportPath) + expect(budgetCommand.args).toContain('--max-suite-seconds=20') + expect(budgetCommand.args).toContain('--max-test-seconds=12') + }) + + it('prints dry-run commands without spawning Vitest', () => { + const stdout = { write: vi.fn() } + const stderr = { write: vi.fn() } + const spawnSyncImpl = vi.fn(() => ({ status: 0 })) + const mkdirSync = vi.spyOn(fs, 'mkdirSync') + + try { + const status = timingRunner.run( + ['--projects=unit', '--repeat=2', '--dry-run'], + { stdout, stderr }, + spawnSyncImpl, + ) + + 
expect(status).toBe(0) + expect(spawnSyncImpl).not.toHaveBeenCalled() + expect(mkdirSync).not.toHaveBeenCalled() + expect(stdout.write.mock.calls.join('\n')).toContain('vitest-unit.timing-run-1.junit.xml') + expect(stdout.write.mock.calls.join('\n')).toContain('vitest-unit.timing-run-2.junit.xml') + } finally { + mkdirSync.mockRestore() + } + }) + + it('fails explicitly when Vitest cannot be launched', () => { + const stdout = { write: vi.fn() } + const stderr = { write: vi.fn() } + const spawnSyncImpl = vi.fn(() => ({ + error: new Error('spawn npx ENOENT'), + status: null, + })) + const mkdirSync = vi.spyOn(fs, 'mkdirSync').mockImplementation(() => undefined) + + try { + const status = timingRunner.run(['--projects=unit'], { stdout, stderr }, spawnSyncImpl) + + expect(status).toBe(1) + expect(stderr.write).toHaveBeenCalledWith('Failed to launch vitest: spawn npx ENOENT\n') + } finally { + mkdirSync.mockRestore() + } + }) + + it('fails explicitly when the timing budget check cannot be launched', () => { + const stdout = { write: vi.fn() } + const stderr = { write: vi.fn() } + const spawnSyncImpl = vi + .fn() + .mockReturnValueOnce({ status: 0 }) + .mockReturnValueOnce({ error: new Error('spawn node ENOENT'), status: null }) + const mkdirSync = vi.spyOn(fs, 'mkdirSync').mockImplementation(() => undefined) + + try { + const status = timingRunner.run(['--projects=unit'], { stdout, stderr }, spawnSyncImpl) + + expect(status).toBe(1) + expect(stderr.write).toHaveBeenCalledWith( + 'Failed to launch budget check: spawn node ENOENT\n', + ) + } finally { + mkdirSync.mockRestore() + } + }) + + it('calculates median values for repeated timings', () => { + expect(timingRunner.median([3000, 1000, 2000])).toBe(2000) + expect(timingRunner.median([1000, 3000])).toBe(2000) + }) +}) diff --git a/tests/unit/runtime-routes.test.ts b/tests/unit/runtime-routes.test.ts new file mode 100644 index 0000000..c3f1591 --- /dev/null +++ b/tests/unit/runtime-routes.test.ts @@ -0,0 +1,103 @@ 
+import { describe, expect, it, vi } from 'vitest' +import { createRouter, request } from './http-router-test-helpers' + +describe('runtime and API guard routes', () => { + it('returns service unavailable when toktrack version lookup throws', async () => { + const { router } = createRouter({ + autoImportRuntimeOverrides: { + lookupLatestToktrackVersion: vi.fn(async () => { + throw new Error('registry unavailable') + }), + }, + }) + + const { res, body } = await request(router, '/api/toktrack/version-status', 'GET') + + expect(res.status).toBe(503) + expect(body).toEqual({ + message: 'Service Unavailable', + detail: 'registry unavailable', + }) + }) + + it('rejects untrusted hosts before API auth is evaluated', async () => { + const validateApiRequest = vi.fn(() => ({ + status: 401, + message: 'Authentication required', + })) + const { router } = createRouter({ + httpUtilsOverrides: { + validateRequestHost: vi.fn(() => ({ + status: 403, + message: 'Untrusted host header', + })), + }, + remoteAuthOverrides: { + validateApiRequest, + }, + }) + + const { res, body } = await request(router, '/api/runtime', 'GET') + + expect(res.status).toBe(403) + expect(body).toEqual({ message: 'Untrusted host header' }) + expect(validateApiRequest).not.toHaveBeenCalled() + }) + + it('forwards API authentication failures with challenge headers', async () => { + const { router } = createRouter({ + remoteAuthOverrides: { + validateApiRequest: vi.fn(() => ({ + headers: { 'WWW-Authenticate': 'Bearer realm="TTDash API"' }, + status: 401, + message: 'Authentication required', + })), + }, + }) + + const { res, body } = await request(router, '/api/runtime', 'GET') + + expect(res.status).toBe(401) + expect(res.headers['www-authenticate']).toBe('Bearer realm="TTDash API"') + expect(body).toEqual({ message: 'Authentication required' }) + }) + + it('serves runtime snapshots only through GET', async () => { + const snapshot = { + id: 'runtime-2', + mode: 'background', + port: 3020, + url: 
'http://localhost:3020', + } + const getRuntimeSnapshot = vi.fn(() => snapshot) + const { router } = createRouter({ + getRuntimeSnapshot, + }) + + const runtimeResponse = await request(router, '/api/runtime', 'GET') + const rejectedMethod = await request(router, '/api/runtime', 'POST') + + expect(runtimeResponse.res.status).toBe(200) + expect(runtimeResponse.body).toEqual(snapshot) + expect(getRuntimeSnapshot).toHaveBeenCalledTimes(1) + expect(rejectedMethod.res.status).toBe(405) + expect(rejectedMethod.body).toEqual({ message: 'Method Not Allowed' }) + }) + + it('keeps unknown API endpoints distinct from stale API prefixes', async () => { + const { router } = createRouter({ + httpUtilsOverrides: { + // Simulate a configured API prefix with no handler separately from a stale /api URL. + resolveApiPath: (pathname: string) => (pathname === '/custom/unknown' ? '/unknown' : null), + }, + }) + + const unknownEndpoint = await request(router, '/custom/unknown', 'GET') + const stalePrefix = await request(router, '/api/not-configured', 'GET') + + expect(unknownEndpoint.res.status).toBe(404) + expect(unknownEndpoint.body).toEqual({ message: 'API endpoint not found' }) + expect(stalePrefix.res.status).toBe(404) + expect(stalePrefix.body).toEqual({ message: 'Not Found' }) + }) +}) diff --git a/tests/unit/server-helpers-auto-import-executor.test.ts b/tests/unit/server-helpers-auto-import-executor.test.ts new file mode 100644 index 0000000..3aeb27b --- /dev/null +++ b/tests/unit/server-helpers-auto-import-executor.test.ts @@ -0,0 +1,260 @@ +import { afterEach, describe, expect, it, vi } from 'vitest' +import { + TOKTRACK_VERSION, + createAutoImportRuntime, + createSpawnSequence, + resetServerHelperTestState, +} from './server-helpers.shared' + +function createRuntimeWithSpawn( + spawnImpl: ReturnType, + options: { + normalizeIncomingData?: (input: unknown) => unknown + updateDataLoadState?: ReturnType + withSettingsAndDataMutationLock?: ReturnType + writeData?: ReturnType + } = 
{}, +) { + return createAutoImportRuntime({ + fs: { + existsSync: () => false, + }, + processObject: { env: {} }, + spawnCrossPlatform: spawnImpl, + normalizeIncomingData: options.normalizeIncomingData ?? ((input: unknown) => input), + withSettingsAndDataMutationLock: + options.withSettingsAndDataMutationLock ?? + vi.fn(async (operation: () => Promise) => operation()), + writeData: options.writeData ?? vi.fn(), + updateDataLoadState: options.updateDataLoadState ?? vi.fn(async () => undefined), + toktrackPackageName: 'toktrack', + toktrackPackageSpec: `toktrack@${TOKTRACK_VERSION}`, + toktrackVersion: TOKTRACK_VERSION, + toktrackLocalBin: '/missing/toktrack', + npxCacheDir: '/tmp/ttdash-test-npx-cache', + isWindows: false, + processTerminationGraceMs: 1000, + toktrackLocalRunnerProbeTimeoutMs: 7000, + toktrackLocalRunnerVersionCheckTimeoutMs: 7000, + toktrackLocalRunnerImportTimeoutMs: 60000, + toktrackPackageRunnerProbeTimeoutMs: 45000, + toktrackPackageRunnerVersionCheckTimeoutMs: 45000, + toktrackPackageRunnerImportTimeoutMs: 60000, + toktrackLatestLookupTimeoutMs: 15000, + toktrackLatestCacheSuccessTtlMs: 5 * 60 * 1000, + toktrackLatestCacheFailureTtlMs: 60 * 1000, + probeLog: () => undefined, + }) +} + +afterEach(() => { + resetServerHelperTestState() +}) + +describe('server helper utilities: auto-import executor behavior', () => { + it('imports toktrack JSON through the mutation lock and reports structured progress', async () => { + const rawPayload = { daily: [{ date: '2026-05-01' }] } + const normalizedPayload = { + daily: [{ date: '2026-05-01' }], + totals: { totalCost: 12.5 }, + } + const spawnImpl = createSpawnSequence([ + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: JSON.stringify(rawPayload), stderr: 'runner progress\n' }, + ]) + const normalizeIncomingData = vi.fn(() => normalizedPayload) + const writeData = vi.fn() + const updateDataLoadState = vi.fn(async () => undefined) + const 
lockEvents: string[] = [] + const withSettingsAndDataMutationLock = vi.fn(async (operation: () => Promise) => { + lockEvents.push('lock') + const result = await operation() + lockEvents.push('unlock') + return result + }) + const runtime = createRuntimeWithSpawn(spawnImpl, { + normalizeIncomingData, + updateDataLoadState, + withSettingsAndDataMutationLock, + writeData, + }) + const checks: Array> = [] + const progress: Array<{ key: string; vars?: Record }> = [] + const output: string[] = [] + + await expect( + runtime.performAutoImport({ + source: 'unit-test', + onCheck: (event) => checks.push(event), + onProgress: (event) => progress.push(event), + onOutput: (line) => output.push(line), + }), + ).resolves.toEqual({ + days: 1, + totalCost: 12.5, + }) + + expect(normalizeIncomingData).toHaveBeenCalledWith(rawPayload) + expect(withSettingsAndDataMutationLock).toHaveBeenCalledTimes(1) + expect(lockEvents).toEqual(['lock', 'unlock']) + expect(writeData).toHaveBeenCalledWith(normalizedPayload) + expect(updateDataLoadState).toHaveBeenCalledWith( + expect.objectContaining({ + lastLoadSource: 'unit-test', + }), + ) + expect(checks).toEqual([ + { tool: 'toktrack', status: 'checking' }, + { + tool: 'toktrack', + status: 'found', + method: 'bunx', + version: TOKTRACK_VERSION, + }, + ]) + expect(progress.map((event) => event.key)).toEqual([ + 'startingLocalImport', + 'warmingUpPackageRunner', + 'loadingUsageData', + ]) + expect(output).toEqual(['runner progress\n']) + expect(runtime.isAutoImportRunning()).toBe(false) + }) + + it('surfaces normalizer failures as structured invalid-data errors without writing data', async () => { + const spawnImpl = createSpawnSequence([ + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: JSON.stringify({ daily: [] }) }, + ]) + const writeData = vi.fn() + const updateDataLoadState = vi.fn(async () => undefined) + const runtime = createRuntimeWithSpawn(spawnImpl, { + normalizeIncomingData: 
() => { + throw new Error('Unsupported usage payload') + }, + updateDataLoadState, + writeData, + }) + + await expect(runtime.performAutoImport()).rejects.toMatchObject({ + messageKey: 'toktrackInvalidData', + messageVars: { + message: 'Unsupported usage payload', + }, + }) + + expect(writeData).not.toHaveBeenCalled() + expect(updateDataLoadState).not.toHaveBeenCalled() + expect(runtime.isAutoImportRunning()).toBe(false) + }) + + it('rejects a second import while the singleton lease is active without spawning toktrack', async () => { + const spawnImpl = createSpawnSequence([]) + const runtime = createRuntimeWithSpawn(spawnImpl) + const lease = runtime.acquireAutoImportLease() + + try { + await expect(runtime.performAutoImport()).rejects.toMatchObject({ + messageKey: 'autoImportRunning', + }) + expect(spawnImpl).not.toHaveBeenCalled() + expect(runtime.isAutoImportRunning()).toBe(true) + } finally { + lease.release() + } + + expect(runtime.isAutoImportRunning()).toBe(false) + }) + + it('leaves caller-owned leases active until the caller releases them', async () => { + const spawnImpl = createSpawnSequence([ + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: JSON.stringify({ daily: [{ date: '2026-05-01' }] }) }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl, { + normalizeIncomingData: () => ({ + daily: [{ date: '2026-05-01' }], + totals: { totalCost: 1.23 }, + }), + }) + const lease = runtime.acquireAutoImportLease() + + try { + await expect( + runtime.performAutoImport({ + lease, + }), + ).resolves.toEqual({ + days: 1, + totalCost: 1.23, + }) + expect(runtime.isAutoImportRunning()).toBe(true) + } finally { + lease.release() + } + + expect(runtime.isAutoImportRunning()).toBe(false) + }) + + it('formats startup auto-load totals with the shared server currency formatter', async () => { + const spawnImpl = createSpawnSequence([ + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: `toktrack 
${TOKTRACK_VERSION}\n` }, + { stdout: JSON.stringify({ daily: [{ date: '2026-05-01' }] }) }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl, { + normalizeIncomingData: () => ({ + daily: [{ date: '2026-05-01' }], + totals: { totalCost: 1.23 }, + }), + }) + const logs: string[] = [] + const errors: string[] = [] + + await expect( + runtime.runStartupAutoLoad({ + log: (message) => logs.push(message), + errorLog: (message) => errors.push(message), + }), + ).resolves.toEqual({ + days: 1, + totalCost: 1.23, + }) + + expect(logs).toContain('Auto-load complete: imported 1 day, $ 1.23.') + expect(errors).toEqual([]) + }) + + it('logs non-error startup auto-load failures defensively', async () => { + const spawnImpl = createSpawnSequence([ + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + { stdout: JSON.stringify({ daily: [{ date: '2026-05-01' }] }) }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl, { + normalizeIncomingData: () => ({ + daily: [{ date: '2026-05-01' }], + totals: { totalCost: 1.23 }, + }), + withSettingsAndDataMutationLock: vi.fn(async () => { + throw 'lock failed' + }), + }) + const errors: string[] = [] + + await expect( + runtime.runStartupAutoLoad({ + log: () => undefined, + errorLog: (message) => errors.push(message), + }), + ).resolves.toBeNull() + + expect(errors).toEqual([ + 'Auto-load failed: lock failed', + 'Dashboard will start without newly imported data.', + ]) + }) +}) diff --git a/tests/unit/server-helpers-command-runner.test.ts b/tests/unit/server-helpers-command-runner.test.ts new file mode 100644 index 0000000..23289ce --- /dev/null +++ b/tests/unit/server-helpers-command-runner.test.ts @@ -0,0 +1,345 @@ +import { afterEach, describe, expect, it, vi } from 'vitest' +import { + EventEmitter, + FakeChildProcess, + createSpawnSequence, + resetServerHelperTestState, + runCommandWithSpawn, +} from './server-helpers.shared' +import { createRuntimeWithSpawn } from 
'./server-helpers-runner-test-utils' + +afterEach(() => { + resetServerHelperTestState() +}) + +describe('server helper utilities: command runner', () => { + it('uses stdout as the toktrack command error message when stderr is empty', async () => { + const spawnImpl = createSpawnSequence([{ code: 1, stdout: 'stdout failure\n' }]) + const runtime = createRuntimeWithSpawn(spawnImpl) + + await expect( + runtime.runToktrack( + { + command: 'fake-runner', + prefixArgs: ['--prefix'], + env: { PATH: '/fake-bin' }, + }, + ['--version'], + ), + ).rejects.toMatchObject({ + message: 'stdout failure', + stdout: 'stdout failure\n', + stderr: '', + exitCode: 1, + }) + expect(spawnImpl).toHaveBeenCalledWith( + 'fake-runner', + ['--prefix', '--version'], + expect.objectContaining({ + stdio: ['ignore', 'pipe', 'pipe'], + env: { PATH: '/fake-bin' }, + }), + ) + }) + + it('streams stderr and lets callers terminate a running command on close', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => child) + const stderrLines: string[] = [] + let closeCommand: (() => void) | null = null + + const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { + streamStderr: true, + onStderr: (line) => stderrLines.push(line), + signalOnClose: (close) => { + closeCommand = close + }, + spawnImpl, + }) + + child.stderr.emit('data', Buffer.from('runner warning\n')) + expect(stderrLines).toEqual(['runner warning\n']) + + closeCommand?.() + + await expect(outcomePromise).rejects.toMatchObject({ + message: 'runner warning', + stderr: 'runner warning\n', + exitCode: 143, + }) + }) + + it('rejects and terminates the command when a streamed stderr callback throws', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => child) + const callbackError = new Error('stderr listener failed') + const onStderr = vi.fn(() => { + throw callbackError + }) + const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { 
+ streamStderr: true, + onStderr, + spawnImpl, + }) + + child.stderr.emit('data', Buffer.from('runner warning\n')) + child.stderr.emit('data', Buffer.from('ignored warning\n')) + + await expect(outcomePromise).rejects.toBe(callbackError) + expect(onStderr).toHaveBeenCalledTimes(1) + expect(child.exitCode).toBe(143) + }) + + it('rejects in-band when a streamed stderr callback throws during decoder flush', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => child) + const callbackError = new Error('stderr flush listener failed') + const onStderr = vi.fn(() => { + throw callbackError + }) + const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { + streamStderr: true, + onStderr, + spawnImpl, + }) + const symbol = Buffer.from('€') + + child.stderr.emit('data', symbol.subarray(0, 1)) + child.exitCode = 1 + child.emit('close', 1) + + await expect(outcomePromise).rejects.toBe(callbackError) + expect(onStderr).toHaveBeenCalledWith('�') + }) + + it('captures stdout split across UTF-8 chunk boundaries', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => child) + const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { + spawnImpl, + }) + const output = Buffer.from('a€b') + + child.stdout.emit('data', output.subarray(0, 2)) + child.stdout.emit('data', output.subarray(2)) + child.exitCode = 1 + child.emit('close', 1) + + await expect(outcomePromise).rejects.toMatchObject({ + message: 'a€b', + stdout: 'a€b', + exitCode: 1, + }) + }) + + it('streams stderr split across UTF-8 chunk boundaries without replacement characters', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => child) + const stderrLines: string[] = [] + const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { + streamStderr: true, + onStderr: (line) => stderrLines.push(line), + spawnImpl, + }) + const prefix = Buffer.from('runner ') + const 
symbol = Buffer.from('€') + const suffix = Buffer.from(' warning\n') + + child.stderr.emit('data', Buffer.concat([prefix, symbol.subarray(0, 1)])) + child.stderr.emit('data', Buffer.concat([symbol.subarray(1), suffix])) + child.exitCode = 1 + child.emit('close', 1) + + await expect(outcomePromise).rejects.toMatchObject({ + message: 'runner € warning', + stderr: 'runner € warning\n', + exitCode: 1, + }) + expect(stderrLines.join('')).toBe('runner € warning\n') + expect(stderrLines.join('')).not.toContain('\uFFFD') + }) + + it('bounds captured output while preserving streamed stderr chunks', async () => { + const spawnImpl = createSpawnSequence([ + { + code: 1, + stdout: 'abcdef', + stderr: 'runner warning\n', + }, + ]) + const stderrLines: string[] = [] + + await expect( + runCommandWithSpawn('fake-runner', ['daily', '--json'], { + maxOutputBytes: 4, + streamStderr: true, + onStderr: (line) => stderrLines.push(line), + spawnImpl, + }), + ).rejects.toMatchObject({ + stdout: 'abcd', + stderr: 'runn', + outputTruncated: true, + exitCode: 1, + }) + expect(stderrLines).toEqual(['runner warning\n']) + }) + + it('truncates captured UTF-8 output on character boundaries', async () => { + const spawnImpl = createSpawnSequence([ + { + code: 1, + stdout: 'a€b', + }, + ]) + + await expect( + runCommandWithSpawn('fake-runner', ['daily', '--json'], { + maxOutputBytes: 3, + spawnImpl, + }), + ).rejects.toMatchObject({ + stdout: 'a', + outputTruncated: true, + exitCode: 1, + }) + }) + + it('wraps spawn errors with command diagnostics', async () => { + const child = new FakeChildProcess() + const spawnImpl = vi.fn(() => child) + const outcomePromise = runCommandWithSpawn('missing-runner', ['--version'], { + spawnImpl, + }) + + child.emit('error', new Error('spawn denied')) + + await expect(outcomePromise).rejects.toMatchObject({ + message: 'spawn denied', + command: 'missing-runner', + args: ['--version'], + stdout: '', + stderr: '', + }) + }) + + it('waits for timed-out toktrack 
commands to exit before rejecting', async () => { + vi.useFakeTimers() + + class FakeChild extends EventEmitter { + stdout = new EventEmitter() + stderr = new EventEmitter() + exitCode: number | null = null + + kill(signal: string) { + if (signal !== 'SIGTERM') { + this.exitCode = 137 + this.emit('close', 137) + return + } + + setTimeout(() => { + this.exitCode = 143 + this.emit('close', 143) + }, 150) + } + } + + const child = new FakeChild() + const spawnImpl = vi.fn(() => child) as unknown as ReturnType + const runtime = createRuntimeWithSpawn(spawnImpl) + let settled = false + + try { + const outcomePromise = runtime + .runToktrack( + { + command: 'fake-runner', + prefixArgs: [], + env: {}, + }, + ['daily', '--json'], + { timeoutMs: 50 }, + ) + .then((value) => ({ ok: true as const, value })) + .catch((error) => ({ ok: false as const, error })) + .finally(() => { + settled = true + }) + + await vi.advanceTimersByTimeAsync(50) + expect(settled).toBe(false) + + await vi.advanceTimersByTimeAsync(150) + await expect(outcomePromise).resolves.toMatchObject({ + ok: false, + error: { + message: expect.stringContaining('Command timed out'), + exitCode: 143, + timedOut: true, + }, + }) + } finally { + vi.useRealTimers() + } + }) + + it('uses a 5s process termination grace fallback when none is configured', async () => { + vi.useFakeTimers() + + class FakeChild extends EventEmitter { + stdout = new EventEmitter() + stderr = new EventEmitter() + exitCode: number | null = null + signals: string[] = [] + + kill(signal: string) { + this.signals.push(signal) + if (signal === 'SIGKILL') { + this.exitCode = 137 + this.emit('close', 137) + } + } + } + + const child = new FakeChild() + const spawnImpl = vi.fn(() => child) as unknown as ReturnType + const runtime = createRuntimeWithSpawn(spawnImpl, { + processTerminationGraceMs: undefined, + }) + + try { + const outcomePromise = runtime + .runToktrack( + { + command: 'fake-runner', + prefixArgs: [], + env: {}, + }, + ['daily', 
'--json'], + { timeoutMs: 50 }, + ) + .then((value) => ({ ok: true as const, value })) + .catch((error) => ({ ok: false as const, error })) + + await vi.advanceTimersByTimeAsync(50) + expect(child.signals).toEqual(['SIGTERM']) + + await vi.advanceTimersByTimeAsync(4999) + expect(child.signals).toEqual(['SIGTERM']) + + await vi.advanceTimersByTimeAsync(1) + expect(child.signals).toEqual(['SIGTERM', 'SIGKILL']) + await expect(outcomePromise).resolves.toMatchObject({ + ok: false, + error: { + message: expect.stringContaining('Command timed out'), + exitCode: 137, + timedOut: true, + }, + }) + } finally { + vi.useRealTimers() + } + }) +}) diff --git a/tests/unit/server-helpers-file-locks.test.ts b/tests/unit/server-helpers-file-locks.test.ts index ec25d92..1b134a2 100644 --- a/tests/unit/server-helpers-file-locks.test.ts +++ b/tests/unit/server-helpers-file-locks.test.ts @@ -32,6 +32,9 @@ const { createDataRuntime } = require('../../server/data-runtime.js') as { withFileMutationLock: (filePath: string, operation: () => Promise) => Promise } } +const { createDataRuntimeFileLocks } = require('../../server/data-runtime/file-locks.js') as { + createDataRuntimeFileLocks: (options: Record) => unknown +} afterEach(() => { resetServerHelperTestState() @@ -102,6 +105,17 @@ function getMode(filePath: string) { return fs.statSync(filePath).mode & 0o777 } +function escapeRegExp(value: string) { + return value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&') +} + +function expectAtomicTempPath(tempPath: string, targetFile: string, timestamp: number) { + const prefix = `${targetFile}.${process.pid}.${timestamp}.` + + expect(tempPath).toMatch(new RegExp(`^${escapeRegExp(prefix)}[a-f0-9]+\\.tmp$`)) + expect(tempPath.length).toBeGreaterThan(prefix.length + '.tmp'.length) +} + function waitForChildMessage( child: ReturnType, timeoutMs: number, @@ -180,6 +194,38 @@ function waitForChildExit(child: ReturnType, timeoutMs: number) { } describe('server helper utilities: file mutation locks', () => { + 
it('fails fast when required file lock runtime options are invalid', () => { + expect(() => + createDataRuntimeFileLocks({ + fsPromises, + path, + processObject: process, + runtimeInstanceId: '', + isWindows: false, + secureDirMode: 0o1000, + secureFileMode: 0o600, + fileMutationLockTimeoutMs: -1, + fileMutationLockStaleMs: 30_000, + settingsFile: '/tmp/settings.json', + dataFile: '/tmp/data.json', + }), + ).toThrow('runtimeInstanceId, secureDirMode, fileMutationLockTimeoutMs') + + expect(() => + createDataRuntimeFileLocks({ + fsPromises, + path, + processObject: process, + runtimeInstanceId: 'runtime', + isWindows: false, + fileMutationLockTimeoutMs: 100, + fileMutationLockStaleMs: 30_000, + settingsFile: '/tmp/settings.json', + dataFile: '/tmp/data.json', + }), + ).toThrow('secureDirMode, secureFileMode') + }) + it('serializes operations for the same file path and cleans up locks afterwards', async () => { const tempDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-lock-same-file-')) const settingsFile = path.join(tempDir, 'settings.json') @@ -231,6 +277,39 @@ describe('server helper utilities: file mutation locks', () => { await fsPromises.rm(tempDir, { recursive: true, force: true }) }) + it('times out in-process waiters behind a hung predecessor and releases their queue slot', async () => { + const runtimeRoot = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-lock-queue-timeout-')) + const runtime = createFileModeRuntime(runtimeRoot, path.join(runtimeRoot, 'legacy-data.json')) + let releaseFirst: (() => void) | null = null + + try { + const first = runtime.withFileMutationLock( + runtime.paths.dataFile, + async () => + new Promise((resolve) => { + releaseFirst = resolve + }), + ) + + await vi.waitFor(() => { + expect(existsSync(runtime.getFileMutationLockDir(runtime.paths.dataFile))).toBe(true) + }) + + await expect( + runtime.withFileMutationLock(runtime.paths.dataFile, async () => 'late'), + ).rejects.toThrow('Timed out waiting for previous 
file mutation lock') + + releaseFirst?.() + await first + await expect( + runtime.withFileMutationLock(runtime.paths.dataFile, async () => 'ok'), + ).resolves.toBe('ok') + } finally { + releaseFirst?.() + await fsPromises.rm(runtimeRoot, { recursive: true, force: true }) + } + }) + it('serializes overlapping multi-file operations in a stable order', async () => { const tempDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-lock-ordered-')) const settingsFile = path.join(tempDir, 'settings.json') @@ -290,6 +369,35 @@ describe('server helper utilities: file mutation locks', () => { await fsPromises.rm(targetDir, { recursive: true, force: true }) }) + it('reaps old lock directories from a different runtime instance even when the pid is alive', async () => { + const runtimeRoot = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-lock-instance-reuse-')) + const runtime = createFileModeRuntime(runtimeRoot, path.join(runtimeRoot, 'legacy-data.json')) + const lockDir = runtime.getFileMutationLockDir(runtime.paths.dataFile) + + try { + await fsPromises.mkdir(lockDir, { recursive: true }) + await fsPromises.writeFile( + path.join(lockDir, 'owner.json'), + JSON.stringify( + { + pid: process.pid, + createdAt: new Date(Date.now() - 1000).toISOString(), + instanceId: 'different-runtime-instance', + }, + null, + 2, + ), + ) + + await expect( + runtime.withFileMutationLock(runtime.paths.dataFile, async () => 'ok'), + ).resolves.toBe('ok') + expect(existsSync(lockDir)).toBe(false) + } finally { + await fsPromises.rm(runtimeRoot, { recursive: true, force: true }) + } + }) + it('does not reap stale locks while the recorded owner pid is still running', async () => { const runtime = createShortTimeoutFileLockRuntime() const targetDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-stale-pid-lock-')) @@ -302,7 +410,7 @@ describe('server helper utilities: file mutation locks', () => { JSON.stringify({ pid: 4242, createdAt: new Date(Date.now() - 60_000).toISOString(), - 
instanceId: 'stale-instance', + instanceId: `test-${process.pid}`, }), ) @@ -354,7 +462,13 @@ dataRuntime.withFileMutationLock(filePath, async () => { if (typeof process.send === 'function') { process.send('locked') } - await new Promise((resolve) => setTimeout(resolve, 250)) + await new Promise((resolve) => { + process.once('message', (message) => { + if (message === 'release') { + resolve() + } + }) + }) }).then(() => process.exit(0)).catch((error) => { console.error(error) process.exit(1) @@ -370,11 +484,19 @@ dataRuntime.withFileMutationLock(filePath, async () => { try { await waitForChildMessage(child, 5_000) - const startedAt = Date.now() - await withFileMutationLock(targetFile, async () => undefined) - const waitedMs = Date.now() - startedAt + let parentAcquiredLock = false + const parentLock = withFileMutationLock(targetFile, async () => { + parentAcquiredLock = true + }) + + await vi.waitFor(() => { + expect(getPendingFileMutationLockCount()).toBeGreaterThan(0) + }) + expect(parentAcquiredLock).toBe(false) - expect(waitedMs).toBeGreaterThanOrEqual(150) + child.send('release') + await parentLock + expect(parentAcquiredLock).toBe(true) if (child.exitCode === null) { await waitForChildExit(child, 5_000) @@ -392,29 +514,59 @@ dataRuntime.withFileMutationLock(filePath, async () => { it('removes temporary files when atomic async writes fail after creating the temp file', async () => { const targetDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-write-json-')) const targetFile = path.join(targetDir, 'settings.json') - const expectedTempPath = `${targetFile}.${process.pid}.1700000000000.tmp` const renameError = Object.assign(new Error('rename failed'), { code: 'EXDEV' }) const renameSpy = vi.spyOn(fsPromises, 'rename').mockRejectedValue(renameError) const unlinkSpy = vi.spyOn(fsPromises, 'unlink') const nowSpy = vi.spyOn(Date, 'now').mockReturnValue(1700000000000) - await expect(writeJsonAtomicAsync(targetFile, { ok: true })).rejects.toBe(renameError) + 
try { + await expect(writeJsonAtomicAsync(targetFile, { ok: true })).rejects.toBe(renameError) - expect(renameSpy).toHaveBeenCalled() - expect(unlinkSpy).toHaveBeenCalledWith(expectedTempPath) - expect(existsSync(expectedTempPath)).toBe(false) + expect(renameSpy).toHaveBeenCalled() + const tempPath = unlinkSpy.mock.calls[0]?.[0] as string + expectAtomicTempPath(tempPath, targetFile, 1700000000000) + expect(existsSync(tempPath)).toBe(false) + } finally { + renameSpy.mockRestore() + unlinkSpy.mockRestore() + nowSpy.mockRestore() + await fsPromises.rm(targetDir, { recursive: true, force: true }) + } + }) - renameSpy.mockRestore() - unlinkSpy.mockRestore() - nowSpy.mockRestore() - await fsPromises.rm(targetDir, { recursive: true, force: true }) + it('surfaces async atomic write cleanup failures with the original write error', async () => { + const targetDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-write-json-cleanup-')) + const targetFile = path.join(targetDir, 'settings.json') + const renameError = Object.assign(new Error('rename failed'), { code: 'EXDEV' }) + const cleanupError = Object.assign(new Error('cleanup failed'), { code: 'EACCES' }) + + const renameSpy = vi.spyOn(fsPromises, 'rename').mockRejectedValue(renameError) + const unlinkSpy = vi.spyOn(fsPromises, 'unlink').mockRejectedValue(cleanupError) + + try { + let caughtError: unknown + try { + await writeJsonAtomicAsync(targetFile, { ok: true }) + } catch (error) { + caughtError = error + } + + expect(caughtError).toBeInstanceOf(AggregateError) + expect((caughtError as AggregateError).message).toContain( + 'Failed atomic JSON write and temp-file cleanup', + ) + expect((caughtError as AggregateError).errors).toEqual([renameError, cleanupError]) + } finally { + renameSpy.mockRestore() + unlinkSpy.mockRestore() + await fsPromises.rm(targetDir, { recursive: true, force: true }) + } }) it('removes temporary files when atomic sync writes fail after creating the temp file', async () => { const 
targetDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-write-json-sync-')) const targetFile = path.join(targetDir, 'settings.json') - const expectedTempPath = `${targetFile}.${process.pid}.1700000000002.tmp` const renameError = Object.assign(new Error('rename failed'), { code: 'EXDEV' }) const runtime = createFileModeRuntime(targetDir, path.join(targetDir, 'legacy-data.json')) @@ -426,8 +578,10 @@ dataRuntime.withFileMutationLock(filePath, async () => { try { expect(() => runtime.writeJsonAtomic(targetFile, { ok: true })).toThrow(renameError) - expect(renameSpy).toHaveBeenCalledWith(expectedTempPath, targetFile) - expect(existsSync(expectedTempPath)).toBe(false) + const tempPath = renameSpy.mock.calls[0]?.[0] as string + expect(renameSpy).toHaveBeenCalledWith(tempPath, targetFile) + expectAtomicTempPath(tempPath, targetFile, 1700000000002) + expect(existsSync(tempPath)).toBe(false) } finally { renameSpy.mockRestore() nowSpy.mockRestore() @@ -435,10 +589,45 @@ dataRuntime.withFileMutationLock(filePath, async () => { } }) + it('surfaces sync atomic write cleanup failures with the original write error', async () => { + const targetDir = await fsPromises.mkdtemp( + path.join(tmpdir(), 'ttdash-write-json-sync-cleanup-'), + ) + const targetFile = path.join(targetDir, 'settings.json') + const renameError = Object.assign(new Error('rename failed'), { code: 'EXDEV' }) + const cleanupError = Object.assign(new Error('cleanup failed'), { code: 'EACCES' }) + const runtime = createFileModeRuntime(targetDir, path.join(targetDir, 'legacy-data.json')) + + const renameSpy = vi.spyOn(fs, 'renameSync').mockImplementation(() => { + throw renameError + }) + const unlinkSpy = vi.spyOn(fs, 'unlinkSync').mockImplementation(() => { + throw cleanupError + }) + + try { + let caughtError: unknown + try { + runtime.writeJsonAtomic(targetFile, { ok: true }) + } catch (error) { + caughtError = error + } + + expect(caughtError).toBeInstanceOf(AggregateError) + expect((caughtError as 
AggregateError).message).toContain( + 'Failed atomic JSON write and temp-file cleanup', + ) + expect((caughtError as AggregateError).errors).toEqual([renameError, cleanupError]) + } finally { + renameSpy.mockRestore() + unlinkSpy.mockRestore() + await fsPromises.rm(targetDir, { recursive: true, force: true }) + } + }) + it('removes temporary files when writeFile rejects after creating the temp file', async () => { const targetDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-write-json-')) const targetFile = path.join(targetDir, 'settings.json') - const expectedTempPath = `${targetFile}.${process.pid}.1700000000001.tmp` const writeError = Object.assign(new Error('write failed'), { code: 'EIO' }) const originalWriteFile = fsPromises.writeFile.bind(fsPromises) const writeSpy = vi @@ -450,16 +639,19 @@ dataRuntime.withFileMutationLock(filePath, async () => { const unlinkSpy = vi.spyOn(fsPromises, 'unlink') const nowSpy = vi.spyOn(Date, 'now').mockReturnValue(1700000000001) - await expect(writeJsonAtomicAsync(targetFile, { ok: true })).rejects.toBe(writeError) - - expect(writeSpy).toHaveBeenCalled() - expect(unlinkSpy).toHaveBeenCalledWith(expectedTempPath) - expect(existsSync(expectedTempPath)).toBe(false) + try { + await expect(writeJsonAtomicAsync(targetFile, { ok: true })).rejects.toBe(writeError) - writeSpy.mockRestore() - unlinkSpy.mockRestore() - nowSpy.mockRestore() - await fsPromises.rm(targetDir, { recursive: true, force: true }) + expect(writeSpy).toHaveBeenCalled() + const tempPath = unlinkSpy.mock.calls[0]?.[0] as string + expectAtomicTempPath(tempPath, targetFile, 1700000000001) + expect(existsSync(tempPath)).toBe(false) + } finally { + writeSpy.mockRestore() + unlinkSpy.mockRestore() + nowSpy.mockRestore() + await fsPromises.rm(targetDir, { recursive: true, force: true }) + } }) it.runIf(process.platform !== 'win32')( diff --git a/tests/unit/server-helpers-latest-version.test.ts b/tests/unit/server-helpers-latest-version.test.ts new file mode 
100644 index 0000000..00f42f5 --- /dev/null +++ b/tests/unit/server-helpers-latest-version.test.ts @@ -0,0 +1,115 @@ +import { afterEach, describe, expect, it, vi } from 'vitest' +import { + TOKTRACK_VERSION, + createSpawnSequence, + resetServerHelperTestState, +} from './server-helpers.shared' +import { createRuntimeWithSpawn } from './server-helpers-runner-test-utils' + +afterEach(() => { + resetServerHelperTestState() +}) + +describe('server helper utilities: toktrack latest-version lookup', () => { + it('returns a structured warning when the latest toktrack version lookup times out', async () => { + vi.useFakeTimers() + const runtime = createRuntimeWithSpawn(createSpawnSequence([{ hang: true }]), { + latestLookupTimeoutMs: 50, + }) + + try { + const statusPromise = runtime.lookupLatestToktrackVersion() + await vi.advanceTimersByTimeAsync(50) + + await expect(statusPromise).resolves.toMatchObject({ + configuredVersion: TOKTRACK_VERSION, + latestVersion: null, + isLatest: null, + lookupStatus: 'timeout', + message: expect.stringContaining('Command timed out'), + }) + } finally { + vi.useRealTimers() + } + }) + + it('returns a structured warning when the latest toktrack version lookup is malformed', async () => { + const runtime = createRuntimeWithSpawn(createSpawnSequence([{ stdout: 'not-a-version\n' }])) + + await expect(runtime.lookupLatestToktrackVersion()).resolves.toMatchObject({ + configuredVersion: TOKTRACK_VERSION, + latestVersion: null, + isLatest: null, + lookupStatus: 'malformed-output', + message: 'npm returned an invalid toktrack version: not-a-version', + }) + }) + + it('reuses a cached successful latest-version lookup until the TTL expires', async () => { + const spawnImpl = createSpawnSequence([ + { stdout: `${TOKTRACK_VERSION}\n` }, + { stdout: '9.9.9\n' }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl) + const nowSpy = vi.spyOn(Date, 'now') + + try { + nowSpy.mockReturnValue(1_000_000) + const firstStatus = await 
runtime.lookupLatestToktrackVersion() + nowSpy.mockReturnValue(1_000_000) + const secondStatus = await runtime.lookupLatestToktrackVersion() + nowSpy.mockReturnValue(1_000_000 + 5 * 60 * 1000 + 1) + const thirdStatus = await runtime.lookupLatestToktrackVersion() + + expect(firstStatus).toMatchObject({ + configuredVersion: TOKTRACK_VERSION, + latestVersion: TOKTRACK_VERSION, + isLatest: true, + lookupStatus: 'ok', + }) + expect(secondStatus).toEqual(firstStatus) + expect(thirdStatus).toMatchObject({ + latestVersion: '9.9.9', + isLatest: false, + lookupStatus: 'ok', + }) + expect(spawnImpl).toHaveBeenCalledTimes(2) + } finally { + nowSpy.mockRestore() + } + }) + + it('reuses a cached failed latest-version lookup until the failure TTL expires', async () => { + const spawnImpl = createSpawnSequence([ + { code: 1, stderr: 'lookup failed\n' }, + { code: 1, stderr: 'lookup still failed\n' }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl) + const nowSpy = vi.spyOn(Date, 'now') + + try { + nowSpy.mockReturnValue(2_000_000) + const firstStatus = await runtime.lookupLatestToktrackVersion() + nowSpy.mockReturnValue(2_000_000) + const secondStatus = await runtime.lookupLatestToktrackVersion() + nowSpy.mockReturnValue(2_000_000 + 60 * 1000 + 1) + const thirdStatus = await runtime.lookupLatestToktrackVersion() + + expect(firstStatus).toMatchObject({ + configuredVersion: TOKTRACK_VERSION, + latestVersion: null, + isLatest: null, + lookupStatus: 'failed', + message: 'lookup failed', + }) + expect(secondStatus).toEqual(firstStatus) + expect(thirdStatus).toMatchObject({ + lookupStatus: 'failed', + message: 'lookup still failed', + }) + expect(spawnImpl).toHaveBeenCalledTimes(2) + } finally { + nowSpy.mockRestore() + } + }) +}) diff --git a/tests/unit/server-helpers-runner-core.test.ts b/tests/unit/server-helpers-runner-core.test.ts deleted file mode 100644 index e202ed0..0000000 --- a/tests/unit/server-helpers-runner-core.test.ts +++ /dev/null @@ -1,154 +0,0 @@ -import { 
afterEach, describe, expect, it } from 'vitest' -import { - EventEmitter, - TOKTRACK_VERSION, - getExecutableName, - getLocalToktrackDisplayCommand, - getToktrackLatestLookupTimeoutMs, - getToktrackRunnerTimeouts, - parseToktrackVersionOutput, - resetServerHelperTestState, - runCommandWithSpawn, - toAutoImportRunnerResolutionError, -} from './server-helpers.shared' - -afterEach(() => { - resetServerHelperTestState() -}) - -describe('server helper utilities: toktrack runner core behavior', () => { - it('maps executable names correctly across platforms', () => { - expect(getExecutableName('npm', true)).toBe('npm.cmd') - expect(getExecutableName('bun', true)).toBe('bun.exe') - expect(getExecutableName('bunx', true)).toBe('bun.exe') - expect(getExecutableName('npx', true)).toBe('npx.cmd') - expect(getExecutableName('toktrack', true)).toBe('toktrack') - expect(getExecutableName('npm', false)).toBe('npm') - expect(getExecutableName('bun', false)).toBe('bun') - expect(getExecutableName('bunx', false)).toBe('bunx') - expect(getExecutableName('npx', false)).toBe('npx') - }) - - it('renders the local toktrack command example correctly across platforms', () => { - expect(getLocalToktrackDisplayCommand(false)).toBe('node_modules/.bin/toktrack daily --json') - expect(getLocalToktrackDisplayCommand(true)).toBe( - 'node_modules\\.bin\\toktrack.cmd daily --json', - ) - }) - - it('parses toktrack version banners down to the raw version', () => { - expect(parseToktrackVersionOutput(`toktrack ${TOKTRACK_VERSION}`)).toBe(TOKTRACK_VERSION) - expect(parseToktrackVersionOutput(`${TOKTRACK_VERSION}\n`)).toBe(TOKTRACK_VERSION) - }) - - it('uses longer warmup timeouts for package runners than for local toktrack', () => { - const localTimeouts = getToktrackRunnerTimeouts({ method: 'local' }) - const bunxTimeouts = getToktrackRunnerTimeouts({ method: 'bunx' }) - const npxTimeouts = getToktrackRunnerTimeouts({ method: 'npm' }) - - expect(localTimeouts).toEqual({ - probeMs: 7000, - 
versionCheckMs: 7000, - importMs: 60000, - }) - expect(bunxTimeouts).toEqual({ - probeMs: 45000, - versionCheckMs: 45000, - importMs: 60000, - }) - expect(npxTimeouts).toEqual(bunxTimeouts) - }) - - it('uses a less aggressive timeout for latest-version registry lookups', () => { - expect(getToktrackLatestLookupTimeoutMs()).toBe(15000) - }) - - it('waits for timed-out toktrack commands to exit before rejecting', async () => { - class FakeChild extends EventEmitter { - stdout = new EventEmitter() - stderr = new EventEmitter() - exitCode: number | null = null - - kill(signal: string) { - if (signal !== 'SIGTERM') { - this.exitCode = 137 - this.emit('close', 137) - return - } - - setTimeout(() => { - this.exitCode = 143 - this.emit('close', 143) - }, 150) - } - } - - const child = new FakeChild() - let settled = false - const outcomePromise = runCommandWithSpawn('fake-runner', ['daily', '--json'], { - timeoutMs: 50, - spawnImpl: () => child, - }) - .then((value) => ({ ok: true as const, value })) - .catch((error) => ({ ok: false as const, error })) - .finally(() => { - settled = true - }) - - await new Promise((resolve) => setTimeout(resolve, 75)) - expect(settled).toBe(false) - - await expect(outcomePromise).resolves.toMatchObject({ - ok: false, - error: { - message: expect.stringContaining('Command timed out'), - timedOut: true, - }, - }) - }) - - it('prioritizes local version mismatches over generic no-runner feedback', () => { - const error = toAutoImportRunnerResolutionError({ - localVersionMismatch: { - detectedVersion: '9.9.9', - expectedVersion: TOKTRACK_VERSION, - }, - localFailure: null, - runnerFailures: [{ label: 'bunx', message: 'missing', timedOut: false }], - }) - - expect(error.messageKey).toBe('localToktrackVersionMismatch') - expect(error.message).toContain('9.9.9') - expect(error.message).toContain(TOKTRACK_VERSION) - }) - - it('reports package runner failures when no compatible fallback succeeds', () => { - const error = 
toAutoImportRunnerResolutionError({ - localVersionMismatch: null, - localFailure: null, - runnerFailures: [ - { label: 'bunx', message: 'failed to start', timedOut: false }, - { label: 'npm exec', message: 'permission denied', timedOut: false }, - ], - }) - - expect(error.messageKey).toBe('packageRunnerFailed') - expect(error.message).toContain('bunx: failed to start') - expect(error.message).toContain('npm exec: permission denied') - }) - - it('surfaces a dedicated warmup-timeout message when only package runners time out', () => { - const error = toAutoImportRunnerResolutionError({ - localVersionMismatch: null, - localFailure: null, - runnerFailures: [ - { label: 'bunx', message: 'Command timed out', timedOut: true }, - { label: 'npm exec', message: 'Command timed out', timedOut: true }, - ], - }) - - expect(error.messageKey).toBe('packageRunnerWarmupTimedOut') - expect(error.message).toContain('bunx / npm exec') - expect(error.message).toContain('45s') - }) -}) diff --git a/tests/unit/server-helpers-runner-errors.test.ts b/tests/unit/server-helpers-runner-errors.test.ts new file mode 100644 index 0000000..d0b3643 --- /dev/null +++ b/tests/unit/server-helpers-runner-errors.test.ts @@ -0,0 +1,82 @@ +import { afterEach, describe, expect, it } from 'vitest' +import { + TOKTRACK_VERSION, + resetServerHelperTestState, + toAutoImportRunnerResolutionError, +} from './server-helpers.shared' + +afterEach(() => { + resetServerHelperTestState() +}) + +describe('server helper utilities: toktrack runner resolution errors', () => { + it('prioritizes local version mismatches over generic no-runner feedback', () => { + const error = toAutoImportRunnerResolutionError({ + localVersionMismatch: { + detectedVersion: '9.9.9', + expectedVersion: TOKTRACK_VERSION, + }, + localFailure: null, + runnerFailures: [{ label: 'bunx', message: 'missing', timedOut: false }], + }) + + expect(error.messageKey).toBe('localToktrackVersionMismatch') + expect(error.message).toContain('9.9.9') + 
expect(error.message).toContain(TOKTRACK_VERSION) + }) + + it('prioritizes local startup failures over package runner feedback', () => { + const error = toAutoImportRunnerResolutionError({ + localVersionMismatch: null, + localFailure: 'permission denied', + runnerFailures: [ + { label: 'bunx', message: 'missing', timedOut: false }, + { label: 'npm exec', message: 'missing', timedOut: false }, + ], + }) + + expect(error.messageKey).toBe('localToktrackFailed') + expect(error.message).toContain('permission denied') + }) + + it('reports package runner failures when no compatible fallback succeeds', () => { + const error = toAutoImportRunnerResolutionError({ + localVersionMismatch: null, + localFailure: null, + runnerFailures: [ + { label: 'bunx', message: 'failed to start', timedOut: false }, + { label: 'npm exec', message: 'permission denied', timedOut: false }, + ], + }) + + expect(error.messageKey).toBe('packageRunnerFailed') + expect(error.message).toContain('bunx: failed to start') + expect(error.message).toContain('npm exec: permission denied') + }) + + it('surfaces a dedicated warmup-timeout message when only package runners time out', () => { + const error = toAutoImportRunnerResolutionError({ + localVersionMismatch: null, + localFailure: null, + runnerFailures: [ + { label: 'bunx', message: 'Command timed out', timedOut: true }, + { label: 'npm exec', message: 'Command timed out', timedOut: true }, + ], + }) + + expect(error.messageKey).toBe('packageRunnerWarmupTimedOut') + expect(error.message).toContain('bunx / npm exec') + expect(error.message).toContain('45s') + }) + + it('reports a no-runner error when no resolution diagnostics are available', () => { + const error = toAutoImportRunnerResolutionError({ + localVersionMismatch: null, + localFailure: null, + runnerFailures: [], + }) + + expect(error.messageKey).toBe('noRunnerFound') + expect(error.message).toBe('No local toktrack, Bun, or npm exec installation found.') + }) +}) diff --git 
a/tests/unit/server-helpers-runner-process.test.ts b/tests/unit/server-helpers-runner-process.test.ts index e6ae978..61b7335 100644 --- a/tests/unit/server-helpers-runner-process.test.ts +++ b/tests/unit/server-helpers-runner-process.test.ts @@ -45,36 +45,6 @@ describe('server helper utilities: toktrack runner process integration', () => { } }) - it.runIf(process.platform !== 'win32')( - 'returns a structured warning when the latest toktrack version lookup times out', - async () => { - const tempDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-toktrack-timeout-')) - const originalPath = process.env.PATH - const nodePath = JSON.stringify(process.execPath) - process.env.PATH = tempDir - - try { - await writeExecutableScript( - tempDir, - 'npm', - `exec ${nodePath} -e "setTimeout(() => {}, 1000)"`, - ) - - const status = await lookupLatestToktrackVersion(50) - expect(status).toMatchObject({ - configuredVersion: TOKTRACK_VERSION, - latestVersion: null, - isLatest: null, - lookupStatus: 'failed', - }) - expect(status.message).toContain('Command timed out') - } finally { - process.env.PATH = originalPath - await fsPromises.rm(tempDir, { recursive: true, force: true }) - } - }, - ) - it.runIf(process.platform !== 'win32')( 'falls back to npx when bunx exists but cannot execute toktrack', async () => { @@ -166,97 +136,6 @@ describe('server helper utilities: toktrack runner process integration', () => { }, ) - it.runIf(process.platform !== 'win32')( - 'reuses a cached successful latest-version lookup until the TTL expires', - async () => { - const tempDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-latest-cache-success-')) - const countFile = path.join(tempDir, 'count.txt') - const originalPath = process.env.PATH - const nowSpy = vi.spyOn(Date, 'now') - process.env.PATH = tempDir - - try { - await writeExecutableScript( - tempDir, - 'npm', - `echo hit >> ${JSON.stringify(countFile)}\necho "${TOKTRACK_VERSION}"`, - ) - - nowSpy.mockReturnValue(1_000_000) 
- const firstStatus = await lookupLatestToktrackVersion() - nowSpy.mockReturnValue(1_000_000) - const secondStatus = await lookupLatestToktrackVersion() - nowSpy.mockReturnValue(1_000_000 + 5 * 60 * 1000 + 1) - const thirdStatus = await lookupLatestToktrackVersion() - - expect(firstStatus).toMatchObject({ - configuredVersion: TOKTRACK_VERSION, - latestVersion: TOKTRACK_VERSION, - isLatest: true, - lookupStatus: 'ok', - }) - expect(secondStatus).toEqual(firstStatus) - expect(thirdStatus).toEqual(firstStatus) - - const invocations = (await fsPromises.readFile(countFile, 'utf8')) - .trim() - .split('\n') - .filter(Boolean) - expect(invocations).toHaveLength(2) - } finally { - nowSpy.mockRestore() - process.env.PATH = originalPath - await fsPromises.rm(tempDir, { recursive: true, force: true }) - } - }, - ) - - it.runIf(process.platform !== 'win32')( - 'reuses a cached failed latest-version lookup until the failure TTL expires', - async () => { - const tempDir = await fsPromises.mkdtemp(path.join(tmpdir(), 'ttdash-latest-cache-failure-')) - const countFile = path.join(tempDir, 'count.txt') - const originalPath = process.env.PATH - const nowSpy = vi.spyOn(Date, 'now') - process.env.PATH = tempDir - - try { - await writeExecutableScript( - tempDir, - 'npm', - `echo hit >> ${JSON.stringify(countFile)}\necho "lookup failed" >&2\nexit 1`, - ) - - nowSpy.mockReturnValue(2_000_000) - const firstStatus = await lookupLatestToktrackVersion() - nowSpy.mockReturnValue(2_000_000) - const secondStatus = await lookupLatestToktrackVersion() - nowSpy.mockReturnValue(2_000_000 + 60 * 1000 + 1) - const thirdStatus = await lookupLatestToktrackVersion() - - expect(firstStatus).toMatchObject({ - configuredVersion: TOKTRACK_VERSION, - latestVersion: null, - isLatest: null, - lookupStatus: 'failed', - message: 'lookup failed', - }) - expect(secondStatus).toEqual(firstStatus) - expect(thirdStatus).toEqual(firstStatus) - - const invocations = (await fsPromises.readFile(countFile, 'utf8')) - 
.trim() - .split('\n') - .filter(Boolean) - expect(invocations).toHaveLength(2) - } finally { - nowSpy.mockRestore() - process.env.PATH = originalPath - await fsPromises.rm(tempDir, { recursive: true, force: true }) - } - }, - ) - it.runIf(process.platform === 'win32')( 'checks npx on Windows without emitting DEP0190 warnings', async () => { diff --git a/tests/unit/server-helpers-runner-resolution.test.ts b/tests/unit/server-helpers-runner-resolution.test.ts new file mode 100644 index 0000000..159d588 --- /dev/null +++ b/tests/unit/server-helpers-runner-resolution.test.ts @@ -0,0 +1,103 @@ +import { afterEach, describe, expect, it, vi } from 'vitest' +import { + TOKTRACK_VERSION, + createSpawnSequence, + resetServerHelperTestState, +} from './server-helpers.shared' +import { createRuntimeWithSpawn } from './server-helpers-runner-test-utils' + +afterEach(() => { + resetServerHelperTestState() +}) + +describe('server helper utilities: toktrack runner resolution', () => { + it('honors a configured local toktrack binary override for display and execution', async () => { + const spawnImpl = createSpawnSequence([{ stdout: `toktrack ${TOKTRACK_VERSION}` }]) + const runtime = createRuntimeWithSpawn(spawnImpl, { + localBinExists: true, + localBinOverride: '/custom/bin/toktrack', + }) + + expect(runtime.getLocalToktrackDisplayCommand()).toBe('/custom/bin/toktrack daily --json') + + await expect(runtime.resolveToktrackRunner()).resolves.toMatchObject({ + command: '/custom/bin/toktrack', + displayCommand: '/custom/bin/toktrack daily --json', + method: 'local', + }) + expect(spawnImpl).toHaveBeenCalledWith( + '/custom/bin/toktrack', + ['--version'], + expect.objectContaining({ + env: { TTDASH_TOKTRACK_LOCAL_BIN: '/custom/bin/toktrack' }, + }), + ) + }) + + it('resolves a compatible local toktrack runner without probing package fallbacks', async () => { + const spawnImpl = createSpawnSequence([{ stdout: `toktrack ${TOKTRACK_VERSION}\n` }]) + const runtime = 
createRuntimeWithSpawn(spawnImpl, { localBinExists: true }) + + await expect(runtime.resolveToktrackRunner()).resolves.toMatchObject({ + command: '/missing/toktrack', + method: 'local', + label: 'local toktrack', + }) + expect(spawnImpl).toHaveBeenCalledTimes(1) + expect(spawnImpl).toHaveBeenCalledWith( + '/missing/toktrack', + ['--version'], + expect.objectContaining({ + stdio: ['ignore', 'pipe', 'pipe'], + }), + ) + }) + + it('uses the local version-check timeout before probing package fallbacks', async () => { + vi.useFakeTimers() + const spawnImpl = createSpawnSequence([ + { hang: true }, + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl, { + localBinExists: true, + localProbeTimeoutMs: 7000, + localVersionCheckTimeoutMs: 50, + }) + + try { + const resolutionPromise = runtime.resolveToktrackRunner() + await vi.advanceTimersByTimeAsync(50) + + expect(spawnImpl).toHaveBeenCalledTimes(2) + await expect(resolutionPromise).resolves.toMatchObject({ + command: 'bunx', + method: 'bunx', + }) + } finally { + vi.useRealTimers() + } + }) + + it('falls back to npm when the local runner version mismatches and bunx probing fails', async () => { + const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => undefined) + const spawnImpl = createSpawnSequence([ + { stdout: 'toktrack 0.0.0\n' }, + { code: 1, stderr: 'bunx failed\n' }, + { stdout: `toktrack ${TOKTRACK_VERSION}\n` }, + ]) + const runtime = createRuntimeWithSpawn(spawnImpl, { localBinExists: true }) + + try { + await expect(runtime.resolveToktrackRunner()).resolves.toMatchObject({ + command: 'npx', + method: 'npm', + label: 'npm exec', + }) + expect(spawnImpl).toHaveBeenCalledTimes(3) + expect(warnSpy).toHaveBeenCalledWith(expect.stringContaining('Failed to probe bunx')) + } finally { + warnSpy.mockRestore() + } + }) +}) diff --git a/tests/unit/server-helpers-runner-test-utils.ts b/tests/unit/server-helpers-runner-test-utils.ts new file mode 100644 index 
0000000..260f2dc --- /dev/null +++ b/tests/unit/server-helpers-runner-test-utils.ts @@ -0,0 +1,53 @@ +import { TOKTRACK_VERSION, createAutoImportRuntime } from './server-helpers.shared' +import type { createSpawnSequence } from './server-helpers.shared' + +export function createRuntimeWithSpawn( + spawnImpl: ReturnType, + options: { + latestLookupTimeoutMs?: number + localBinExists?: boolean + localBinOverride?: string + localProbeTimeoutMs?: number + localVersionCheckTimeoutMs?: number + processTerminationGraceMs?: number | undefined + } = {}, +) { + const localBin = options.localBinOverride ?? '/missing/toktrack' + const processTerminationGraceMs = Object.prototype.hasOwnProperty.call( + options, + 'processTerminationGraceMs', + ) + ? options.processTerminationGraceMs + : 1000 + + return createAutoImportRuntime({ + fs: { + existsSync: (filePath: string) => Boolean(options.localBinExists && filePath === localBin), + }, + processObject: { + env: options.localBinOverride ? { TTDASH_TOKTRACK_LOCAL_BIN: options.localBinOverride } : {}, + }, + spawnCrossPlatform: spawnImpl, + normalizeIncomingData: (input: unknown) => input, + withSettingsAndDataMutationLock: async (operation: () => Promise) => operation(), + writeData: () => undefined, + updateDataLoadState: async () => undefined, + toktrackPackageName: 'toktrack', + toktrackPackageSpec: `toktrack@${TOKTRACK_VERSION}`, + toktrackVersion: TOKTRACK_VERSION, + toktrackLocalBin: localBin, + npxCacheDir: '/tmp/ttdash-test-npx-cache', + isWindows: false, + processTerminationGraceMs, + toktrackLocalRunnerProbeTimeoutMs: options.localProbeTimeoutMs ?? 7000, + toktrackLocalRunnerVersionCheckTimeoutMs: options.localVersionCheckTimeoutMs ?? 7000, + toktrackLocalRunnerImportTimeoutMs: 60000, + toktrackPackageRunnerProbeTimeoutMs: 45000, + toktrackPackageRunnerVersionCheckTimeoutMs: 45000, + toktrackPackageRunnerImportTimeoutMs: 60000, + toktrackLatestLookupTimeoutMs: options.latestLookupTimeoutMs ?? 
15000, + toktrackLatestCacheSuccessTtlMs: 5 * 60 * 1000, + toktrackLatestCacheFailureTtlMs: 60 * 1000, + probeLog: (message: string) => console.warn(message), + }) +} diff --git a/tests/unit/server-helpers-runner-utils.test.ts b/tests/unit/server-helpers-runner-utils.test.ts new file mode 100644 index 0000000..e15d86e --- /dev/null +++ b/tests/unit/server-helpers-runner-utils.test.ts @@ -0,0 +1,62 @@ +import { afterEach, describe, expect, it } from 'vitest' +import { + TOKTRACK_VERSION, + getExecutableName, + getLocalToktrackDisplayCommand, + getToktrackLatestLookupTimeoutMs, + getToktrackRunnerTimeouts, + parseToktrackVersionOutput, + resetServerHelperTestState, +} from './server-helpers.shared' + +afterEach(() => { + resetServerHelperTestState() +}) + +describe('server helper utilities: toktrack runner utils', () => { + it('maps executable names correctly across platforms', () => { + expect(getExecutableName('npm', true)).toBe('npm.cmd') + expect(getExecutableName('bun', true)).toBe('bun.exe') + expect(getExecutableName('bunx', true)).toBe('bun.exe') + expect(getExecutableName('npx', true)).toBe('npx.cmd') + expect(getExecutableName('toktrack', true)).toBe('toktrack') + expect(getExecutableName('npm', false)).toBe('npm') + expect(getExecutableName('bun', false)).toBe('bun') + expect(getExecutableName('bunx', false)).toBe('bunx') + expect(getExecutableName('npx', false)).toBe('npx') + }) + + it('renders the local toktrack command example correctly across platforms', () => { + expect(getLocalToktrackDisplayCommand(false)).toBe('node_modules/.bin/toktrack daily --json') + expect(getLocalToktrackDisplayCommand(true)).toBe( + 'node_modules\\.bin\\toktrack.cmd daily --json', + ) + }) + + it('parses toktrack version banners down to the raw version', () => { + expect(parseToktrackVersionOutput(`toktrack ${TOKTRACK_VERSION}`)).toBe(TOKTRACK_VERSION) + expect(parseToktrackVersionOutput(`${TOKTRACK_VERSION}\n`)).toBe(TOKTRACK_VERSION) + }) + + it('uses longer warmup 
timeouts for package runners than for local toktrack', () => { + const localTimeouts = getToktrackRunnerTimeouts({ method: 'local' }) + const bunxTimeouts = getToktrackRunnerTimeouts({ method: 'bunx' }) + const npxTimeouts = getToktrackRunnerTimeouts({ method: 'npm' }) + + expect(localTimeouts).toEqual({ + probeMs: 7000, + versionCheckMs: 7000, + importMs: 60000, + }) + expect(bunxTimeouts).toEqual({ + probeMs: 45000, + versionCheckMs: 45000, + importMs: 60000, + }) + expect(npxTimeouts).toEqual(bunxTimeouts) + }) + + it('uses a less aggressive timeout for latest-version registry lookups', () => { + expect(getToktrackLatestLookupTimeoutMs()).toBe(15000) + }) +}) diff --git a/tests/unit/server-helpers.shared.ts b/tests/unit/server-helpers.shared.ts index 21c32eb..5260a82 100644 --- a/tests/unit/server-helpers.shared.ts +++ b/tests/unit/server-helpers.shared.ts @@ -56,7 +56,16 @@ const { listenOnAvailablePort } = require('../../server/runtime.js') as { } const { createAutoImportRuntime } = require('../../server/auto-import-runtime.js') as { createAutoImportRuntime: (options: Record) => { + acquireAutoImportLease: () => { release: () => void } commandExists: (command: string, args?: string[]) => Promise + createAutoImportMessageEvent: ( + key: string, + vars?: Record, + ) => { key: string; vars: Record } + formatAutoImportMessageEvent: (event: { + key: string + vars?: Record + }) => string getExecutableName: (baseName: string, isWindows?: boolean) => string getLocalToktrackDisplayCommand: (isWindows?: boolean) => string getToktrackLatestLookupTimeoutMs: () => number @@ -65,15 +74,29 @@ const { createAutoImportRuntime } = require('../../server/auto-import-runtime.js versionCheckMs: number importMs: number } - lookupLatestToktrackVersion: (timeoutMs?: number) => Promise<{ + lookupLatestToktrackVersion: () => Promise<{ configuredVersion: string latestVersion: string | null isLatest: boolean | null - lookupStatus: 'ok' | 'failed' + lookupStatus: 'ok' | 'failed' | 
'malformed-output' | 'timeout' message?: string }> + isAutoImportRunning: () => boolean + performAutoImport: (options?: { + source?: string + onCheck?: (event: Record) => void + onProgress?: (event: { key: string; vars?: Record }) => void + onOutput?: (line: string) => void + signalOnClose?: (close: () => void) => void + lease?: { release: () => void } | null + }) => Promise<{ days: number; totalCost: number }> resetLatestToktrackVersionCache: () => void parseToktrackVersionOutput: (output: string) => string + runStartupAutoLoad: (options?: { + source?: string + log?: (message: string) => void + errorLog?: (message: string) => void + }) => Promise<{ days: number; totalCost: number } | null> resolveToktrackRunner: () => Promise<{ command: string prefixArgs: string[] @@ -115,6 +138,7 @@ const { createAutoImportRuntime } = require('../../server/auto-import-runtime.js onStderr?: (line: string) => void signalOnClose?: (close: () => void) => void timeoutMs?: number | null + maxOutputBytes?: number spawnImpl?: ( command: string, args: string[], @@ -127,9 +151,60 @@ const { createAutoImportRuntime } = require('../../server/auto-import-runtime.js } }, ) => Promise + toAutoImportErrorEvent: (error: Error) => { + key: string + vars?: Record + } + } +} + +export type FakeSpawnOutcome = { + code?: number + hang?: boolean + stderr?: string + stdout?: string +} + +export class FakeChildProcess extends EventEmitter { + stdout = new EventEmitter() + stderr = new EventEmitter() + exitCode: number | null = null + + kill(signal: string) { + if (this.exitCode !== null) { + return + } + + this.exitCode = signal === 'SIGKILL' ? 
137 : 143 + queueMicrotask(() => this.emit('close', this.exitCode)) } } +export function createSpawnSequence(outcomes: FakeSpawnOutcome[]) { + const pendingOutcomes = [...outcomes] + + return vi.fn((command?: string, args: string[] = []) => { + if (pendingOutcomes.length === 0) { + const commandDetails = [command, ...args].filter(Boolean).join(' ') + throw new Error(`Unexpected extra spawn${commandDetails ? `: ${commandDetails}` : ''}`) + } + + const child = new FakeChildProcess() + const outcome = pendingOutcomes.shift() as FakeSpawnOutcome + + if (!outcome.hang) { + queueMicrotask(() => { + if (outcome.stdout) child.stdout.emit('data', Buffer.from(outcome.stdout)) + if (outcome.stderr) child.stderr.emit('data', Buffer.from(outcome.stderr)) + child.exitCode = outcome.code ?? 0 + child.emit('close', child.exitCode) + }) + } + + return child + }) +} + const dataRuntime = createDataRuntime({ fs, fsPromises, @@ -178,6 +253,7 @@ const autoImportRuntime = createAutoImportRuntime({ toktrackLatestLookupTimeoutMs: 15000, toktrackLatestCacheSuccessTtlMs: 5 * 60 * 1000, toktrackLatestCacheFailureTtlMs: 60 * 1000, + probeLog: (message: string) => console.warn(message), }) const { commandExists, @@ -206,6 +282,7 @@ export { EventEmitter, TOKTRACK_VERSION, commandExists, + createAutoImportRuntime, existsSync, fork, fsPromises, diff --git a/tests/unit/startup-runtime.test.ts b/tests/unit/startup-runtime.test.ts index 94c1d40..879bce5 100644 --- a/tests/unit/startup-runtime.test.ts +++ b/tests/unit/startup-runtime.test.ts @@ -3,7 +3,7 @@ import { describe, expect, it, vi } from 'vitest' import { TOKTRACK_VERSION } from '../../shared/toktrack-version.js' const require = createRequire(import.meta.url) -const { createStartupRuntime, formatCurrency, formatInteger } = +const { createStartupRuntime, formatCurrency, formatDayCount, formatErrorMessage, formatInteger } = require('../../server/startup-runtime.js') as { createStartupRuntime: (options: Record) => { describeDataFile: () => 
string @@ -14,6 +14,8 @@ const { createStartupRuntime, formatCurrency, formatInteger } = writeLocalAuthSessionFile: (url: string, runtimeInstance: { id: string }) => void } formatCurrency: (value: number) => string + formatDayCount: (value: number) => string + formatErrorMessage: (error: unknown) => string formatInteger: (value: number) => string } @@ -102,7 +104,13 @@ describe('startup runtime', () => { const { dataRuntime, runtime } = createStartupRuntimeFixture() expect(formatCurrency(123.45)).toBe('$ 123') + expect(formatCurrency(-123.45)).toBe('$-123') + expect(formatDayCount(1)).toBe('1 day') + expect(formatDayCount(2)).toBe('2 days') + expect(formatErrorMessage(' plain failure ')).toBe('plain failure') + expect(formatErrorMessage({ code: 'E_TTDASH' })).toBe('{"code":"E_TTDASH"}') expect(formatInteger(12_345)).toBe("12'345") + expect(formatInteger(12_345.6)).toBe("12'346") expect(runtime.describeDataFile()).toBe("1 day, $ 12.34, 5'678 tokens") dataRuntime.readData.mockReturnValue({ @@ -248,4 +256,25 @@ describe('startup runtime', () => { expect(logs).toContain('processingUsageData') expect(logs).toContain('Auto-load complete: imported 2 days, $ 1.23.') }) + + it('logs non-error auto-load failures defensively', async () => { + const markStartupAutoLoadCompleted = vi.fn() + const { errors, runtime } = createStartupRuntimeFixture({ + autoImportRuntime: { + formatAutoImportMessageEvent: (event: { key: string }) => event.key, + performAutoImport: vi.fn(async () => { + throw 'string failure' + }), + }, + markStartupAutoLoadCompleted, + }) + + await runtime.runStartupAutoLoad() + + expect(markStartupAutoLoadCompleted).not.toHaveBeenCalled() + expect(errors).toEqual([ + 'Auto-load failed: string failure', + 'Dashboard will start without newly imported data.', + ]) + }) }) diff --git a/tests/unit/test-pipeline-scripts.test.ts b/tests/unit/test-pipeline-scripts.test.ts new file mode 100644 index 0000000..233fd16 --- /dev/null +++ 
b/tests/unit/test-pipeline-scripts.test.ts @@ -0,0 +1,100 @@ +import { readFile } from 'node:fs/promises' +import { describe, expect, it } from 'vitest' +import packageJson from '../../package.json' + +const scripts = packageJson.scripts as Record + +describe('test pipeline scripts', () => { + it('exposes local gates that map to the CI test layers', () => { + expect(scripts.check).toBe('npm run test:static') + expect(scripts.test).toBe('npm run test:vitest') + expect(scripts['test:vitest']).toBe('npm run test:architecture && npm run test:unit') + expect(scripts['test:vitest:coverage']).toBe('npm run test:unit:coverage') + expect(scripts['test:e2e']).toBe('npm run test:e2e:parallel') + expect(scripts['test:timings:budget']).toContain('--max-suite-seconds=20') + expect(scripts['test:timings:budget']).toContain('--max-test-seconds=12') + expect(scripts['test:timings:projects']).toBe('node scripts/run-vitest-project-timings.js') + expect(scripts['test:timings:benchmark']).toBe( + 'node scripts/run-vitest-project-timings.js --repeat=3', + ) + expect(scripts['verify:parallel']).toBe('node scripts/run-parallel-gate.js') + expect(scripts['verify:full:parallel']).toBe('node scripts/run-parallel-gate.js --e2e') + expect(scripts['verify:ci']).toBe('npm run verify:release && npm run test:e2e:ci') + expect(scripts['verify:full']).toBe('npm run verify:ci') + }) + + it('keeps the static gate cached and avoids a duplicate docstring lint pass', () => { + expect(scripts['test:static']).toBe( + 'npm run format:check && npm run lint && npm run check:deps && npm run typecheck', + ) + expect(scripts['test:static']).not.toContain('lint:docstrings') + expect(scripts.lint).toContain('--cache-location .cache/eslint/full') + expect(scripts.lint).toContain('--cache-strategy content') + expect(scripts['lint:docstrings']).toContain('--cache-location .cache/eslint/docstrings') + expect(scripts.format).toContain('--cache-location .cache/prettier/write') + 
expect(scripts['format:check']).toContain('--cache-location .cache/prettier/check') + expect(scripts.typecheck).toContain('--tsBuildInfoFile .cache/tsc/tsconfig.tsbuildinfo') + }) + + it('enables the JUnit reporter whenever a Vitest script writes JUnit output', () => { + const scriptsWithJunitOutput = Object.entries(scripts).filter(([, command]) => + command.includes('--outputFile.junit'), + ) + + expect(scriptsWithJunitOutput.length).toBeGreaterThan(0) + for (const [scriptName, command] of scriptsWithJunitOutput) { + expect(command, scriptName).toContain('--reporter=junit') + } + }) + + it('keeps CI split into parallel jobs that share one production bundle artifact', async () => { + const workflow = await readFile('.github/workflows/ci.yml', 'utf8') + + for (const job of [ + 'static', + 'vitest', + 'coverage', + 'build', + 'package-smoke', + 'e2e', + 'windows-smoke', + 'ci-required', + ]) { + expect(workflow).toContain(`\n ${job}:`) + } + + expect(workflow).toContain('run: npm run test:static') + expect(workflow).toContain('run: npm run test:vitest:${{ matrix.project }}') + expect(workflow).toContain('node scripts/report-test-timings.js') + expect(workflow).toContain('--max-suite-seconds=20') + expect(workflow).toContain('--max-test-seconds=12') + expect(workflow).toContain('run: npm run test:vitest:coverage') + expect(workflow).toContain('name: production-dist') + expect(workflow).toContain('package-smoke:') + expect(workflow).toContain('e2e:') + expect(workflow).toContain('needs: build') + expect(workflow).toContain('name: CI Required') + expect(workflow).toContain('if: ${{ always() }}') + expect(workflow).toContain('- static') + expect(workflow).toContain('- vitest') + expect(workflow).toContain('- coverage') + expect(workflow).toContain('- package-smoke') + expect(workflow).toContain('- e2e') + expect(workflow).toContain('- windows-smoke') + expect(workflow).toContain( + 'check_result "windows-smoke" "${{ needs[\'windows-smoke\'].result }}"', + ) + 
expect(workflow).toContain('uses: actions/download-artifact@') + }) + + it('uses the required CI job as the release gate', async () => { + const releaseWorkflow = await readFile('.github/workflows/release.yml', 'utf8') + const verifyScript = await readFile('scripts/verify-main-ci.js', 'utf8') + + expect(releaseWorkflow).toContain('--workflow ci.yml') + expect(releaseWorkflow).toContain('--required-job "CI Required"') + expect(verifyScript).toContain('--required-job') + expect(verifyScript).toContain('/actions/runs/${runId}/jobs') + expect(verifyScript).toContain('Required CI job') + }) +}) diff --git a/tests/unit/toktrack-version.test.ts b/tests/unit/toktrack-version.test.ts index 7fdce73..31b7156 100644 --- a/tests/unit/toktrack-version.test.ts +++ b/tests/unit/toktrack-version.test.ts @@ -7,12 +7,28 @@ const packageJson = require('../../package.json') as { dependencies?: Record } +const exactSemverPattern = + /^(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)(?:-[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?(?:\+[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*)?$/ + +function getToktrackDependency() { + const version = packageJson.dependencies?.toktrack + if (typeof version !== 'string') { + throw new Error('package.json dependencies.toktrack must be defined.') + } + + return version +} + describe('toktrack version constants', () => { it('keeps the shared pinned version aligned with package.json', () => { - expect(TOKTRACK_VERSION).toBe(packageJson.dependencies?.toktrack) + expect(TOKTRACK_VERSION).toBe(getToktrackDependency()) }) it('builds the package spec from the shared pinned version', () => { - expect(TOKTRACK_PACKAGE_SPEC).toBe(`toktrack@${packageJson.dependencies?.toktrack}`) + expect(TOKTRACK_PACKAGE_SPEC).toBe(`toktrack@${getToktrackDependency()}`) + }) + + it('keeps the package dependency pinned to an exact SemVer version', () => { + expect(getToktrackDependency()).toMatch(exactSemverPattern) }) }) diff --git a/tests/unit/vitest-coverage-config.test.ts 
b/tests/unit/vitest-coverage-config.test.ts index 135a409..28dc76d 100644 --- a/tests/unit/vitest-coverage-config.test.ts +++ b/tests/unit/vitest-coverage-config.test.ts @@ -18,6 +18,15 @@ type CoverageConfig = { type ResolvedVitestConfig = { test?: { coverage?: CoverageConfig + projects?: Array<{ + test?: { + environment?: string + maxWorkers?: number | string + name?: string + setupFiles?: string[] + testTimeout?: number + } + }> } } @@ -63,15 +72,63 @@ describe('vitest coverage configuration', () => { }) it('keeps coverage-heavy scripts on bounded non-interactive reporters', () => { - const coverageScripts = [ - packageJson.scripts['test:unit:coverage'], - packageJson.scripts['test:timings'], - ] + const coverageScript = packageJson.scripts['test:unit:coverage'] + const timingsScript = packageJson.scripts['test:timings'] + const coverageScripts = [coverageScript, timingsScript] for (const script of coverageScripts) { expect(script).toContain('--reporter=dot') expect(script).toContain('--reporter=junit') - expect(script).toContain('--outputFile.junit=./test-results/vitest.junit.xml') } + + expect(coverageScript).toContain('--outputFile.junit=./test-results/vitest-coverage.junit.xml') + expect(timingsScript).toContain('--outputFile.junit=./test-results/vitest-timings.junit.xml') + expect(timingsScript).toContain( + 'node scripts/report-test-timings.js ./test-results/vitest-timings.junit.xml', + ) + }) + + it('keeps React Testing Library setup out of Node-only projects', async () => { + const config = await resolveVitestConfig() + const projects = config.test?.projects ?? 
[] + const expectedSetupByProject = new Map([ + ['architecture', ['./vitest.setup.node.ts']], + ['unit', ['./vitest.setup.node.ts']], + ['frontend', ['./vitest.setup.node.ts', './vitest.setup.frontend.ts']], + ['integration', ['./vitest.setup.node.ts']], + ['integration-background', ['./vitest.setup.node.ts']], + ]) + + expect(projects).toHaveLength(expectedSetupByProject.size) + + for (const project of projects) { + const name = project.test?.name + expect(name).toBeDefined() + expect(expectedSetupByProject.has(name ?? '')).toBe(true) + expect(project.test?.setupFiles).toEqual(expectedSetupByProject.get(name ?? '')) + } + }) + + it('centralizes the longer jsdom timeout on the frontend project only', async () => { + const config = await resolveVitestConfig() + const projects = config.test?.projects ?? [] + const frontendProject = projects.find((project) => project.test?.name === 'frontend') + const nonFrontendProjects = projects.filter((project) => project.test?.name !== 'frontend') + + expect(frontendProject?.test?.testTimeout).toBe(30_000) + + for (const project of nonFrontendProjects) { + expect(project.test?.testTimeout).toBeUndefined() + } + }) + + it('keeps jsdom parallelism high but below full CPU saturation', async () => { + const config = await resolveVitestConfig() + const projects = config.test?.projects ?? 
[] + const frontendProject = projects.find((project) => project.test?.name === 'frontend') + const unitProject = projects.find((project) => project.test?.name === 'unit') + + expect(frontendProject?.test?.maxWorkers).toBe('80%') + expect(unitProject?.test?.maxWorkers).toBe('80%') }) }) diff --git a/vitest.config.ts b/vitest.config.ts index 2033a8c..875945d 100644 --- a/vitest.config.ts +++ b/vitest.config.ts @@ -1,6 +1,9 @@ import { defineConfig, mergeConfig } from 'vitest/config' import viteConfig from './vite.config' +const junitOutputFile = process.env.VITEST_JUNIT_FILE || './test-results/vitest.junit.xml' +const coverageReportsDirectory = process.env.VITEST_COVERAGE_DIR || './coverage' + export default defineConfig(async () => { // Resolve the imported Vite config explicitly before mergeConfig because // vite.config may export either a config object or an async config factory. @@ -22,12 +25,12 @@ export default defineConfig(async () => { include: [], reporters: ['default', 'junit'], outputFile: { - junit: './test-results/vitest.junit.xml', + junit: junitOutputFile, }, coverage: { provider: 'v8', reporter: ['text', 'html', 'lcov'], - reportsDirectory: './coverage', + reportsDirectory: coverageReportsDirectory, include: [ 'src/**/*.{ts,tsx}', 'server.js', @@ -51,7 +54,7 @@ export default defineConfig(async () => { include: ['tests/architecture/**/*.test.ts'], environment: 'node', globals: true, - setupFiles: ['./vitest.setup.ts'], + setupFiles: ['./vitest.setup.node.ts'], fileParallelism: false, sequence: { groupOrder: 0, @@ -64,7 +67,7 @@ export default defineConfig(async () => { name: 'unit', include: ['tests/unit/**/*.test.ts'], environment: 'node', - setupFiles: ['./vitest.setup.ts'], + setupFiles: ['./vitest.setup.node.ts'], maxWorkers: '80%', sequence: { groupOrder: 1, @@ -77,8 +80,9 @@ export default defineConfig(async () => { name: 'frontend', include: ['tests/frontend/**/*.test.{ts,tsx}'], environment: 'jsdom', - setupFiles: ['./vitest.setup.ts', 
'./vitest.setup.frontend.ts'], - maxWorkers: '50%', + setupFiles: ['./vitest.setup.node.ts', './vitest.setup.frontend.ts'], + maxWorkers: '80%', + testTimeout: 30_000, sequence: { groupOrder: 2, }, @@ -91,7 +95,7 @@ export default defineConfig(async () => { include: ['tests/integration/**/*.test.ts'], exclude: ['tests/integration/**/*background*.test.ts'], environment: 'node', - setupFiles: ['./vitest.setup.ts'], + setupFiles: ['./vitest.setup.node.ts'], maxWorkers: '50%', sequence: { groupOrder: 3, @@ -104,7 +108,7 @@ export default defineConfig(async () => { name: 'integration-background', include: ['tests/integration/**/*background*.test.ts'], environment: 'node', - setupFiles: ['./vitest.setup.ts'], + setupFiles: ['./vitest.setup.node.ts'], maxWorkers: 2, sequence: { groupOrder: 4, diff --git a/vitest.setup.frontend.ts b/vitest.setup.frontend.ts index edaf2af..3188a22 100644 --- a/vitest.setup.frontend.ts +++ b/vitest.setup.frontend.ts @@ -1,8 +1,34 @@ -import { beforeAll, vi } from 'vitest' +import '@testing-library/jest-dom/vitest' +import { cleanup } from '@testing-library/react' +import { afterEach, beforeAll, beforeEach, vi } from 'vitest' import { initI18n } from '@/lib/i18n' +// jsdom can expose different Event/CustomEvent constructors on globalThis and window. +// Radix dispatches globalThis.CustomEvent instances through DOM nodes, so align both. 
+function syncDomEventConstructors() { + if (typeof window === 'undefined') { + return + } + + Object.defineProperty(globalThis, 'Event', { + configurable: true, + writable: true, + value: window.Event, + }) + Object.defineProperty(globalThis, 'CustomEvent', { + configurable: true, + writable: true, + value: window.CustomEvent, + }) +} + +afterEach(() => { + cleanup() +}) + beforeAll(async () => { await initI18n('de') + syncDomEventConstructors() if (typeof window === 'undefined') { return @@ -56,3 +82,8 @@ beforeAll(async () => { }) } }) + +// Some tests intentionally reset globals, so restore the jsdom event constructors per test. +beforeEach(() => { + syncDomEventConstructors() +}) diff --git a/vitest.setup.node.ts b/vitest.setup.node.ts new file mode 100644 index 0000000..4f66b25 --- /dev/null +++ b/vitest.setup.node.ts @@ -0,0 +1,7 @@ +import { afterEach, vi } from 'vitest' + +afterEach(() => { + vi.useRealTimers() + vi.restoreAllMocks() + vi.unstubAllGlobals() +}) diff --git a/vitest.setup.ts b/vitest.setup.ts index d15a884..95fc963 100644 --- a/vitest.setup.ts +++ b/vitest.setup.ts @@ -1,13 +1 @@ -import '@testing-library/jest-dom/vitest' -import { afterEach, vi } from 'vitest' -import { cleanup } from '@testing-library/react' - -afterEach(() => { - vi.useRealTimers() - vi.restoreAllMocks() - vi.unstubAllGlobals() - - if (typeof document !== 'undefined') { - cleanup() - } -}) +import './vitest.setup.node'