diff --git a/.cursor/agents/quality-gates.md b/.cursor/agents/quality-gates.md new file mode 100644 index 00000000..cb2c4810 --- /dev/null +++ b/.cursor/agents/quality-gates.md @@ -0,0 +1,63 @@ +--- +name: quality-gates +description: Runs SpecFact CLI quality gates (format, lint, type-check, smart-test), fixes issues found, and verifies code integrity to avoid conflicts with other agents. Use when asked to run quality checks, fix lint/type/test failures, or confirm no conflicting changes. Optionally run full test suite (smart-test-full) when user explicitly requests it. +--- + +You are a quality-gate specialist for the SpecFact CLI project. You run the standard hatch quality pipeline, fix any issues, and ensure code integrity when working alongside other agents. + +When invoked: + +1. **Establish baseline (integrity)** + Before making changes, record the current state so you can detect conflicting edits later: + - Run `git status -sb` and note modified/untracked files. + - Optionally run `git diff --stat` (or `git diff` for key files) and keep a mental/summary of what changed. + - If the user or context mentions "another agent" or "parallel work", treat integrity check as mandatory. + +2. **Run quality gates in this order** + Execute from the project root (repository where `pyproject.toml` and `hatch` are used): + - `hatch run format` – formatting (black, isort, etc.). + - `hatch run lint` – linting (ruff, pylint, etc.) and type-check. + - `hatch run type-check` – type checker (e.g. basedpyright) if not already covered by lint. + - `hatch run smart-test` – smart test suite (incremental/fast by default). + + **Only if the user explicitly asks for the full test suite** (e.g. "run full tests", "smart-test-full", "run all tests"): + - `hatch run smart-test-full` – full test run (slow; use when user requests it to surface any potential test failures). + +3. 
**Fix issues found** + For each failing step: + - Address format/lint/type errors in the indicated files with minimal, targeted edits. + - For test failures: fix the code or tests as appropriate; prefer fixing the implementation unless the test is wrong. + - Re-run the failing gate (and any that depend on it) after fixes to confirm success. + - Do not change unrelated code; stay scoped to the failures. + +4. **Re-check integrity (conflict avoidance)** + Before or after fixing, verify the codebase wasn’t modified by another agent: + - Run `git status -sb` and `git diff --stat` again. + - If new modifications appear that you did not make (e.g. other files changed, or your target files changed in unexpected ways), report: + - "Integrity check: unexpected changes detected (list files or summary). Possible conflict with another agent or process. Recommend reviewing diffs before committing." + - If the only changes are your own fixes, report: "Integrity check: no conflicting changes detected; only my fixes are present." + +5. **Summarize** + - List which gates were run and their result (pass/fail). + - List what was fixed (files and type of fix). + - State result of the integrity check. + +**Commands reference** + +| Command | Purpose | +|--------|---------| +| `hatch run format` | Apply project formatting | +| `hatch run lint` | Lint and type-check (run after format) | +| `hatch run type-check` | Type checker only (if separate from lint) | +| `hatch run smart-test` | Smart/incremental tests (default; use unless user asks for full) | +| `hatch run smart-test-full` | Full test suite – **only when user explicitly requests full tests** | + +**Integrity / conflict avoidance** + +- "Double check the code hasn’t been modified in the meantime by another agent" means: compare state before and after your edits (e.g. via `git status` and `git diff`); if you see changes you didn’t make, report a possible conflict and do not overwrite others’ work. 
+- Prefer reporting and pausing over silently overwriting when conflicts are detected. + +**Output** + +- Be concise: which gates ran, pass/fail, what you fixed, and the outcome of the integrity check. +- If you skip `smart-test-full` because the user didn’t ask for it, say so and note that they can request "run full tests" or "smart-test-full" for a full run. diff --git a/CHANGELOG.md b/CHANGELOG.md index a61f8220..a7a6b246 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,28 @@ All notable changes to this project will be documented in this file. --- +## [0.26.13] - 2026-01-29 + +### Fixed (0.26.13) + +- **Debug log parity for `specfact upgrade`**: When `--debug` is set, the "up to date" success path now writes to `~/.specfact/logs/specfact-debug.log` (same as the "installed" path), with `debug_log_operation` and narrative "upgrade: success (up to date)" including version in extra + +--- + +## [0.26.12] - 2026-01-28 + +### Added (0.26.12) + +- **Debug logs under ~/.specfact/logs**: When `--debug` is enabled, debug output is written to both console and a rotating log file at `~/.specfact/logs/specfact-debug.log` + - **User-level directory**: `get_specfact_home_logs_dir()` returns `~/.specfact/logs` (created with mode 0o755 on first use) + - **debug_print()**: Routes to console and to the debug log file when debug is on + - **debug_log_operation()**: New helper to log structured operation metadata (operation, target, status, error, extra) when debug is on; no-op when debug is off; target/extra redacted via LoggerSetup.redact_secrets + - **Adapters**: ADO (WIQL, Work Items GET, PATCH) and GitHub (API GET) log operation metadata when debug is on + - **Commands**: backlog refine export/import and init template resolution log file read/write and template resolution steps when debug is on + - **CLI**: After `set_debug_mode(debug)`, `init_debug_log_file()` is called when debug is True so the log file is ready for the first write + +--- + ## [0.26.11] - 2026-01-27 
### Fixed (0.26.11) diff --git a/README.md b/README.md index 323400e4..eef26c84 100644 --- a/README.md +++ b/README.md @@ -339,6 +339,7 @@ hatch run contract-test-full - 💬 **Questions?** [GitHub Discussions](https://github.com/nold-ai/specfact-cli/discussions) - 🐛 **Found a bug?** [GitHub Issues](https://github.com/nold-ai/specfact-cli/issues) +- 🔍 **Debugging I/O or API issues?** Run with `--debug`; logs are written to `~/.specfact/logs/specfact-debug.log`. See [Debug Logging](docs/reference/debug-logging.md). - 📧 **Need help?** [hello@noldai.com](mailto:hello@noldai.com) - 🌐 **Learn more:** [specfact.com](https://specfact.com) • [specfact.io](https://specfact.io) • [specfact.dev](https://specfact.dev) diff --git a/docs/guides/troubleshooting.md b/docs/guides/troubleshooting.md index 603c4bcf..1307abff 100644 --- a/docs/guides/troubleshooting.md +++ b/docs/guides/troubleshooting.md @@ -773,7 +773,11 @@ If you're still experiencing issues: 1. **Check logs**: + - **Debug log file** (when using `--debug`): Debug output and structured operation metadata are written to `~/.specfact/logs/specfact-debug.log`. See [Debug Logging](../reference/debug-logging.md) for what is logged and how to use it. + - **Verbose repro** (ad-hoc capture): + ```bash + specfact --debug # Writes to ~/.specfact/logs/specfact-debug.log specfact repro --verbose 2>&1 | tee debug.log ``` diff --git a/docs/reference/README.md b/docs/reference/README.md index 84b9be00..14d99406 100644 --- a/docs/reference/README.md +++ b/docs/reference/README.md @@ -13,6 +13,7 @@ Complete technical reference for SpecFact CLI. 
- **[Commands](commands.md)** - Complete command reference with all options - **[Authentication](authentication.md)** - Device code auth flows and token storage - **[Architecture](architecture.md)** - Technical design, module structure, and internals +- **[Debug Logging](debug-logging.md)** - Where and what is logged when using `--debug` - **[Operational Modes](modes.md)** - CI/CD vs CoPilot modes - **[Specmatic API](specmatic.md)** - Specmatic integration API reference (functions, classes, integration points) - **[Telemetry](telemetry.md)** - Opt-in analytics and privacy guarantees diff --git a/docs/reference/commands.md b/docs/reference/commands.md index 745e2f3d..b58d7567 100644 --- a/docs/reference/commands.md +++ b/docs/reference/commands.md @@ -197,6 +197,7 @@ specfact [OPTIONS] COMMAND [ARGS]... - `--help`, `-h` - Show help message and exit - `--help-advanced`, `-ha` - Show all options including advanced configuration (progressive disclosure) - `--no-banner` - Hide ASCII art banner (useful for CI/CD) +- `--debug` - Enable debug mode: show debug messages in the console and write them (plus structured operation metadata) to `~/.specfact/logs/specfact-debug.log`. See [Debug Logging](debug-logging.md). - `--verbose` - Enable verbose output - `--quiet` - Suppress non-error output - `--mode {cicd|copilot}` - Operational mode (default: auto-detect) diff --git a/docs/reference/debug-logging.md b/docs/reference/debug-logging.md new file mode 100644 index 00000000..8b3f93c0 --- /dev/null +++ b/docs/reference/debug-logging.md @@ -0,0 +1,169 @@ +--- +layout: default +title: Debug Logging +permalink: /debug-logging/ +--- + + +# Debug Logging + +When you run SpecFact CLI with the global `--debug` flag, the CLI writes debug output to your **console** and to a **rotating log file** under your user directory. This helps diagnose I/O, API, and template issues without cluttering normal output. 
+ +## For Users + +### Enabling Debug Mode + +Pass `--debug` before any command: + +```bash +specfact --debug init +specfact --debug backlog refine --adapter ado --project my-project +specfact --debug plan select +``` + +Debug output appears in the terminal and is also appended to a log file. + +### Where Logs Are Written + +| Location | Purpose | +|---------|---------| +| **Console** | Same debug messages you see in the terminal (Rich formatting). | +| **`~/.specfact/logs/specfact-debug.log`** | Rotating log file (plain text). Created on first use when `--debug` is set. Directory is created with mode `0o755` if missing. | + +- **Path**: `~` is your home directory (e.g. `/home/you` on Linux, `C:\Users\you` on Windows). +- **Rotation**: The file rotates at 5 MB and keeps up to 5 backup files (`specfact-debug.log.1`, …). +- **Scope**: User-level only; not tied to a specific repo or bundle. + +### What Gets Logged + +When `--debug` is on, the CLI logs: + +1. **Debug messages** + Any line emitted via the internal `debug_print()`: template resolution steps, path discovery, fallbacks (e.g. in `specfact init`). Each line is prefixed with a **timestamp** and **caller** (module and function) for context. + +2. **Structured operation metadata** + One JSON line per operation, with: + - **operation** – Type (e.g. `api_request`, `file_read`, `template_resolution`). + - **target** – Path or URL (sensitive parts redacted). + - **status** – Result (e.g. `success`, HTTP status, `error`, `prepared`, `finished`, `failed`). + - **caller** – Module and function that logged the operation (for context). + - **error** – Optional error message on failure. + - **extra** – Optional extra fields (redacted); for API calls may include payload (sanitized), response, reason. + +**Log format**: Every line in the debug log file starts with a timestamp (`YYYY-MM-DD HH:MM:SS`), then a pipe and the message or structured JSON. Narrative lines include caller (module:function) before the message. 
File operations log status `prepared`/`finished`/`failed`; API operations log operation, URL (redacted), payload (sanitized), response, status, error, and reason where applicable. + +**Redaction**: URLs, paths, and `extra` are passed through `LoggerSetup.redact_secrets` so tokens and secrets are masked in the log file. + +### What Is Logged by Component + +| Component | Operations / events logged (when `--debug`) | +|----------|---------------------------------------------| +| **auth azure-devops** | Start, success (PAT or OAuth), or error; key steps (OAuth flow, device code) when `--debug` is on. | +| **init** | Template resolution: paths tried, success/failure, fallbacks (e.g. development path, package path, `importlib` fallbacks). | +| **backlog refine** | File read for import: path, success/error (e.g. `--import-from-tmp`). File write for export: path, success/error (e.g. `--export-to-tmp`). | +| **Azure DevOps adapter** | WIQL request (redacted URL, method, status); Work Items GET (redacted URL, status); Work Items PATCH (redacted URL, status); on failure, error snippet. | +| **GitHub adapter** | API request/response (redacted URL, method, status); on failure, redacted error snippet. 
| + +### Example Log Snippets + +**Plain debug line (from `debug_print`; timestamp and caller prefixed):** + +```text +2025-01-28 14:30:00 | specfact_cli.commands.init:run | Debug: Trying development path: /path/to/templates +``` + +**Structured operation (from `debug_log_operation`; timestamp prefixed by formatter):** + +```text +2025-01-28 14:30:01 | debug_log_operation {"operation": "file_read", "target": "/path/to/export.md", "status": "success", "caller": "specfact_cli.commands.backlog_commands:export_backlog"} +2025-01-28 14:30:02 | debug_log_operation {"operation": "ado_wiql", "target": "https://dev.azure.com/***/***/_apis/...", "status": "200", "caller": "specfact_cli.adapters.ado:..."} +2025-01-28 14:30:03 | debug_log_operation {"operation": "template_resolution", "target": "/usr/lib/.../templates/backlog", "status": "success", "caller": "specfact_cli.commands.init:..."} +``` + +### Troubleshooting With Debug Logs + +1. Run the failing command with `--debug`: + + ```bash + specfact --debug <command> + ``` + +2. Reproduce the issue, then open `~/.specfact/logs/specfact-debug.log`. +3. Look for: + - **template_resolution** – Where `init` looked for templates and whether it succeeded. + - **file_read** / **file_write** – Paths and success/error for backlog export/import. + - **ado_wiql**, **ado_get**, **ado_patch** – ADO API calls (URLs redacted, status/error present). + - **api_request** – GitHub (or other) API calls with status and optional error. + +See also [Troubleshooting](../guides/troubleshooting.md). + +--- + +## For Developers + +### Debug log standard (apply consistently) + +Debug logs are **critical for anomaly analysis, unexpected errors/failures, reporting, and bug reports**. Every debug log must follow the same pattern so logs are useful like in a regular production tool—**no single-line INFO-style entries**; every significant operation must provide **full context**. 
+ +**Required pattern for every significant operation:** + +| Phase | What to log | Example status / extra | +|-------|----------------|-------------------------| +| **Started / prepared** | Once when the operation begins | `status=started` or `prepared`; `target`; `extra` (e.g. flow, method, cache) | +| **Progress / attempt** | For each distinct step (if multi-step) | `status=attempt`; `extra.method`, `extra.reason` (what was tried) | +| **Outcome** | Exactly once when the operation ends | **Success**: `status=success` (or HTTP status); `extra` (method, cache, reason). **Failure**: `status=failed` or `error`; `error=`; `extra.reason` | + +**Minimum content:** + +- **Every line**: timestamp (automatic), caller (automatic or explicit). +- **Structured lines**: `operation`, `target` (redacted), `status`; when applicable: `error`, `extra` (payload, response, reason, method, cache—sanitized). + +**Apply everywhere:** Auth flows, file I/O, API calls, template resolution, and any operation that can fail or affect behavior. Reference: `auth azure-devops` (started → cache_prepared → attempt interactive_browser → success/fallback → attempt device_code → success/failed → success token_stored). + +### Runtime API + +- **`specfact_cli.runtime.set_debug_mode(debug: bool)`** + Turn global debug mode on or off (e.g. from the CLI callback when `--debug` is set). + +- **`specfact_cli.runtime.is_debug_mode() -> bool`** + Returns whether debug mode is currently on. + +- **`specfact_cli.runtime.init_debug_log_file()`** + Ensures the debug log file under `~/.specfact/logs` is created and the file handler is set up. Called by the CLI when `--debug` is True so the first write goes to the file immediately. + +- **`specfact_cli.runtime.debug_print(*args, **kwargs)`** + If debug is on: prints to the configured Rich console and appends a plain-text line to `~/.specfact/logs/specfact-debug.log` (args only; no Rich markup in the file). If debug is off: no-op. 
+ +- **`specfact_cli.runtime.debug_log_operation(operation, target, status, error=None, extra=None, caller=None)`** + If debug is on: writes one JSON line to the debug log file with `operation`, `target`, `status`, optional `error`, optional `extra`, and `caller` (inferred if not provided). `target` and `extra` are redacted via `LoggerSetup.redact_secrets`. If debug is off: no-op. Follow the debug log standard: log started/prepared → attempt → success/failed with reason. + +### User-Level Log Directory + +- **`specfact_cli.common.logger_setup.get_specfact_home_logs_dir() -> str`** + Returns `os.path.expanduser("~/.specfact/logs")`, creating the directory with `mode=0o755` and `exist_ok=True` on first use. Use this if you need the path for the debug log or related files. + +### When to Use What + +- **`debug_print(...)`** + For human-oriented messages (template paths, “trying X”, “using Y”). Shown in console and written as a single plain line to the log file. + +- **`debug_log_operation(...)`** + For machine-friendly operation records (API calls, file I/O, template resolution result). Always use for URLs or paths; redaction is applied to `target` and `extra`. + +### Adding New Log Points + +1. **Follow the debug log standard** + For each significant operation: log **started/prepared** → **attempt** (if multi-step) → **success** or **failed** with **reason/error**. Include operation, target, status, error, extra (payload/response/reason—sanitized). Never log only one line for an operation that can succeed or fail; always log outcome and reason. + +2. **New adapter or command** + When `is_debug_mode()` is True, call `debug_log_operation(operation, target, status, error=..., extra=..., caller=...)` at start, at each attempt, and at outcome. Use clear operation names (e.g. `ado_wiql`, `file_read`, `template_resolution`). For file ops: prepared → finished/failed. For API ops: attempt → success/failed with payload (sanitized), response, reason. + +3. 
**New debug messages** + Use `debug_print(...)` for narrative steps; they will appear in console and in `specfact-debug.log` as plain text. Prefer pairing with `debug_log_operation` so the log has both human-readable and machine-friendly context. + +4. **Sensitive data** + Pass URLs/paths/tokens only as `target` or inside `extra`; they are redacted before being written to the log file. + +### Relation to Other Logging + +- **`~/.specfact/logs/`** is for the **global** `--debug` session log only (`specfact-debug.log`). It is **not** the same as bundle-specific `.specfact/projects/<bundle>/logs/` (used for other runtime/agent logs). See [Directory Structure](directory-structure.md). diff --git a/docs/reference/directory-structure.md index 572dcbdf..94d1190d 100644 --- a/docs/reference/directory-structure.md +++ b/docs/reference/directory-structure.md @@ -22,6 +22,8 @@ All SpecFact artifacts are stored under `.specfact/` in the repository root. Thi - **Clear separation**: Plans (versioned) vs reports (ephemeral) - **CLI-first**: All artifacts are local, no cloud storage required +**User-level debug logs**: When you run with `--debug`, the CLI also writes a rotating debug log under your home directory: `~/.specfact/logs/specfact-debug.log`. This is separate from repo-level `.specfact/` and is used only for global debug output. See [Debug Logging](debug-logging.md). 
+ ## Canonical Structure ```bash diff --git a/openspec/changes/add-debug-logs-specfact-home/CHANGE_VALIDATION.md b/openspec/changes/add-debug-logs-specfact-home/CHANGE_VALIDATION.md new file mode 100644 index 00000000..df28e4de --- /dev/null +++ b/openspec/changes/add-debug-logs-specfact-home/CHANGE_VALIDATION.md @@ -0,0 +1,58 @@ +# Change Validation Report: add-debug-logs-specfact-home + +**Validation Date**: 2026-01-28 +**Change Proposal**: [proposal.md](./proposal.md) +**Validation Method**: Dry-run simulation and OpenSpec strict validation + +## Executive Summary + +- **Breaking Changes**: 0 detected +- **Dependent Files**: Additive only (new function, extended behavior when --debug) +- **Impact Level**: Low +- **Validation Result**: Pass +- **User Decision**: Proceed with implementation + +## Breaking Changes Detected + +None. All changes are additive or extend behavior only when `--debug` is set. + +- **get_specfact_home_logs_dir()**: New function; no existing callers. +- **debug_print()**: Extended to also write to file when debug on; console behavior unchanged. +- **debug_log_operation()**: New function; no existing callers. +- **get_runtime_logs_dir()**: Unchanged per proposal. + +## Dependencies Affected + +### No Critical Updates Required + +- Existing callers of `get_runtime_logs_dir()` and `debug_print()` require no changes. + +### Recommended + +- Adapters and commands that perform file/API operations: add `debug_log_operation()` calls when `is_debug_mode()` (as specified in tasks). + +## Impact Assessment + +- **Code Impact**: New helper and extended runtime; adapters and selected commands gain optional debug logging. +- **Test Impact**: New unit tests for get_specfact_home_logs_dir, debug_print file routing, debug_log_operation. +- **Documentation Impact**: Update --debug help and CHANGELOG. +- **Release Impact**: Minor (new feature, backward compatible). 
+ +## Format Validation + +- **proposal.md Format**: Pass (Why, What Changes, Capabilities, Impact present). +- **tasks.md Format**: Pass (hierarchical numbered format; branch creation first, PR creation last). +- **specs Format**: Pass (ADDED Requirements, #### Scenario: blocks). +- **design.md**: Present; contract and fallback documented. + +## OpenSpec Validation + +- **Status**: Pass +- **Validation Command**: `openspec validate add-debug-logs-specfact-home --strict` +- **Issues Found**: 0 (after adding delta headers to spec) +- **Re-validated**: Yes + +## Next Steps + +1. Create GitHub issue in nold-ai/specfact-cli for backlog tracking. +2. Proceed with implementation per tasks.md (branch, implementation, tests, PR). diff --git a/openspec/changes/add-debug-logs-specfact-home/design.md b/openspec/changes/add-debug-logs-specfact-home/design.md new file mode 100644 index 00000000..166a3589 --- /dev/null +++ b/openspec/changes/add-debug-logs-specfact-home/design.md @@ -0,0 +1,86 @@ +# Design: Add debug logs under ~/.specfact/logs + +## Overview + +When `--debug` is enabled globally, the CLI writes debug output to both the console (existing `debug_print()`) and a rotating log file under `~/.specfact/logs`. Every log line includes a **timestamp**. A helper `debug_log_operation()` writes structured metadata (operation, target, status, error, extra, optional caller) for file and API operations. **File operations** log prepared / finished / failed and status; **API operations** log operation, URL (redacted), payload (sanitized), response, status, error, reason (via extra). Logger-module helpers (`plain_text_for_debug_log`, `format_debug_log_message`) and runtime `_append_debug_log()` centralize formatting so call sites stay minimal. + +## Debug log standard (mandatory pattern) + +Debug logs are **critical for anomaly analysis, unexpected errors/failures, reporting, and bug reports**. 
The same standard applies everywhere we emit debug log entries—no single-line “INFO-style” lines; every significant operation must provide **full context** as in a production tool. + +**Required pattern for every significant operation:** + +1. **Started / prepared** + Log once when the operation begins: `status=started` or `status=prepared`, with `target` (path/URL) and optional `extra` (e.g. flow, method, cache). + +2. **Progress / attempt** (if multi-step) + For each distinct step (e.g. “try interactive browser”, “try device code”, “read file”, “call API”): log `status=attempt` with `extra` (e.g. `method`, `reason`) so the log shows what was tried. + +3. **Outcome** + Log exactly once when the operation ends: + - **Success**: `status=success` (or HTTP status for API), with `extra` (e.g. `method`, `cache`, `reason`) so it is clear *why* and *how* it succeeded. + - **Failure**: `status=failed` (or `status=error`), with `error=` and `extra.reason` (or equivalent) so failures are diagnosable and reproducible. + +**What to include (minimum):** + +- **Every line**: timestamp (formatter), caller (module:function). +- **Structured lines**: `operation`, `target` (redacted), `status`, and when applicable: `error`, `extra` (payload/response/reason/cache/method—sanitized). + +**Apply consistently:** Auth flows, file I/O, API calls, template resolution, and any other operation that can fail or affect behavior must follow this pattern. Reference implementation: `auth azure-devops` (started → cache_prepared → attempt interactive_browser → success/fallback → attempt device_code → success/failed → success token_stored). + +## Architecture + +- **Runtime**: `debug_print()` writes to console and, via `_append_debug_log()`, to a file handler backed by `~/.specfact/logs/specfact-debug.log`. The file handler uses a formatter with **timestamp** (e.g. `%(asctime)s | %(message)s`). 
`debug_log_operation(..., caller=...)` writes structured JSON lines; caller (module/method) is included when provided. File handler is initialized in the app callback after `set_debug_mode(True)`. +- **LoggerSetup**: `get_specfact_home_logs_dir()` returns `~/.specfact/logs` and ensures the directory exists (0o755). `plain_text_for_debug_log(text)` strips Rich markup; `format_debug_log_message(*args, **kwargs)` produces a single plain line for the debug log file. No change to `get_runtime_logs_dir()`. +- **Adapters / Commands**: When `runtime.is_debug_mode()` is True, call `debug_log_operation(operation, target, status, error=..., extra=..., caller=...)` around file IO and API calls. For **file ops**: log prepared (status=prepared/started), then finished or failed (status=finished/failed); include path in target. For **API ops**: include in extra (redacted): payload, response, reason; status is HTTP status or success/failure. Redact via `LoggerSetup.redact_secrets` before logging. + +## Contract enforcement + +- `get_specfact_home_logs_dir()`: `@require` path is expandable; `@ensure` result is non-empty string and directory exists after first call when used for writing. +- `debug_print()`: existing contract; add side-effect of writing to file when debug on (idempotent per run). +- `debug_log_operation()`: `@require` operation and target are strings when provided; no-op when `not is_debug_mode()`. + +## Fallback and offline + +- If `~/.specfact` or `~/.specfact/logs` cannot be created (e.g. read-only HOME), fall back to console-only debug output and log a one-line warning via `debug_print()`. +- No network dependency for debug logging; all writes are local file IO. + +## Sequence (debug on, first write) + +1. User runs `specfact --debug `. +2. App callback sets `set_debug_mode(True)`. +3. 
Callback ensures debug log file is initialized: `get_specfact_home_logs_dir()`, create dir if needed, open rotating file handler, register with runtime for `debug_print()` and `debug_log_operation()`. +4. During command execution, `debug_print()` and `debug_log_operation()` write to console and file. +5. On exit, close file handler (or let process exit flush). + +## Module coverage (consistent debug across commands) + +All CLI command modules should support `--debug` with consistent context: + +| Command module | Already has debug | Add / extend | +|----------------|-------------------|--------------| +| auth | yes (debug_print, debug_log_operation) | Ensure caller; API payload/response/reason in extra | +| init | yes (debug_print, template resolution) | debug_log_operation for template resolution status; caller | +| backlog_commands | yes (debug_log_operation for file IO) | debug_print for key steps; file prepared/finished/failed | +| analyze | no | debug_print at entry; debug_log_operation for file/API | +| contract_cmd | no | debug_print at entry; file ops with prepared/finished/failed | +| drift | no | debug_print at entry; file/API ops | +| enforce | no | debug_print at entry; file/API ops | +| generate | no | debug_print at entry; file ops | +| import_cmd | no | debug_print at entry; file ops (prepared/finished/failed) | +| migrate | no | debug_print at entry; file ops | +| plan | no | debug_print at entry; file/API ops | +| project_cmd | no | debug_print at entry; file ops | +| repro | no | debug_print at entry; file/API ops | +| sdd | no | debug_print at entry; file ops | +| spec | no | debug_print at entry; API ops (payload/response/reason) | +| sync | no | debug_print at entry; file/API ops | +| update | no | debug_print at entry; API ops | +| validate | no | debug_print at entry; file/API ops | + +Adapters (ado, github): already log operation/target/status; extend extra with payload (sanitized), response, reason where applicable. 
+ +## Risks + +- **Disk usage**: Use rotating file (e.g. RotatingFileHandler, 5 MB × 5) to cap size. +- **Secrets**: Always redact in `debug_log_operation()` and when logging response/request bodies; avoid logging full bodies by default. diff --git a/openspec/changes/add-debug-logs-specfact-home/proposal.md b/openspec/changes/add-debug-logs-specfact-home/proposal.md new file mode 100644 index 00000000..473d5ae4 --- /dev/null +++ b/openspec/changes/add-debug-logs-specfact-home/proposal.md @@ -0,0 +1,33 @@ +# Change: Add debug logs under ~/.specfact/logs with rich operation metadata + +## Why + +When `--debug` is enabled globally, users and developers need consistent debug logs to diagnose IO, file handling, and API issues. Today debug output is only printed to the console via `debug_print()` and is not persisted; there is no user-level log directory and no structured operation metadata (status, return, error) for file or API operations. Storing debug logs under `~/.specfact/logs` and including rich operation metadata makes it possible to identify issues when something does not work as expected without cluttering normal CLI output. + +## What Changes + +- **NEW**: User-level debug log directory `~/.specfact/logs` (create on first use when debug is enabled; mode 0o755). Add `get_specfact_home_logs_dir()` in `logger_setup.py` returning this path. +- **EXTEND**: When `--debug` is set, route `debug_print()` output to both console (current) and a debug log file under `~/.specfact/logs` (e.g. rotating `specfact-debug.log`). +- **NEW**: Optional helper `debug_log_operation(operation, target, status, error=None, extra=None, caller=None)` that no-ops when debug is off and when debug is on writes structured metadata to the debug log file; optional `caller` (module/method) for context. 
+- **EXTEND**: In key places (file IO, API calls), when debug is on, log operation metadata: operation type, target (path/URL redacted), status/result, error snippet; use `LoggerSetup.redact_secrets` for any response/request bodies. +- **EXTEND**: Adapters (ADO, GitHub) and commands (backlog refine, init, and all other CLI command modules) log file/API operation metadata when debug is enabled. +- **IMPLEMENTED**: Logger-module helpers to avoid duplicated formatting: `plain_text_for_debug_log(text)` and `format_debug_log_message(*args, **kwargs)` in `logger_setup.py`; runtime uses `_append_debug_log(*args, **kwargs)` so `debug_print()` stays a single call at call sites. +- **EXTEND**: Debug log file formatter includes **timestamp** on every line (e.g. `%(asctime)s | %(message)s`). Structured lines include **caller** (module/method) when provided or inferred. **File operations** log: prepared / finished / failed and status; **API operations** log: operation, URL (redacted), payload (sanitized), response, status, error, reason (via `extra`). +- **NOTE**: `get_runtime_logs_dir()` remains unchanged for existing callers (agent_flow, etc.); `~/.specfact/logs` is used only for the dedicated debug session log. + +## Capabilities + +- **debug-logging**: ADDED requirement for user-level debug log directory, debug file routing, and structured operation metadata when --debug is enabled. + +## Impact + +- **Affected specs**: debug-logging (new capability). 
+- **Affected code**: `src/specfact_cli/runtime.py` (debug_print file routing, _append_debug_log, debug_log_operation, timestamp formatter, caller context); `src/specfact_cli/common/logger_setup.py` (get_specfact_home_logs_dir, plain_text_for_debug_log, format_debug_log_message); `src/specfact_cli/cli.py` (debug file init); `src/specfact_cli/adapters/ado.py`, `src/specfact_cli/adapters/github.py` (operation metadata with payload/response/status/error/reason); all command modules under `src/specfact_cli/commands/` for consistent debug_print/debug_log_operation (auth, init, backlog_commands already; analyze, contract_cmd, drift, enforce, generate, import_cmd, migrate, plan, project_cmd, repro, sdd, spec, sync, update, validate to be covered). +- **Integration points**: runtime.set_debug_mode / is_debug_mode (existing); LoggerSetup.redact_secrets (existing). + +## Source Tracking + +- **GitHub Issue**: #158 +- **Issue URL**: +- **Repository**: nold-ai/specfact-cli +- **Last Synced Status**: proposed diff --git a/openspec/changes/add-debug-logs-specfact-home/specs/debug-logging/spec.md b/openspec/changes/add-debug-logs-specfact-home/specs/debug-logging/spec.md new file mode 100644 index 00000000..a0498066 --- /dev/null +++ b/openspec/changes/add-debug-logs-specfact-home/specs/debug-logging/spec.md @@ -0,0 +1,129 @@ +# debug-logging (delta) + +## ADDED Requirements + +### Requirement: User-level debug log directory + +The system SHALL provide a user-level directory for debug logs when debug mode is enabled. 
+ +#### Scenario: Resolve ~/.specfact/logs + +- **GIVEN** debug mode may be enabled +- **WHEN** `get_specfact_home_logs_dir()` is called +- **THEN** returns path equivalent to `os.path.expanduser("~/.specfact/logs")` +- **AND** creates the directory with `os.makedirs(..., mode=0o755, exist_ok=True)` on first use + +#### Scenario: No directory when debug is off + +- **GIVEN** debug mode is disabled +- **WHEN** no debug log has been written in this run +- **THEN** `~/.specfact/logs` is not required to exist +- **AND** `get_specfact_home_logs_dir()` may still return the path for callers that need it + +### Requirement: Debug output routing + +The system SHALL route debug output to both console and a debug log file when debug mode is enabled. + +#### Scenario: debug_print writes to console and file when debug on + +- **GIVEN** debug mode is enabled and debug log file is initialized +- **WHEN** `debug_print(...)` is called +- **THEN** output is written to the configured Rich console (current behavior) +- **AND** the same content is appended to the debug log file under `~/.specfact/logs` + +#### Scenario: debug_print console-only when debug off + +- **GIVEN** debug mode is disabled +- **WHEN** `debug_print(...)` is called +- **THEN** no output is produced (current behavior) +- **AND** no debug log file is written + +### Requirement: Structured operation metadata + +The system SHALL support logging structured operation metadata when debug mode is enabled. 
+ +#### Scenario: debug_log_operation no-op when debug off + +- **GIVEN** debug mode is disabled +- **WHEN** `debug_log_operation(operation=..., target=..., status=..., error=...)` is called +- **THEN** no log file write occurs +- **AND** no console output is produced + +#### Scenario: debug_log_operation writes metadata when debug on + +- **GIVEN** debug mode is enabled and debug log file is initialized +- **WHEN** `debug_log_operation(operation="api_request", target=url_redacted, status=200, error=None)` is called +- **THEN** a structured log line (or block) is written to the debug log file +- **AND** the line includes operation, target, status, and optionally error and extra fields +- **AND** sensitive values in target or extra are redacted (e.g. via LoggerSetup.redact_secrets) + +#### Scenario: Adapters log API metadata when debug on + +- **GIVEN** debug mode is enabled +- **WHEN** an adapter performs an API request (e.g. ADO WIQL, Work Items PATCH, GitHub REST) +- **THEN** the adapter logs operation metadata (operation type, URL redacted, method, status code) +- **AND** on failure, logs error snippet or response body (redacted) +- **AND** does not log full request/response bodies that may contain secrets + +### Requirement: Debug log standard (consistent pattern for anomaly analysis and bug reports) + +The system SHALL apply a consistent debug log pattern for every significant operation when debug mode is enabled, so that logs support anomaly analysis, unexpected error/failure diagnosis, reporting, and bug reports (production-tool quality). 
+ +#### Scenario: Every significant operation has started, progress, and outcome + +- **GIVEN** debug mode is enabled +- **WHEN** a significant operation is performed (auth flow, file I/O, API call, template resolution, or any operation that can fail or affect behavior) +- **THEN** the implementation logs at least: **started/prepared** (once at begin), **attempt** (for each distinct step if multi-step), and **outcome** (exactly once: success with reason/method/cache or failed with error and reason) +- **AND** no operation is represented by only a single INFO-style line without outcome and reason +- **AND** structured lines include operation, target (redacted), status, and when applicable error, extra (payload/response/reason sanitized), caller + +#### Scenario: Reference implementation + +- **GIVEN** the auth azure-devops flow +- **WHEN** debug mode is enabled and the user runs the OAuth flow +- **THEN** the debug log contains: started → cache_prepared → attempt (interactive_browser) → success or fallback (with reason) → attempt (device_code) → success or failed (with error/reason) → success (token_stored with method/cache) +- **AND** a reader can determine from the log alone whether the flow succeeded or failed and why + +### Requirement: Debug log context (timestamp, caller, file/API details) + +The system SHALL include context in every debug log line when debug mode is enabled. + +#### Scenario: Timestamp on every line + +- **GIVEN** debug mode is enabled and debug log file is initialized +- **WHEN** any line is written to the debug log file (via debug_print or debug_log_operation) +- **THEN** the line is prefixed with a timestamp (e.g. `%(asctime)s | %(message)s` with datefmt `%Y-%m-%d %H:%M:%S`) + +#### Scenario: Caller (module/method) in structured lines + +- **GIVEN** debug mode is enabled +- **WHEN** `debug_log_operation(..., caller=...)` is called with a caller string (e.g. 
`module:function`) +- **THEN** the structured log line includes the caller in the payload +- **AND** call sites may infer caller via inspect or pass explicitly + +#### Scenario: File operations log prepared / finished / failed + +- **GIVEN** debug mode is enabled +- **WHEN** a command or adapter performs file IO (read/write) +- **THEN** it logs operation metadata with status prepared/started before the operation +- **AND** logs again with status finished or failed and error/reason when applicable +- **AND** target is the path (redacted if sensitive); extra may include size, mime, etc. + +#### Scenario: API operations log operation, URL, payload (sanitized), response, status, error, reason + +- **GIVEN** debug mode is enabled +- **WHEN** an adapter performs an API request +- **THEN** it logs operation metadata with operation type, target (URL redacted), status (HTTP or success/failure) +- **AND** extra includes payload (sanitized via LoggerSetup.redact_secrets), response (sanitized), and reason when applicable +- **AND** on failure, error and reason are included + +### Requirement: Backward compatibility + +The system SHALL preserve existing behavior when debug mode is disabled. + +#### Scenario: get_runtime_logs_dir unchanged + +- **GIVEN** any mode +- **WHEN** `get_runtime_logs_dir()` is called +- **THEN** returns the same path as before (repo-relative logs or /app/logs in Docker) +- **AND** behavior of LoggerSetup and existing loggers is unchanged diff --git a/openspec/changes/add-debug-logs-specfact-home/tasks.md b/openspec/changes/add-debug-logs-specfact-home/tasks.md new file mode 100644 index 00000000..825743bc --- /dev/null +++ b/openspec/changes/add-debug-logs-specfact-home/tasks.md @@ -0,0 +1,49 @@ +# Tasks: Add debug logs under ~/.specfact/logs + +## 1. 
Create git branch + +- [x] 1.1.1 Ensure we're on dev and up to date: `git checkout dev && git pull origin dev` +- [x] 1.1.2 Create branch with Development link to issue: `gh issue develop 158 --repo nold-ai/specfact-cli --name feature/add-debug-logs-specfact-home --checkout` (or `git checkout -b feature/add-debug-logs-specfact-home` if gh not available) +- [x] 1.1.3 Verify branch: `git branch --show-current` + +## 2. User-level debug log directory and logger helpers + +- [x] 2.1.1 Add `get_specfact_home_logs_dir()` in `src/specfact_cli/common/logger_setup.py` returning `os.path.expanduser("~/.specfact/logs")`, creating directory with `os.makedirs(..., mode=0o755, exist_ok=True)` on first use. +- [x] 2.1.2 Add `@icontract` and `@beartype` decorators; ensure result is non-empty string. +- [x] 2.1.3 Add unit test for `get_specfact_home_logs_dir()` (temp HOME, verify path and directory creation). +- [x] 2.1.4 Run `hatch run format` and `hatch run type-check` +- [x] 2.1.5 Add `plain_text_for_debug_log(text)` and `format_debug_log_message(*args, **kwargs)` in logger_setup.py so call sites do not duplicate Rich markup stripping; runtime uses `_append_debug_log(*args, **kwargs)` and `format_debug_log_message` so debug_print stays a single call. + +## 3. Debug file routing in runtime + +- [x] 3.1.1 In `src/specfact_cli/runtime.py`, when `debug_print()` is called and `_debug_mode` is True, also write the same content to a debug log file under `~/.specfact/logs` (e.g. rotating `specfact-debug.log`). Initialize file handler lazily on first debug_print when debug is on. +- [x] 3.1.2 Add `debug_log_operation(operation: str, target: str, status: str, error: str | None = None, extra: dict | None = None, caller: str | None = None)` that no-ops when `not is_debug_mode()` and when debug is on writes a structured line to the debug log file. Redact `target`/`extra` via `LoggerSetup.redact_secrets`. Optional `caller` (module/method) for context. 
+- [x] 3.1.3 In `src/specfact_cli/cli.py` app callback, after `set_debug_mode(debug)`, if `debug` is True ensure debug log file is initialized (e.g. call a one-time init that creates dir and sets up file handler for runtime). +- [x] 3.1.4 Add unit tests for `debug_print()` writing to file when debug on (temp dir); for `debug_log_operation()` no-op when debug off and write when on. +- [x] 3.1.5 Run `hatch run format` and `hatch run type-check` +- [x] 3.1.6 Debug log formatter: use `%(asctime)s | %(message)s` with `datefmt="%Y-%m-%d %H:%M:%S"` so every line has a timestamp. +- [x] 3.1.7 Include caller (module/method) in debug_log_operation payload when provided; infer caller via inspect for _append_debug_log so narrative lines also have context (_get_debug_caller()). + +## 4. Operation metadata in adapters and commands + +- [x] 4.1.1 In `src/specfact_cli/adapters/ado.py`, when `is_debug_mode()` is True, log operation metadata for WIQL, Work Items fetch, and PATCH (URL redacted, method, status code; on failure error snippet). Extend extra with payload (sanitized), response, reason where applicable. +- [x] 4.1.2 In `src/specfact_cli/adapters/github.py`, when debug is on, log API request/response metadata (URL redacted, method, status; on failure redacted snippet). Extend extra with payload (sanitized), response, reason. +- [x] 4.1.3 In `src/specfact_cli/commands/backlog_commands.py`, when debug is on, log file read/write for export/import (path, prepared/finished/failed, status, error). +- [x] 4.1.4 In `src/specfact_cli/commands/init.py`, when debug is on, ensure template resolution steps are also written to debug log (in addition to existing `debug_print()`); add debug_log_operation for template resolution status with caller. 
+- [x] 4.1.5 Run `hatch run format` and `hatch run type-check` +- [ ] 4.1.6 Apply the **debug log standard** consistently to remaining command modules and adapters: analyze, contract_cmd, drift, enforce, generate, import_cmd, migrate, plan, project_cmd, repro, sdd, spec, sync, update, validate. For each: log started/prepared → attempt (if multi-step) → success or failed with reason/error; include all context needed for anomaly analysis and bug reports (operation, target, status, error, extra: payload/response/reason sanitized). File ops: prepared → finished/failed. API ops: attempt → success/failed with payload, response, reason. No single-line INFO-style entries; every significant operation must have full context (see design “Debug log standard” and auth azure-devops reference). + +## 5. Tests and quality + +- [x] 5.1.1 Add or extend tests for debug log file creation, routing, and `debug_log_operation()` (unit with temp dir; mock `is_debug_mode()`). +- [x] 5.1.2 Run `hatch run contract-test` (or `hatch run smart-test`) +- [x] 5.1.3 Run `hatch run lint` +- [x] 5.1.4 Run `openspec validate add-debug-logs-specfact-home --strict` + +## 6. Documentation and PR + +- [x] 6.1.1 Update CLI help for `--debug` to mention log location (`~/.specfact/logs`) and purpose. +- [x] 6.1.2 Update versions and increase patch version. Sync versions across `pyproject.toml`, `setup.py`, `src/__init__.py`, and `src/specfact_cli/__init__.py`. +- [x] 6.1.3 Update CHANGELOG.md with new behavior and use the new patch version from 6.1.2. +- [x] 6.1.4 Add `docs/reference/debug-logging.md` (user and developer reference); link from `docs/reference/commands.md`, `docs/reference/README.md`, `docs/reference/directory-structure.md`, `docs/guides/troubleshooting.md`, and `README.md`. +- [ ] 6.1.5 Push branch and create Pull Request: `git push -u origin feature/add-debug-logs-specfact-home` then `gh pr create --base dev --title "feat: add debug logs under ~/.specfact/logs" --body "Closes #158. 
OpenSpec change: add-debug-logs-specfact-home."` (or use GitHub web UI). diff --git a/pyproject.toml b/pyproject.toml index 5d8ec3c1..2a262283 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "specfact-cli" -version = "0.26.11" +version = "0.26.13" description = "Brownfield-first CLI: Reverse engineer legacy Python → specs → enforced contracts. Automate legacy code documentation and prevent modernization regressions." readme = "README.md" requires-python = ">=3.11" diff --git a/setup.py b/setup.py index 3ab50a83..8f2973f2 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ if __name__ == "__main__": _setup = setup( name="specfact-cli", - version="0.26.11", + version="0.26.13", description="SpecFact CLI - Spec -> Contract -> Sentinel tool for contract-driven development", packages=find_packages(where="src"), package_dir={"": "src"}, diff --git a/src/__init__.py b/src/__init__.py index e56d979e..33f642b5 100644 --- a/src/__init__.py +++ b/src/__init__.py @@ -3,4 +3,4 @@ """ # Package version: keep in sync with pyproject.toml, setup.py, src/specfact_cli/__init__.py -__version__ = "0.26.11" +__version__ = "0.26.13" diff --git a/src/specfact_cli/__init__.py b/src/specfact_cli/__init__.py index 582d1d64..7dab7940 100644 --- a/src/specfact_cli/__init__.py +++ b/src/specfact_cli/__init__.py @@ -9,6 +9,6 @@ - Validating reproducibility """ -__version__ = "0.26.11" +__version__ = "0.26.13" __all__ = ["__version__"] diff --git a/src/specfact_cli/__main__.py b/src/specfact_cli/__main__.py new file mode 100644 index 00000000..459fbb56 --- /dev/null +++ b/src/specfact_cli/__main__.py @@ -0,0 +1,6 @@ +"""Allow running the CLI as python -m specfact_cli.""" + +if __name__ == "__main__": + from specfact_cli.cli import cli_main + + cli_main() diff --git a/src/specfact_cli/adapters/ado.py b/src/specfact_cli/adapters/ado.py index 556bf234..20b7ced5 100644 --- a/src/specfact_cli/adapters/ado.py +++ 
b/src/specfact_cli/adapters/ado.py @@ -11,7 +11,8 @@ from __future__ import annotations import os -import re + +# import re from datetime import UTC, datetime from pathlib import Path from typing import Any @@ -31,7 +32,7 @@ from specfact_cli.models.bridge import BridgeConfig from specfact_cli.models.capabilities import ToolCapabilities from specfact_cli.models.change import ChangeProposal, ChangeTracking -from specfact_cli.runtime import debug_print +from specfact_cli.runtime import debug_log_operation, debug_print, is_debug_mode from specfact_cli.utils.auth_tokens import get_token, set_token @@ -372,6 +373,8 @@ def extract_change_proposal_data(self, item_data: dict[str, Any]) -> dict[str, A rationale = "" impact = "" + import re + # Parse markdown sections (Why, What Changes) if description_raw: # Extract "Why" section (stop at What Changes or OpenSpec footer) @@ -696,6 +699,8 @@ def export_artifact( ValueError: If required configuration is missing requests.RequestException: If Azure DevOps API call fails """ + import re as _re + if not self.api_token: msg = ( "Azure DevOps API token required. Options:\n" @@ -751,7 +756,7 @@ def export_artifact( parsed = urlparse(source_url) if parsed.hostname and parsed.hostname.lower() == "dev.azure.com": target_org = target_repo.split("/")[0] - ado_org_match = re.search(r"dev\.azure\.com/([^/]+)/", source_url) + ado_org_match = _re.search(r"dev\.azure\.com/([^/]+)/", source_url) if ado_org_match and ado_org_match.group(1) == target_org: # Org matches - this is likely the same ADO organization work_item_id = entry.get("source_id") @@ -773,8 +778,8 @@ def export_artifact( # 3. 
AND (project is unknown in entry OR project is unknown in target OR both contain GUIDs) # This prevents matching org/project-a with org/project-b when both have known project names source_url = entry.get("source_url", "") - entry_has_guid = source_url and re.search( - r"dev\.azure\.com/[^/]+/[0-9a-f-]{36}", source_url, re.IGNORECASE + entry_has_guid = source_url and _re.search( + r"dev\.azure\.com/[^/]+/[0-9a-f-]{36}", source_url, _re.IGNORECASE ) project_unknown = ( not entry_project # Entry has no project part @@ -967,8 +972,10 @@ def _parse_work_item_reference(self, item_ref: str) -> tuple[str, str, int]: Returns: Tuple of (org, project, work_item_id) """ + import re as _re + cleaned = item_ref.strip().lstrip("#") - url_match = re.search(r"dev\.azure\.com/([^/]+)/([^/]+)/.*?/(\d+)", cleaned, re.IGNORECASE) + url_match = _re.search(r"dev\.azure\.com/([^/]+)/([^/]+)/.*?/(\d+)", cleaned, _re.IGNORECASE) if url_match: return url_match.group(1), url_match.group(2), int(url_match.group(3)) @@ -1439,6 +1446,13 @@ def _find_work_item_by_change_id(self, change_id: str, org: str, project: str) - try: response = requests.post(url, json=wiql, headers=headers, timeout=10) + if is_debug_mode(): + debug_log_operation( + "ado_wiql", + url, + str(response.status_code), + error=None if response.ok else (response.text[:200] if response.text else None), + ) if response.status_code != 200: return None work_items = response.json().get("workItems", []) @@ -1453,7 +1467,9 @@ def _find_work_item_by_change_id(self, change_id: str, org: str, project: str) - "source_type": "ado", "source_repo": f"{org}/{project}", } - except requests.RequestException: + except requests.RequestException as e: + if is_debug_mode(): + debug_log_operation("ado_wiql", url, "error", error=str(e)) return None def _create_work_item_from_proposal( @@ -1473,6 +1489,8 @@ def _create_work_item_from_proposal( Returns: Dict with work item data: {"work_item_id": int, "work_item_url": str, "state": str} """ + import re as 
_re + title = proposal_data.get("title", "Untitled Change Proposal") description = proposal_data.get("description", "") rationale = proposal_data.get("rationale", "") @@ -1489,7 +1507,7 @@ def _create_work_item_from_proposal( else: body_parts = [] - display_title = re.sub(r"^\[change\]\s*", "", title, flags=re.IGNORECASE).strip() + display_title = _re.sub(r"^\[change\]\s*", "", title, flags=_re.IGNORECASE).strip() if display_title: body_parts.append(f"# {display_title}") body_parts.append("") @@ -1572,6 +1590,13 @@ def _create_work_item_from_proposal( try: response = requests.patch(url, json=patch_document, headers=headers, timeout=30) + if is_debug_mode(): + debug_log_operation( + "ado_patch", + url, + str(response.status_code), + error=None if response.ok else (response.text[:200] if response.text else None), + ) response.raise_for_status() work_item_data = response.json() @@ -1617,6 +1642,8 @@ def _create_work_item_from_proposal( "state": ado_state, } except requests.RequestException as e: + if is_debug_mode(): + debug_log_operation("ado_patch", url, "error", error=str(e)) msg = f"Failed to create Azure DevOps work item: {e}" console.print(f"[bold red]✗[/bold red] {msg}") raise @@ -1740,6 +1767,8 @@ def _update_work_item_body( Returns: Dict with updated work item data: {"work_item_id": int, "work_item_url": str, "state": str} """ + import re as _re + title = proposal_data.get("title", "Untitled Change Proposal") description = proposal_data.get("description", "") rationale = proposal_data.get("rationale", "") @@ -1756,7 +1785,7 @@ def _update_work_item_body( else: body_parts = [] - display_title = re.sub(r"^\[change\]\s*", "", title, flags=re.IGNORECASE).strip() + display_title = _re.sub(r"^\[change\]\s*", "", title, flags=_re.IGNORECASE).strip() if display_title: body_parts.append(f"# {display_title}") body_parts.append("") @@ -2908,8 +2937,22 @@ def fetch_backlog_items(self, filters: BacklogFilters) -> list[BacklogItem]: try: response = requests.get(url, 
headers=workitems_headers, params=params, timeout=30) + if is_debug_mode(): + debug_log_operation( + "ado_workitems_get", + url, + str(response.status_code), + error=None if response.ok else (response.text[:200] if response.text else None), + ) response.raise_for_status() except requests.HTTPError as e: + if is_debug_mode(): + debug_log_operation( + "ado_workitems_get", + url, + "error", + error=str(e.response.status_code) if e.response is not None else str(e), + ) # Provide better error message with URL details error_detail = "" if e.response is not None: @@ -3095,11 +3138,12 @@ def update_backlog_item(self, item: BacklogItem, update_fields: list[str] | None # Update description (body_markdown) - always use System.Description if update_fields is None or "body" in update_fields or "body_markdown" in update_fields: - # Convert TODO markers to proper Markdown checkboxes for ADO rendering import re - markdown_content = item.body_markdown - # Pattern matches: * [TODO: ...] or - [TODO: ...] or *[TODO: ...] or -[TODO: ...] 
+ # Never send null: ADO rejects null for /fields/System.Description (HTTP 400) + raw_body = item.body_markdown + markdown_content = raw_body if raw_body is not None else "" + # Convert TODO markers to proper Markdown checkboxes for ADO rendering todo_pattern = r"^(\s*)[-*]\s*\[TODO[:\s]+([^\]]+)\](.*)$" markdown_content = re.sub( todo_pattern, @@ -3108,11 +3152,9 @@ def update_backlog_item(self, item: BacklogItem, update_fields: list[str] | None flags=re.MULTILINE | re.IGNORECASE, ) - # Get mapped description field name (honors custom mappings) description_field = reverse_mappings.get("description", "System.Description") - # Set multiline field format to Markdown FIRST (before setting content) + # Set multiline field format to Markdown first (optional; many ADO instances return 400 for this path) operations.append({"op": "add", "path": f"/multilineFieldsFormat/{description_field}", "value": "Markdown"}) - # Then set description content with Markdown format operations.append({"op": "replace", "path": f"/fields/{description_field}", "value": markdown_content}) # Update acceptance criteria using mapped field name (honors custom mappings) @@ -3158,7 +3200,8 @@ def update_backlog_item(self, item: BacklogItem, update_fields: list[str] | None response = requests.patch(url, headers=headers, json=operations, timeout=30) response.raise_for_status() except requests.HTTPError as e: - # Handle various error cases + # Handle 400/422: often caused by /multilineFieldsFormat/ not being supported by ADO API + response = None if e.response and e.response.status_code in (400, 422): error_message = "" try: @@ -3167,91 +3210,72 @@ def update_backlog_item(self, item: BacklogItem, update_fields: list[str] | None except Exception: pass - # Check if error is about multilineFieldsFormat already existing (use "replace" instead) - if "already exists" in error_message.lower() or "cannot add" in error_message.lower(): - # Try with "replace" operation for multilineFieldsFormat + # First retry: 
omit multilineFieldsFormat entirely (only /fields/ updates). + # Many ADO instances reject /multilineFieldsFormat/ path with 400 Bad Request. + operations_no_format = [ + op for op in operations if not (op.get("path") or "").startswith("/multilineFieldsFormat/") + ] + if operations_no_format != operations: + try: + resp = requests.patch(url, headers=headers, json=operations_no_format, timeout=30) + resp.raise_for_status() + response = resp + except requests.HTTPError: + pass + + if response is None and ( + "already exists" in error_message.lower() or "cannot add" in error_message.lower() + ): + # Second: try "replace" instead of "add" for multilineFieldsFormat operations_replace = [] for op in operations: - if op.get("path") == "/multilineFieldsFormat/System.Description": - # Change to replace operation - operations_replace.append({"op": "replace", "path": op["path"], "value": op["value"]}) + path = op.get("path") or "" + if path.startswith("/multilineFieldsFormat/"): + operations_replace.append({"op": "replace", "path": path, "value": op["value"]}) else: operations_replace.append(op) - try: - response = requests.patch(url, headers=headers, json=operations_replace, timeout=30) - response.raise_for_status() + resp = requests.patch(url, headers=headers, json=operations_replace, timeout=30) + resp.raise_for_status() + response = resp except requests.HTTPError: - # If replace also fails, fallback to HTML conversion - console.print("[yellow]⚠ Markdown format not supported, converting to HTML[/yellow]") - operations_html = [ - op for op in operations if "/multilineFieldsFormat/" not in op.get("path", "") - ] - # Find description operation and convert markdown to HTML - for op in operations_html: - if op.get("path") == "/fields/System.Description": - # Convert TODO markers to HTML checkboxes before converting to HTML - import re - - markdown_for_html = op["value"] - # Convert TODO markers to checkboxes first - todo_pattern = r"^(\s*)[-*]\s*\[TODO[:\s]+([^\]]+)\](.*)$" - 
markdown_for_html = re.sub( - todo_pattern, - r"\1- [ ] \2", - markdown_for_html, - flags=re.MULTILINE | re.IGNORECASE, - ) - # Simple markdown to HTML conversion (basic) - try: - import markdown - - html_body = markdown.markdown( - markdown_for_html, extensions=["fenced_code", "tables"] - ) - op["value"] = html_body - except ImportError: - # markdown library not available - use raw text - console.print("[yellow]⚠ markdown library not available, using raw text[/yellow]") - # Keep original markdown as-is (ADO may still render it) - break - - response = requests.patch(url, headers=headers, json=operations_html, timeout=30) - response.raise_for_status() - else: - # Other 400/422 errors - try HTML fallback - console.print("[yellow]⚠ Markdown format not supported, converting to HTML[/yellow]") - operations_html = [op for op in operations if "/multilineFieldsFormat/" not in op.get("path", "")] - # Find description operation and convert markdown to HTML + pass + + if response is None: + # Third: HTML fallback (no multilineFieldsFormat, description as HTML) + import re as _re + + console.print("[yellow]⚠ Markdown format not supported, converting description to HTML[/yellow]") + operations_html = [ + op for op in operations if not (op.get("path") or "").startswith("/multilineFieldsFormat/") + ] + description_field = reverse_mappings.get("description", "System.Description") + desc_path = f"/fields/{description_field}" for op in operations_html: - if op.get("path") == "/fields/System.Description": - # Convert TODO markers to HTML checkboxes before converting to HTML - import re - - markdown_for_html = op["value"] - # Convert TODO markers to checkboxes first + if op.get("path") == desc_path: + markdown_for_html = op.get("value") or "" todo_pattern = r"^(\s*)[-*]\s*\[TODO[:\s]+([^\]]+)\](.*)$" - markdown_for_html = re.sub( + markdown_for_html = _re.sub( todo_pattern, r"\1- [ ] \2", markdown_for_html, - flags=re.MULTILINE | re.IGNORECASE, + flags=_re.MULTILINE | _re.IGNORECASE, 
) - # Simple markdown to HTML conversion (basic) try: import markdown - html_body = markdown.markdown(markdown_for_html, extensions=["fenced_code", "tables"]) - op["value"] = html_body + op["value"] = markdown.markdown(markdown_for_html, extensions=["fenced_code", "tables"]) except ImportError: - # markdown library not available - use raw text - console.print("[yellow]⚠ markdown library not available, using raw text[/yellow]") - # Keep original markdown as-is (ADO may still render it) + pass break + try: + resp = requests.patch(url, headers=headers, json=operations_html, timeout=30) + resp.raise_for_status() + response = resp + except requests.HTTPError: + raise - response = requests.patch(url, headers=headers, json=operations_html, timeout=30) - response.raise_for_status() - else: + if response is None: raise updated_work_item = response.json() diff --git a/src/specfact_cli/adapters/github.py b/src/specfact_cli/adapters/github.py index bae8cf18..01377d9a 100644 --- a/src/specfact_cli/adapters/github.py +++ b/src/specfact_cli/adapters/github.py @@ -35,6 +35,7 @@ from specfact_cli.models.bridge import BridgeConfig from specfact_cli.models.capabilities import ToolCapabilities from specfact_cli.models.change import ChangeProposal, ChangeTracking +from specfact_cli.runtime import debug_log_operation, is_debug_mode from specfact_cli.utils.auth_tokens import get_token @@ -817,6 +818,13 @@ def fetch_backlog_item(self, item_ref: str) -> dict[str, Any]: "Accept": "application/vnd.github.v3+json", } response = requests.get(url, headers=headers, timeout=30) + if is_debug_mode(): + debug_log_operation( + "github_api_get", + url, + str(response.status_code), + error=None if response.ok else (response.text[:200] if response.text else None), + ) response.raise_for_status() return response.json() diff --git a/src/specfact_cli/cli.py b/src/specfact_cli/cli.py index 58ec2d0a..c0cd7f0d 100644 --- a/src/specfact_cli/cli.py +++ b/src/specfact_cli/cli.py @@ -75,7 +75,7 @@ def 
_normalized_detect_shell(pid=None, max_depth=10): # type: ignore[misc] validate, ) from specfact_cli.modes import OperationalMode, detect_mode -from specfact_cli.runtime import get_configured_console, set_debug_mode +from specfact_cli.runtime import get_configured_console, init_debug_log_file, set_debug_mode from specfact_cli.utils.progressive_disclosure import ProgressiveDisclosureGroup from specfact_cli.utils.structured_io import StructuredFormat @@ -246,7 +246,7 @@ def main( debug: bool = typer.Option( False, "--debug", - help="Enable debug output (shows detailed logging and diagnostic information)", + help="Enable debug output: console diagnostics and log file at ~/.specfact/logs/specfact-debug.log (operation metadata for file I/O and API calls)", ), skip_checks: bool = typer.Option( False, @@ -297,6 +297,8 @@ def main( # Set debug mode set_debug_mode(debug) + if debug: + init_debug_log_file() runtime.configure_io_formats(input_format=input_format, output_format=output_format) # Invert logic: --interactive means not non-interactive, --no-interactive means non-interactive diff --git a/src/specfact_cli/commands/analyze.py b/src/specfact_cli/commands/analyze.py index 77d500bf..acff0a75 100644 --- a/src/specfact_cli/commands/analyze.py +++ b/src/specfact_cli/commands/analyze.py @@ -17,6 +17,7 @@ from rich.table import Table from specfact_cli.models.quality import CodeQuality, QualityTracking +from specfact_cli.runtime import debug_log_operation, debug_print, is_debug_mode from specfact_cli.telemetry import telemetry from specfact_cli.utils import print_error, print_success from specfact_cli.utils.progress import load_bundle_with_progress @@ -63,12 +64,23 @@ def analyze_contracts( **Examples:** specfact analyze contracts --repo . 
--bundle legacy-api """ + if is_debug_mode(): + debug_log_operation("command", "analyze contracts", "started", extra={"repo": str(repo), "bundle": bundle}) + debug_print("[dim]analyze contracts: started[/dim]") console = Console() # Use active plan as default if bundle not provided if bundle is None: bundle = SpecFactStructure.get_active_bundle_name(repo) if bundle is None: + if is_debug_mode(): + debug_log_operation( + "command", + "analyze contracts", + "failed", + error="Bundle name required", + extra={"reason": "no_bundle"}, + ) console.print("[bold red]✗[/bold red] Bundle name required") console.print("[yellow]→[/yellow] Use --bundle option or run 'specfact plan select' to set active plan") raise typer.Exit(1) @@ -78,6 +90,14 @@ def analyze_contracts( bundle_dir = SpecFactStructure.project_dir(base_path=repo_path, bundle_name=bundle) if not bundle_dir.exists(): + if is_debug_mode(): + debug_log_operation( + "command", + "analyze contracts", + "failed", + error=f"Bundle not found: {bundle_dir}", + extra={"reason": "bundle_missing"}, + ) print_error(f"Project bundle not found: {bundle_dir}") raise typer.Exit(1) @@ -200,6 +220,14 @@ def sort_key(item: tuple[str, CodeQuality]) -> tuple[bool, int, str]: "files_with_crosshair": files_with_crosshair, } ) + if is_debug_mode(): + debug_log_operation( + "command", + "analyze contracts", + "success", + extra={"files_analyzed": files_analyzed, "bundle": bundle}, + ) + debug_print("[dim]analyze contracts: success[/dim]") def _analyze_file_quality(file_path: Path) -> CodeQuality: diff --git a/src/specfact_cli/commands/auth.py b/src/specfact_cli/commands/auth.py index 2928506d..1254349d 100644 --- a/src/specfact_cli/commands/auth.py +++ b/src/specfact_cli/commands/auth.py @@ -12,7 +12,7 @@ from beartype import beartype from icontract import ensure, require -from specfact_cli.runtime import get_configured_console +from specfact_cli.runtime import debug_log_operation, debug_print, get_configured_console from 
specfact_cli.utils.auth_tokens import ( clear_all_tokens, clear_token, @@ -232,6 +232,8 @@ def prompt_callback(verification_uri: str, user_code: str, expires_on: datetime) # Users should set expiration when creating PAT (up to 1 year) } set_token("azure-devops", token_data) + debug_log_operation("auth", "azure-devops", "success", extra={"method": "pat"}) + debug_print("[dim]auth azure-devops: PAT stored[/dim]") console.print("[bold green]✓[/bold green] Personal Access Token stored") console.print( "[dim]PAT stored successfully. PATs can have expiration up to 1 year when created in Azure DevOps.[/dim]" @@ -241,6 +243,8 @@ def prompt_callback(verification_uri: str, user_code: str, expires_on: datetime) # OAuth flow with persistent token cache (automatic refresh) # Try interactive browser first, fall back to device code if it fails + debug_log_operation("auth", "azure-devops", "started", extra={"flow": "oauth"}) + debug_print("[dim]auth azure-devops: OAuth flow started[/dim]") console.print("[bold]Starting Azure DevOps OAuth authentication...[/bold]") # Enable persistent token cache for automatic token refresh (like Azure CLI) @@ -260,6 +264,8 @@ def prompt_callback(verification_uri: str, user_code: str, expires_on: datetime) name="specfact-azure-devops", # Shared cache name across processes allow_unencrypted_storage=False, # Prefer encrypted storage ) + debug_log_operation("auth", "azure-devops", "cache_prepared", extra={"cache": "encrypted"}) + debug_print("[dim]auth azure-devops: token cache prepared (encrypted)[/dim]") # Don't claim encrypted cache is enabled until we verify it works # We'll print a message after successful authentication # Check if we're on Linux and provide helpful info @@ -284,6 +290,13 @@ def prompt_callback(verification_uri: str, user_code: str, expires_on: datetime) allow_unencrypted_storage=True, # Fallback: unencrypted storage ) use_unencrypted_cache = True + debug_log_operation( + "auth", + "azure-devops", + "cache_prepared", + 
extra={"cache": "unencrypted", "reason": "encrypted_unavailable"}, + ) + debug_print("[dim]auth azure-devops: token cache prepared (unencrypted fallback)[/dim]") console.print( "[yellow]Note:[/yellow] Encrypted cache unavailable (keyring locked). " "Using unencrypted cache instead.\n" @@ -320,6 +333,13 @@ def prompt_callback(verification_uri: str, user_code: str, expires_on: datetime) ) except Exception: # Persistent cache completely unavailable, use in-memory only + debug_log_operation( + "auth", + "azure-devops", + "cache_prepared", + extra={"cache": "none", "reason": "persistent_unavailable"}, + ) + debug_print("[dim]auth azure-devops: no persistent cache, in-memory only[/dim]") console.print( "[yellow]Note:[/yellow] Persistent cache not available, using in-memory cache only. " "Tokens will need to be refreshed manually after expiration." @@ -405,21 +425,45 @@ def try_authenticate_with_fallback(credential_class, credential_kwargs): # Try interactive browser first (better UX), fall back to device code if it fails token = None if not use_device_code: + debug_log_operation("auth", "azure-devops", "attempt", extra={"method": "interactive_browser"}) + debug_print("[dim]auth azure-devops: attempting interactive browser[/dim]") try: console.print("[dim]Trying interactive browser authentication...[/dim]") token = try_authenticate_with_fallback(InteractiveBrowserCredential, {}) + debug_log_operation("auth", "azure-devops", "success", extra={"method": "interactive_browser"}) + debug_print("[dim]auth azure-devops: interactive browser succeeded[/dim]") console.print("[bold green]✓[/bold green] Interactive browser authentication successful") except Exception as e: # Interactive browser failed (no display, headless environment, etc.) 
+ debug_log_operation( + "auth", + "azure-devops", + "fallback", + error=str(e), + extra={"method": "interactive_browser", "reason": "unavailable"}, + ) + debug_print(f"[dim]auth azure-devops: interactive browser failed, falling back: {e!s}[/dim]") console.print(f"[yellow]⚠[/yellow] Interactive browser unavailable: {type(e).__name__}") console.print("[dim]Falling back to device code flow...[/dim]") # Use device code flow if interactive browser failed or was explicitly requested if token is None: + debug_log_operation("auth", "azure-devops", "attempt", extra={"method": "device_code"}) + debug_print("[dim]auth azure-devops: trying device code[/dim]") console.print("[bold]Using device code authentication...[/bold]") try: token = try_authenticate_with_fallback(DeviceCodeCredential, {"prompt_callback": prompt_callback}) + debug_log_operation("auth", "azure-devops", "success", extra={"method": "device_code"}) + debug_print("[dim]auth azure-devops: device code succeeded[/dim]") except Exception as e: + debug_log_operation( + "auth", + "azure-devops", + "failed", + error=str(e), + extra={"method": "device_code", "reason": type(e).__name__}, + ) + debug_print(f"[dim]auth azure-devops: device code failed: {e!s}[/dim]") console.print(f"[bold red]✗[/bold red] Authentication failed: {e}") raise typer.Exit(1) from e @@ -456,6 +500,18 @@ def try_authenticate_with_fallback(credential_class, credential_kwargs): } set_token("azure-devops", token_data) + cache_type = ( + "encrypted" + if cache_options and not use_unencrypted_cache + else ("unencrypted" if use_unencrypted_cache else "none") + ) + debug_log_operation( + "auth", + "azure-devops", + "success", + extra={"method": "oauth", "cache": cache_type, "reason": "token_stored"}, + ) + debug_print("[dim]auth azure-devops: OAuth complete, token stored[/dim]") console.print("[bold green]✓[/bold green] Azure DevOps authentication complete") console.print("Stored token for provider: azure-devops") diff --git 
a/src/specfact_cli/commands/backlog_commands.py b/src/specfact_cli/commands/backlog_commands.py index 36dd3a7e..cb020f1d 100644 --- a/src/specfact_cli/commands/backlog_commands.py +++ b/src/specfact_cli/commands/backlog_commands.py @@ -36,6 +36,7 @@ from specfact_cli.backlog.template_detector import TemplateDetector from specfact_cli.models.backlog_item import BacklogItem from specfact_cli.models.dor_config import DefinitionOfReady +from specfact_cli.runtime import debug_log_operation, is_debug_mode from specfact_cli.templates.registry import TemplateRegistry @@ -772,7 +773,14 @@ def refine( raise typer.Exit(1) console.print(f"[bold cyan]Importing refined content from: {import_file}[/bold cyan]") - raw = import_file.read_text(encoding="utf-8") + try: + raw = import_file.read_text(encoding="utf-8") + if is_debug_mode(): + debug_log_operation("file_read", str(import_file), "success") + except OSError as e: + if is_debug_mode(): + debug_log_operation("file_read", str(import_file), "error", error=str(e)) + raise parsed_by_id = _parse_refined_export_markdown(raw) if not parsed_by_id: console.print( @@ -785,7 +793,8 @@ def refine( if item.id not in parsed_by_id: continue data = parsed_by_id[item.id] - item.body_markdown = data.get("body_markdown", item.body_markdown or "") + body = data.get("body_markdown", item.body_markdown or "") + item.body_markdown = body if body is not None else (item.body_markdown or "") if "acceptance_criteria" in data: item.acceptance_criteria = data["acceptance_criteria"] if data.get("title"): diff --git a/src/specfact_cli/commands/drift.py b/src/specfact_cli/commands/drift.py index bf2e0d78..b34b93f9 100644 --- a/src/specfact_cli/commands/drift.py +++ b/src/specfact_cli/commands/drift.py @@ -15,6 +15,7 @@ from icontract import ensure, require from rich.console import Console +from specfact_cli.runtime import debug_log_operation, debug_print, is_debug_mode from specfact_cli.telemetry import telemetry from specfact_cli.utils import print_error, 
print_success @@ -75,6 +76,11 @@ def detect_drift( specfact drift detect legacy-api --repo . specfact drift detect my-bundle --repo . --format json --out drift-report.json """ + if is_debug_mode(): + debug_log_operation( + "command", "drift detect", "started", extra={"bundle": bundle, "repo": str(repo), "format": output_format} + ) + debug_print("[dim]drift detect: started[/dim]") from rich.console import Console from specfact_cli.utils.structure import SpecFactStructure @@ -85,6 +91,10 @@ def detect_drift( if bundle is None: bundle = SpecFactStructure.get_active_bundle_name(repo) if bundle is None: + if is_debug_mode(): + debug_log_operation( + "command", "drift detect", "failed", error="Bundle name required", extra={"reason": "no_bundle"} + ) console.print("[bold red]✗[/bold red] Bundle name required") console.print("[yellow]→[/yellow] Use --bundle option or run 'specfact plan select' to set active plan") raise typer.Exit(1) @@ -127,6 +137,14 @@ def detect_drift( else: console.print(output) else: + if is_debug_mode(): + debug_log_operation( + "command", + "drift detect", + "failed", + error=f"Unknown format: {output_format}", + extra={"reason": "invalid_format"}, + ) print_error(f"Unknown output format: {output_format}") raise typer.Exit(1) @@ -156,6 +174,14 @@ def detect_drift( "total_issues": total_issues, } ) + if is_debug_mode(): + debug_log_operation( + "command", + "drift detect", + "success", + extra={"bundle": bundle, "total_issues": total_issues}, + ) + debug_print("[dim]drift detect: success[/dim]") def _display_drift_report_table(report: Any) -> None: diff --git a/src/specfact_cli/commands/enforce.py b/src/specfact_cli/commands/enforce.py index 1ac5f872..9c9269a3 100644 --- a/src/specfact_cli/commands/enforce.py +++ b/src/specfact_cli/commands/enforce.py @@ -19,6 +19,7 @@ from specfact_cli.models.deviation import Deviation, DeviationSeverity, DeviationType, ValidationReport from specfact_cli.models.enforcement import EnforcementConfig, 
EnforcementPreset from specfact_cli.models.sdd import SDDManifest +from specfact_cli.runtime import debug_log_operation, debug_print, is_debug_mode from specfact_cli.telemetry import telemetry from specfact_cli.utils.structure import SpecFactStructure from specfact_cli.utils.yaml_utils import dump_yaml @@ -54,6 +55,9 @@ def stage( specfact enforce stage --preset strict specfact enforce stage --preset minimal """ + if is_debug_mode(): + debug_log_operation("command", "enforce stage", "started", extra={"preset": preset}) + debug_print("[dim]enforce stage: started[/dim]") telemetry_metadata = { "preset": preset.lower(), } @@ -65,6 +69,14 @@ def stage( raise typer.Exit(1) if preset.lower() not in ("minimal", "balanced", "strict"): + if is_debug_mode(): + debug_log_operation( + "command", + "enforce stage", + "failed", + error=f"Unknown preset: {preset}", + extra={"reason": "invalid_preset"}, + ) console.print(f"[bold red]✗[/bold red] Unknown preset: {preset}") console.print("Valid presets: minimal, balanced, strict") raise typer.Exit(1) @@ -75,6 +87,10 @@ def stage( try: preset_enum = EnforcementPreset(preset) except ValueError as err: + if is_debug_mode(): + debug_log_operation( + "command", "enforce stage", "failed", error=str(err), extra={"reason": "invalid_preset"} + ) console.print(f"[bold red]✗[/bold red] Unknown preset: {preset}") console.print("Valid presets: minimal, balanced, strict") raise typer.Exit(1) from err @@ -103,6 +119,11 @@ def stage( dump_yaml(config.model_dump(mode="json"), config_path) record({"config_saved": True, "enabled": config.enabled}) + if is_debug_mode(): + debug_log_operation( + "command", "enforce stage", "success", extra={"preset": preset, "config_path": str(config_path)} + ) + debug_print("[dim]enforce stage: success[/dim]") console.print(f"\n[bold green]✓[/bold green] Enforcement mode set to {preset}") console.print(f"[dim]Configuration saved to: {config_path}[/dim]") @@ -168,6 +189,11 @@ def enforce_sdd( specfact enforce sdd 
auth-module --output-format json --out validation-report.json specfact enforce sdd legacy-api --no-interactive """ + if is_debug_mode(): + debug_log_operation( + "command", "enforce sdd", "started", extra={"bundle": bundle, "output_format": output_format} + ) + debug_print("[dim]enforce sdd: started[/dim]") from rich.console import Console from specfact_cli.models.sdd import SDDManifest @@ -180,6 +206,10 @@ def enforce_sdd( if bundle is None: bundle = SpecFactStructure.get_active_bundle_name(Path(".")) if bundle is None: + if is_debug_mode(): + debug_log_operation( + "command", "enforce sdd", "failed", error="Bundle name required", extra={"reason": "no_bundle"} + ) console.print("[bold red]✗[/bold red] Bundle name required") console.print("[yellow]→[/yellow] Use --bundle option or run 'specfact plan select' to set active plan") raise typer.Exit(1) @@ -203,6 +233,14 @@ def enforce_sdd( base_path = Path(".") bundle_dir = SpecFactStructure.project_dir(base_path=base_path, bundle_name=bundle) if not bundle_dir.exists(): + if is_debug_mode(): + debug_log_operation( + "command", + "enforce sdd", + "failed", + error=f"Bundle not found: {bundle_dir}", + extra={"reason": "bundle_missing"}, + ) console.print(f"[bold red]✗[/bold red] Project bundle not found: {bundle_dir}") console.print(f"[dim]Create one with: specfact plan init {bundle}[/dim]") raise typer.Exit(1) @@ -213,6 +251,14 @@ def enforce_sdd( base_path = Path(".") discovered_sdd = find_sdd_for_bundle(bundle, base_path, sdd) if discovered_sdd is None: + if is_debug_mode(): + debug_log_operation( + "command", + "enforce sdd", + "failed", + error="SDD manifest not found", + extra={"reason": "sdd_not_found", "bundle": bundle}, + ) console.print("[bold red]✗[/bold red] SDD manifest not found") console.print(f"[dim]Searched for: .specfact/projects/{bundle}/sdd.yaml (bundle-specific)[/dim]") console.print(f"[dim]Create one with: specfact plan harden {bundle}[/dim]") @@ -238,6 +284,14 @@ def enforce_sdd( project_hash = 
summary.content_hash if not project_hash: + if is_debug_mode(): + debug_log_operation( + "command", + "enforce sdd", + "failed", + error="Failed to compute project bundle hash", + extra={"reason": "hash_compute_failed"}, + ) console.print("[bold red]✗[/bold red] Failed to compute project bundle hash") raise typer.Exit(1) @@ -464,13 +518,30 @@ def enforce_sdd( console.print(" 2. Add contracts: Review features and add @icontract decorators") console.print(" 3. Re-validate: Run this command again after fixes") + if is_debug_mode(): + debug_log_operation( + "command", + "enforce sdd", + "failed", + error="SDD validation failed", + extra={"reason": "deviations", "total_deviations": report.total_deviations}, + ) record({"passed": False, "deviations": report.total_deviations}) raise typer.Exit(1) console.print("\n[bold green]✓[/bold green] SDD validation passed") record({"passed": True, "deviations": 0}) + if is_debug_mode(): + debug_log_operation( + "command", "enforce sdd", "success", extra={"bundle": bundle, "report_path": str(out)} + ) + debug_print("[dim]enforce sdd: success[/dim]") except Exception as e: + if is_debug_mode(): + debug_log_operation( + "command", "enforce sdd", "failed", error=str(e), extra={"reason": type(e).__name__} + ) console.print(f"[bold red]✗[/bold red] Validation failed: {e}") raise typer.Exit(1) from e diff --git a/src/specfact_cli/commands/generate.py b/src/specfact_cli/commands/generate.py index f0b9361e..e8ac150b 100644 --- a/src/specfact_cli/commands/generate.py +++ b/src/specfact_cli/commands/generate.py @@ -15,7 +15,7 @@ from specfact_cli.generators.contract_generator import ContractGenerator from specfact_cli.migrations.plan_migrator import load_plan_bundle from specfact_cli.models.sdd import SDDManifest -from specfact_cli.runtime import get_configured_console +from specfact_cli.runtime import debug_log_operation, debug_print, get_configured_console, is_debug_mode from specfact_cli.telemetry import telemetry from specfact_cli.utils 
import print_error, print_info, print_success, print_warning from specfact_cli.utils.env_manager import ( @@ -105,6 +105,12 @@ def generate_contracts( "no_interactive": no_interactive, } + if is_debug_mode(): + debug_log_operation( + "command", "generate contracts", "started", extra={"bundle": bundle, "repo": str(repo or ".")} + ) + debug_print("[dim]generate contracts: started[/dim]") + with telemetry.track_command("generate.contracts", telemetry_metadata) as record: try: # Determine repository path @@ -124,6 +130,14 @@ def generate_contracts( if bundle: bundle_dir = SpecFactStructure.project_dir(base_path=base_path, bundle_name=bundle) if not bundle_dir.exists(): + if is_debug_mode(): + debug_log_operation( + "command", + "generate contracts", + "failed", + error=f"Project bundle not found: {bundle_dir}", + extra={"reason": "bundle_not_found", "bundle": bundle}, + ) print_error(f"Project bundle not found: {bundle_dir}") print_info(f"Create one with: specfact plan init {bundle}") raise typer.Exit(1) @@ -135,6 +149,14 @@ def generate_contracts( else: # Use --plan and --sdd paths if provided if plan is None: + if is_debug_mode(): + debug_log_operation( + "command", + "generate contracts", + "failed", + error="Bundle or plan path is required", + extra={"reason": "no_plan_or_bundle"}, + ) print_error("Bundle or plan path is required") print_info("Run 'specfact plan init ' then rerun with --bundle ") raise typer.Exit(1) @@ -188,6 +210,14 @@ def generate_contracts( if fallback_sdd.exists(): sdd_path = fallback_sdd else: + if is_debug_mode(): + debug_log_operation( + "command", + "generate contracts", + "failed", + error=f"SDD manifest not found: {sdd_path}", + extra={"reason": "sdd_not_found"}, + ) print_error(f"SDD manifest not found: {sdd_path}") print_info("Run 'specfact plan harden' to create SDD manifest") raise typer.Exit(1) @@ -283,6 +313,17 @@ def generate_contracts( print_error(f" - {error}") if result["generated_files"]: + if is_debug_mode(): + 
debug_log_operation( + "command", + "generate contracts", + "success", + extra={ + "generated_files": len(result["generated_files"]), + "contracts_dir": str(contracts_dir), + }, + ) + debug_print("[dim]generate contracts: success[/dim]") print_success(f"Generated {len(result['generated_files'])} contract file(s):") for file_path in result["generated_files"]: print_info(f" - {file_path}") @@ -334,6 +375,14 @@ def generate_contracts( print_warning("No contract files generated (no contracts/invariants found in SDD HOW section)") except Exception as e: + if is_debug_mode(): + debug_log_operation( + "command", + "generate contracts", + "failed", + error=str(e), + extra={"reason": type(e).__name__}, + ) print_error(f"Failed to generate contracts: {e}") record({"error": str(e)}) raise typer.Exit(1) from e @@ -573,6 +622,15 @@ def generate_contracts_prompt( print_error(f"Valid types: 'all-contracts', {', '.join(valid_contracts)}") raise typer.Exit(1) + if is_debug_mode(): + debug_log_operation( + "command", + "generate contracts-prompt", + "started", + extra={"files_count": len(file_paths), "bundle": bundle, "contracts": contracts_to_apply}, + ) + debug_print("[dim]generate contracts-prompt: started[/dim]") + telemetry_metadata = { "files_count": len(file_paths), "bundle": bundle, @@ -851,6 +909,14 @@ def generate_contracts_prompt( if output: console.print("[dim]Note: --output option is currently unused. 
Prompts saved to .specfact/prompts/[/dim]") + if is_debug_mode(): + debug_log_operation( + "command", + "generate contracts-prompt", + "success", + extra={"generated_count": generated_count, "failed_count": failed_count}, + ) + debug_print("[dim]generate contracts-prompt: success[/dim]") record( { "prompt_generated": generated_count > 0, @@ -926,6 +992,15 @@ def apply_enhanced_contracts( repo_path = Path(".").resolve() + if is_debug_mode(): + debug_log_operation( + "command", + "generate contracts-apply", + "started", + extra={"enhanced_file": str(enhanced_file), "original_file": str(original_file) if original_file else None}, + ) + debug_print("[dim]generate contracts-apply: started[/dim]") + # Auto-detect original file if not provided if original_file is None: # Try to infer from enhanced file name @@ -1351,6 +1426,14 @@ class ImportResult: # Step 9: Apply changes (only if all validations passed) try: original_file.write_text(enhanced_content, encoding="utf-8") + if is_debug_mode(): + debug_log_operation( + "command", + "generate contracts-apply", + "success", + extra={"original_file": str(original_file.relative_to(repo_path))}, + ) + debug_print("[dim]generate contracts-apply: success[/dim]") print_success(f"Enhanced code applied to: {original_file.relative_to(repo_path)}") console.print("\n[bold green]✓ All validations passed and changes applied successfully![/bold green]") console.print("\n[bold]Next Steps:[/bold]") @@ -1358,6 +1441,14 @@ class ImportResult: console.print("2. Run full test suite: specfact repro (or pytest)") console.print("3. Commit the enhanced code") except Exception as e: + if is_debug_mode(): + debug_log_operation( + "command", + "generate contracts-apply", + "failed", + error=str(e), + extra={"reason": type(e).__name__, "original_file": str(original_file)}, + ) print_error(f"Failed to apply changes: {e}") console.print("\n[yellow]This is a filesystem error. 
Please check file permissions.[/yellow]") raise typer.Exit(1) from e @@ -1447,6 +1538,15 @@ def generate_fix_prompt( "no_interactive": no_interactive, } + if is_debug_mode(): + debug_log_operation( + "command", + "generate fix-prompt", + "started", + extra={"gap_id": gap_id, "bundle": bundle}, + ) + debug_print("[dim]generate fix-prompt: started[/dim]") + with telemetry.track_command("generate.fix-prompt", telemetry_metadata) as record: try: # Determine bundle directory @@ -1675,11 +1775,27 @@ def generate_fix_prompt( console.print("3. Review and apply the suggested changes") console.print("4. Validate with `specfact enforce sdd`") + if is_debug_mode(): + debug_log_operation( + "command", + "generate fix-prompt", + "success", + extra={"gap_id": gap_id, "output": str(output)}, + ) + debug_print("[dim]generate fix-prompt: success[/dim]") record({"action": "generate_prompt", "gap_id": gap_id, "output": str(output)}) except typer.Exit: raise except Exception as e: + if is_debug_mode(): + debug_log_operation( + "command", + "generate fix-prompt", + "failed", + error=str(e), + extra={"reason": type(e).__name__}, + ) print_error(f"Failed to generate fix prompt: {e}") record({"error": str(e)}) raise typer.Exit(1) from e @@ -1762,6 +1878,15 @@ def generate_test_prompt( "no_interactive": no_interactive, } + if is_debug_mode(): + debug_log_operation( + "command", + "generate test-prompt", + "started", + extra={"file": str(file) if file else None, "bundle": bundle}, + ) + debug_print("[dim]generate test-prompt: started[/dim]") + with telemetry.track_command("generate.test-prompt", telemetry_metadata) as record: try: # Determine bundle directory @@ -1970,11 +2095,27 @@ def generate_test_prompt( console.print(f"4. Save to `tests/unit/test_{file.stem}.py`") console.print("5. 
Run tests with `pytest`") + if is_debug_mode(): + debug_log_operation( + "command", + "generate test-prompt", + "success", + extra={"file": str(file_rel), "output": str(output)}, + ) + debug_print("[dim]generate test-prompt: success[/dim]") record({"action": "generate_prompt", "file": str(file_rel), "output": str(output)}) except typer.Exit: raise except Exception as e: + if is_debug_mode(): + debug_log_operation( + "command", + "generate test-prompt", + "failed", + error=str(e), + extra={"reason": type(e).__name__}, + ) print_error(f"Failed to generate test prompt: {e}") record({"error": str(e)}) raise typer.Exit(1) from e diff --git a/src/specfact_cli/commands/import_cmd.py b/src/specfact_cli/commands/import_cmd.py index 9dc285ba..f4084ac4 100644 --- a/src/specfact_cli/commands/import_cmd.py +++ b/src/specfact_cli/commands/import_cmd.py @@ -23,7 +23,7 @@ from specfact_cli.adapters.registry import AdapterRegistry from specfact_cli.models.plan import Feature, PlanBundle from specfact_cli.models.project import BundleManifest, BundleVersions, ProjectBundle -from specfact_cli.runtime import get_configured_console +from specfact_cli.runtime import debug_log_operation, debug_print, get_configured_console, is_debug_mode from specfact_cli.telemetry import telemetry from specfact_cli.utils.performance import track_performance from specfact_cli.utils.progress import save_bundle_with_progress @@ -1949,6 +1949,15 @@ def from_bridge( from specfact_cli.sync.bridge_probe import BridgeProbe from specfact_cli.utils.structure import SpecFactStructure + if is_debug_mode(): + debug_log_operation( + "command", + "import from-bridge", + "started", + extra={"repo": str(repo), "adapter": adapter, "dry_run": dry_run, "write": write}, + ) + debug_print("[dim]import from-bridge: started[/dim]") + # Auto-detect adapter if not specified if adapter == "speckit" or adapter == "auto": probe = BridgeProbe(repo) @@ -1956,6 +1965,14 @@ def from_bridge( # Use detected tool directly (e.g., "speckit", 
"openspec", "github") # BridgeProbe already tries all registered adapters if detected_capabilities.tool == "unknown": + if is_debug_mode(): + debug_log_operation( + "command", + "import from-bridge", + "failed", + error="Could not auto-detect adapter", + extra={"reason": "adapter_unknown"}, + ) console.print("[bold red]✗[/bold red] Could not auto-detect adapter") console.print("[dim]No registered adapter detected this repository structure[/dim]") registered = AdapterRegistry.list_adapters() @@ -2326,6 +2343,18 @@ def from_bridge( console.print("[dim]Semgrep Rules: .semgrep/async-anti-patterns.yml[/dim]") console.print("[dim]GitHub Action: .github/workflows/specfact-gate.yml[/dim]") + if is_debug_mode(): + debug_log_operation( + "command", + "import from-bridge", + "success", + extra={ + "protocol_states": len(protocol.states) if protocol else 0, + "features": len(plan_bundle.features) if plan_bundle else 0, + }, + ) + debug_print("[dim]import from-bridge: success[/dim]") + # Record import results if protocol and plan_bundle: record( @@ -2449,10 +2478,27 @@ def from_code( from specfact_cli.modes import get_router from specfact_cli.utils.structure import SpecFactStructure + if is_debug_mode(): + debug_log_operation( + "command", + "import from-code", + "started", + extra={"bundle": bundle, "repo": str(repo), "force": force, "shadow_only": shadow_only}, + ) + debug_print("[dim]import from-code: started[/dim]") + # Use active plan as default if bundle not provided if bundle is None: bundle = SpecFactStructure.get_active_bundle_name(repo) if bundle is None: + if is_debug_mode(): + debug_log_operation( + "command", + "import from-code", + "failed", + error="Bundle name required", + extra={"reason": "no_bundle"}, + ) console.print("[bold red]✗[/bold red] Bundle name required") console.print("[yellow]→[/yellow] Use --bundle option or run 'specfact plan select' to set active plan") raise typer.Exit(1) @@ -2825,6 +2871,15 @@ def on_incremental_update(features_count: int, 
themes: list[str]) -> None: _generate_report(repo, bundle_dir, plan_bundle, confidence, enrichment, report) + if is_debug_mode(): + debug_log_operation( + "command", + "import from-code", + "success", + extra={"bundle": bundle, "bundle_dir": str(bundle_dir), "report": str(report)}, + ) + debug_print("[dim]import from-code: success[/dim]") + # Phase 4.10: Print performance report if slow operations detected perf_report = perf_monitor.get_report() if perf_report.slow_operations and not os.environ.get("CI"): @@ -2838,5 +2893,13 @@ def on_incremental_update(features_count: int, themes: list[str]) -> None: # Re-raise typer.Exit (used for clean exits) raise except Exception as e: + if is_debug_mode(): + debug_log_operation( + "command", + "import from-code", + "failed", + error=str(e), + extra={"reason": type(e).__name__, "bundle": bundle}, + ) console.print(f"[bold red]✗ Import failed:[/bold red] {e}") raise typer.Exit(1) from e diff --git a/src/specfact_cli/commands/init.py b/src/specfact_cli/commands/init.py index d913a619..e05de7d3 100644 --- a/src/specfact_cli/commands/init.py +++ b/src/specfact_cli/commands/init.py @@ -17,7 +17,7 @@ from rich.console import Console from rich.panel import Panel -from specfact_cli.runtime import debug_print +from specfact_cli.runtime import debug_log_operation, debug_print, is_debug_mode from specfact_cli.telemetry import telemetry from specfact_cli.utils.env_manager import EnvManager, build_tool_command, detect_env_manager from specfact_cli.utils.ide_setup import ( @@ -460,7 +460,16 @@ def init( except Exception as e: console.print(f"[yellow]⚠[/yellow] Error with __file__ fallback: {type(e).__name__}: {e}") + if templates_dir and templates_dir.exists() and is_debug_mode(): + debug_log_operation("template_resolution", str(templates_dir), "success") if not templates_dir or not templates_dir.exists(): + if is_debug_mode() and tried_locations: + debug_log_operation( + "template_resolution", + str(tried_locations[-1]) if tried_locations 
else "unknown", + "failure", + error="Templates directory not found after all attempts", + ) console.print() console.print("[red]Error:[/red] Templates directory not found after all attempts") console.print() diff --git a/src/specfact_cli/commands/migrate.py b/src/specfact_cli/commands/migrate.py index ad862065..f2c486fd 100644 --- a/src/specfact_cli/commands/migrate.py +++ b/src/specfact_cli/commands/migrate.py @@ -17,6 +17,7 @@ from rich.console import Console from specfact_cli.models.plan import Feature +from specfact_cli.runtime import debug_log_operation, debug_print, is_debug_mode from specfact_cli.utils import print_error, print_info, print_success, print_warning from specfact_cli.utils.progress import load_bundle_with_progress, save_bundle_with_progress from specfact_cli.utils.structure import SpecFactStructure @@ -71,6 +72,15 @@ def cleanup_legacy( specfact migrate cleanup-legacy --repo . --dry-run specfact migrate cleanup-legacy --repo . --force # Remove even if files exist """ + if is_debug_mode(): + debug_log_operation( + "command", + "migrate cleanup-legacy", + "started", + extra={"repo": str(repo), "dry_run": dry_run, "force": force}, + ) + debug_print("[dim]migrate cleanup-legacy: started[/dim]") + specfact_dir = repo / SpecFactStructure.ROOT if not specfact_dir.exists(): console.print(f"[yellow]⚠[/yellow] No .specfact directory found at {specfact_dir}") @@ -159,6 +169,14 @@ def cleanup_legacy( ) if removed_count == 0 and skipped_count == 0: console.print("[dim]No legacy directories found to remove[/dim]") + if is_debug_mode(): + debug_log_operation( + "command", + "migrate cleanup-legacy", + "success", + extra={"removed_count": removed_count, "skipped_count": skipped_count}, + ) + debug_print("[dim]migrate cleanup-legacy: success[/dim]") @app.command("to-contracts") @@ -252,6 +270,15 @@ def to_contracts( "dry_run": dry_run, } + if is_debug_mode(): + debug_log_operation( + "command", + "migrate to-contracts", + "started", + extra={"bundle": bundle, 
"repo": str(repo_path), "dry_run": dry_run}, + ) + debug_print("[dim]migrate to-contracts: started[/dim]") + with telemetry.track_command("migrate.to_contracts", telemetry_metadata) as record: console.print(f"[bold cyan]Migrating bundle:[/bold cyan] {bundle}") console.print(f"[dim]Repository:[/dim] {repo_path}") @@ -413,6 +440,18 @@ def to_contracts( if clean_verbose_specs: console.print(f"[dim]Would clean verbose specs in {verbose_specs_cleaned} stories[/dim]") + if is_debug_mode(): + debug_log_operation( + "command", + "migrate to-contracts", + "success", + extra={ + "contracts_created": contracts_created, + "contracts_validated": contracts_validated, + "verbose_specs_cleaned": verbose_specs_cleaned, + }, + ) + debug_print("[dim]migrate to-contracts: success[/dim]") record( { "contracts_created": contracts_created, @@ -422,6 +461,14 @@ def to_contracts( ) except Exception as e: + if is_debug_mode(): + debug_log_operation( + "command", + "migrate to-contracts", + "failed", + error=str(e), + extra={"reason": type(e).__name__}, + ) print_error(f"Migration failed: {e}") record({"error": str(e)}) raise typer.Exit(1) from e @@ -612,6 +659,15 @@ def migrate_artifacts( print_error("No project bundles found. Create one with 'specfact plan init' or 'specfact import from-code'") raise typer.Exit(1) + if is_debug_mode(): + debug_log_operation( + "command", + "migrate artifacts", + "started", + extra={"bundles": bundles_to_migrate, "repo": str(repo_path), "dry_run": dry_run}, + ) + debug_print("[dim]migrate artifacts: started[/dim]") + console.print(f"[bold cyan]Migrating artifacts for {len(bundles_to_migrate)} bundle(s)[/bold cyan]") if dry_run: print_warning("DRY RUN MODE - No changes will be made") @@ -664,6 +720,19 @@ def migrate_artifacts( if dry_run: print_warning("DRY RUN - No changes were made. 
Run without --dry-run to perform migration.") + if is_debug_mode(): + debug_log_operation( + "command", + "migrate artifacts", + "success", + extra={ + "bundles_processed": len(bundles_to_migrate), + "total_moved": total_moved, + "total_errors": total_errors, + }, + ) + debug_print("[dim]migrate artifacts: success[/dim]") + def _migrate_reports(base_path: Path, bundle_name: str, bundle_dir: Path, dry_run: bool, backup: bool) -> int: """Migrate reports from global location to bundle-specific location.""" diff --git a/src/specfact_cli/commands/plan.py b/src/specfact_cli/commands/plan.py index f7c7d9fa..556458dd 100644 --- a/src/specfact_cli/commands/plan.py +++ b/src/specfact_cli/commands/plan.py @@ -29,6 +29,7 @@ from specfact_cli.models.project import BundleManifest, BundleVersions, ProjectBundle from specfact_cli.models.sdd import SDDHow, SDDManifest, SDDWhat, SDDWhy from specfact_cli.modes import detect_mode +from specfact_cli.runtime import debug_log_operation, debug_print, is_debug_mode from specfact_cli.telemetry import telemetry from specfact_cli.utils import ( display_summary, @@ -103,6 +104,15 @@ def init( "scaffold": scaffold, } + if is_debug_mode(): + debug_log_operation( + "command", + "plan init", + "started", + extra={"bundle": bundle, "interactive": interactive, "scaffold": scaffold}, + ) + debug_print("[dim]plan init: started[/dim]") + with telemetry.track_command("plan.init", telemetry_metadata) as record: print_section("SpecFact CLI - Project Bundle Builder") @@ -118,6 +128,14 @@ def init( # Get project bundle directory bundle_dir = SpecFactStructure.project_dir(bundle_name=bundle) if bundle_dir.exists(): + if is_debug_mode(): + debug_log_operation( + "command", + "plan init", + "failed", + error=f"Project bundle already exists: {bundle_dir}", + extra={"reason": "bundle_exists", "bundle": bundle}, + ) print_error(f"Project bundle already exists: {bundle_dir}") print_info("Use a different bundle name or remove the existing bundle") raise 
typer.Exit(1) @@ -147,12 +165,28 @@ def init( } ) + if is_debug_mode(): + debug_log_operation( + "command", + "plan init", + "success", + extra={"bundle": bundle, "bundle_dir": str(bundle_dir)}, + ) + debug_print("[dim]plan init: success[/dim]") print_success(f"Project bundle created successfully: {bundle_dir}") except KeyboardInterrupt: print_warning("\nBundle creation cancelled") raise typer.Exit(1) from None except Exception as e: + if is_debug_mode(): + debug_log_operation( + "command", + "plan init", + "failed", + error=str(e), + extra={"reason": type(e).__name__, "bundle": bundle}, + ) print_error(f"Failed to create bundle: {e}") raise typer.Exit(1) from e diff --git a/src/specfact_cli/commands/project_cmd.py b/src/specfact_cli/commands/project_cmd.py index 77b65b0b..00b1c3be 100644 --- a/src/specfact_cli/commands/project_cmd.py +++ b/src/specfact_cli/commands/project_cmd.py @@ -27,6 +27,7 @@ ProjectMetadata, SectionLock, ) +from specfact_cli.runtime import debug_log_operation, debug_print, is_debug_mode from specfact_cli.utils import print_error, print_info, print_section, print_success, print_warning from specfact_cli.utils.progress import load_bundle_with_progress, save_bundle_with_progress from specfact_cli.utils.structure import SpecFactStructure @@ -407,6 +408,14 @@ def export_persona( specfact project export --bundle legacy-api --persona architect --output-dir docs/plans specfact project export --bundle legacy-api --persona developer --stdout """ + if is_debug_mode(): + debug_log_operation( + "command", + "project export", + "started", + extra={"repo": str(repo), "bundle": bundle, "persona": persona}, + ) + debug_print("[dim]project export: started[/dim]") # Get bundle name if bundle is None: @@ -417,6 +426,14 @@ def export_persona( plans = SpecFactStructure.list_plans(repo) if not plans: + if is_debug_mode(): + debug_log_operation( + "command", + "project export", + "failed", + error="No project bundles found", + extra={"reason": "no_bundles"}, + ) 
print_error("No project bundles found") raise typer.Exit(1) bundle_names = [str(p["name"]) for p in plans if p.get("name")] @@ -436,6 +453,14 @@ def export_persona( # Get bundle directory bundle_dir = SpecFactStructure.project_dir(base_path=repo, bundle_name=bundle) if not bundle_dir.exists(): + if is_debug_mode(): + debug_log_operation( + "command", + "project export", + "failed", + error=f"Project bundle not found: {bundle_dir}", + extra={"reason": "bundle_not_found", "bundle": bundle}, + ) print_error(f"Project bundle not found: {bundle_dir}") raise typer.Exit(1) @@ -542,6 +567,14 @@ def export_persona( print_error(f"Export failed: {e}") raise typer.Exit(1) from e + if is_debug_mode(): + debug_log_operation( + "command", + "project export", + "success", + extra={"bundle": bundle, "persona": persona, "output_path": str(output_path)}, + ) + debug_print("[dim]project export: success[/dim]") print_success(f"Exported persona '{persona}' sections to {output_path}") print_info(f"Template: {persona}.md.j2") @@ -605,6 +638,15 @@ def import_persona( specfact project import --bundle legacy-api --persona product-owner --input product-owner.md specfact project import --bundle legacy-api --persona architect --input architect.md --dry-run """ + if is_debug_mode(): + debug_log_operation( + "command", + "project import", + "started", + extra={"repo": str(repo), "bundle": bundle, "persona": persona, "input_file": str(input_file)}, + ) + debug_print("[dim]project import: started[/dim]") + from specfact_cli.models.persona_template import PersonaTemplate, SectionType, TemplateSection from specfact_cli.parsers.persona_importer import PersonaImporter, PersonaImportError @@ -898,6 +940,14 @@ def lock_section( specfact project lock --bundle legacy-api --section idea --persona product-owner specfact project lock --bundle legacy-api --section "features.*.stories" --persona product-owner """ + if is_debug_mode(): + debug_log_operation( + "command", + "project lock", + "started", + 
extra={"repo": str(repo), "bundle": bundle, "section": section, "persona": persona}, + ) + debug_print("[dim]project lock: started[/dim]") # Get bundle name if bundle is None: diff --git a/src/specfact_cli/commands/repro.py b/src/specfact_cli/commands/repro.py index 1c68d7d7..e8da6538 100644 --- a/src/specfact_cli/commands/repro.py +++ b/src/specfact_cli/commands/repro.py @@ -17,6 +17,7 @@ from rich.progress import Progress, SpinnerColumn, TextColumn, TimeElapsedColumn from rich.table import Table +from specfact_cli.runtime import debug_log_operation, debug_print, is_debug_mode from specfact_cli.telemetry import telemetry from specfact_cli.utils.env_manager import check_tool_in_env, detect_env_manager, detect_source_directories from specfact_cli.utils.structure import SpecFactStructure @@ -206,6 +207,15 @@ def main( if ctx.invoked_subcommand is not None: return + if is_debug_mode(): + debug_log_operation( + "command", + "repro", + "started", + extra={"repo": str(repo), "budget": budget, "sidecar": sidecar, "sidecar_bundle": sidecar_bundle}, + ) + debug_print("[dim]repro: started[/dim]") + # Type checking for parameters (after subcommand check) if not _is_valid_repo_path(repo): raise typer.BadParameter("Repo path must exist and be directory") @@ -299,6 +309,18 @@ def main( console.print(f" [dim]Skipped: {report.skipped_checks}[/dim]") console.print(f" Total duration: {report.total_duration:.2f}s") + if is_debug_mode(): + debug_log_operation( + "command", + "repro", + "success", + extra={ + "total_checks": report.total_checks, + "passed": report.passed_checks, + "failed": report.failed_checks, + }, + ) + debug_print("[dim]repro: success[/dim]") record_event( { "checks_total": report.total_checks, @@ -407,6 +429,7 @@ def setup( """ Set up CrossHair configuration for contract exploration. + Automatically generates [tool.crosshair] configuration in pyproject.toml to enable contract exploration with CrossHair during repro runs. 
@@ -487,11 +510,22 @@ def setup( } if _update_pyproject_crosshair_config(pyproject_path, crosshair_config): + if is_debug_mode(): + debug_log_operation("command", "repro setup", "success", extra={"pyproject_path": str(pyproject_path)}) + debug_print("[dim]repro setup: success[/dim]") console.print(f"[green]✓[/green] Updated {pyproject_path.relative_to(repo)} with CrossHair configuration") console.print("\n[bold]CrossHair Configuration:[/bold]") for key, value in crosshair_config.items(): console.print(f" {key} = {value}") else: + if is_debug_mode(): + debug_log_operation( + "command", + "repro setup", + "failed", + error=f"Failed to update {pyproject_path}", + extra={"reason": "update_failed"}, + ) console.print(f"[red]✗[/red] Failed to update {pyproject_path.relative_to(repo)}") raise typer.Exit(1) diff --git a/src/specfact_cli/commands/sdd.py b/src/specfact_cli/commands/sdd.py index 2d15854a..90d6b102 100644 --- a/src/specfact_cli/commands/sdd.py +++ b/src/specfact_cli/commands/sdd.py @@ -16,7 +16,7 @@ from rich.table import Table from specfact_cli.enrichers.constitution_enricher import ConstitutionEnricher -from specfact_cli.runtime import get_configured_console +from specfact_cli.runtime import debug_log_operation, debug_print, get_configured_console, is_debug_mode from specfact_cli.utils import print_error, print_info, print_success from specfact_cli.utils.sdd_discovery import list_all_sdds from specfact_cli.utils.structure import SpecFactStructure @@ -73,6 +73,10 @@ def sdd_list( specfact sdd list specfact sdd list --repo /path/to/repo """ + if is_debug_mode(): + debug_log_operation("command", "sdd list", "started", extra={"repo": str(repo)}) + debug_print("[dim]sdd list: started[/dim]") + console.print("\n[bold cyan]SpecFact CLI - SDD Manifest List[/bold cyan]") console.print("=" * 60) @@ -80,6 +84,9 @@ def sdd_list( all_sdds = list_all_sdds(base_path) if not all_sdds: + if is_debug_mode(): + debug_log_operation("command", "sdd list", "success", 
extra={"count": 0, "reason": "none_found"}) + debug_print("[dim]sdd list: success (none found)[/dim]") console.print("[yellow]No SDD manifests found[/yellow]") console.print(f"[dim]Searched in: {base_path / SpecFactStructure.PROJECTS}/*/sdd.yaml[/dim]") console.print( @@ -141,6 +148,9 @@ def sdd_list( console.print() console.print(table) console.print(f"\n[dim]Total SDD manifests: {len(all_sdds)}[/dim]") + if is_debug_mode(): + debug_log_operation("command", "sdd list", "success", extra={"count": len(all_sdds)}) + debug_print("[dim]sdd list: success[/dim]") # Show layout information bundle_specific_count = sum(1 for path, _ in all_sdds if "/projects/" in str(path) or "\\projects\\" in str(path)) diff --git a/src/specfact_cli/commands/spec.py b/src/specfact_cli/commands/spec.py index 32af4b59..934e0a3b 100644 --- a/src/specfact_cli/commands/spec.py +++ b/src/specfact_cli/commands/spec.py @@ -28,6 +28,7 @@ generate_specmatic_tests, validate_spec_with_specmatic, ) +from specfact_cli.runtime import debug_log_operation, debug_print, is_debug_mode from specfact_cli.utils import print_error, print_info, print_success, print_warning, prompt_text from specfact_cli.utils.progress import load_bundle_with_progress from specfact_cli.utils.structure import SpecFactStructure @@ -105,6 +106,15 @@ def validate( """ from specfact_cli.telemetry import telemetry + if is_debug_mode(): + debug_log_operation( + "command", + "spec validate", + "started", + extra={"spec_path": str(spec_path) if spec_path else None, "bundle": bundle}, + ) + debug_print("[dim]spec validate: started[/dim]") + repo_path = Path(".").resolve() # Use active plan as default if bundle not provided @@ -123,6 +133,14 @@ def validate( # Load all contracts from bundle bundle_dir = SpecFactStructure.project_dir(base_path=repo_path, bundle_name=bundle) if not bundle_dir.exists(): + if is_debug_mode(): + debug_log_operation( + "command", + "spec validate", + "failed", + error=f"Project bundle not found: {bundle_dir}", + 
extra={"reason": "bundle_not_found", "bundle": bundle}, + ) print_error(f"Project bundle not found: {bundle_dir}") raise typer.Exit(1) @@ -209,6 +227,14 @@ def validate( # Check if Specmatic is available is_available, error_msg = check_specmatic_available() if not is_available: + if is_debug_mode(): + debug_log_operation( + "command", + "spec validate", + "failed", + error=error_msg or "Specmatic not available", + extra={"reason": "specmatic_unavailable"}, + ) print_error(f"Specmatic not available: {error_msg}") console.print("\n[bold]Installation:[/bold]") console.print("Visit https://docs.specmatic.io/ for installation instructions") @@ -385,6 +411,15 @@ def format_details(items: list[str], max_length: int = 100) -> str: console.print(f" - {error}") failed_count += 1 + if is_debug_mode(): + debug_log_operation( + "command", + "spec validate", + "success", + extra={"validated": validated_count, "skipped": skipped_count, "failed": failed_count}, + ) + debug_print("[dim]spec validate: success[/dim]") + # Summary if len(spec_paths) > 1: console.print("\n[bold]Summary:[/bold]") diff --git a/src/specfact_cli/commands/sync.py b/src/specfact_cli/commands/sync.py index ec243406..49ef6496 100644 --- a/src/specfact_cli/commands/sync.py +++ b/src/specfact_cli/commands/sync.py @@ -23,7 +23,7 @@ from specfact_cli.adapters.registry import AdapterRegistry from specfact_cli.models.bridge import AdapterType from specfact_cli.models.plan import Feature, PlanBundle -from specfact_cli.runtime import get_configured_console +from specfact_cli.runtime import debug_log_operation, debug_print, get_configured_console, is_debug_mode from specfact_cli.telemetry import telemetry from specfact_cli.utils.terminal import get_progress_config @@ -1231,6 +1231,15 @@ def sync_bridge( See docs/guides/devops-adapter-integration.md for complete documentation. 
""" + if is_debug_mode(): + debug_log_operation( + "command", + "sync bridge", + "started", + extra={"repo": str(repo), "bundle": bundle, "adapter": adapter, "bidirectional": bidirectional}, + ) + debug_print("[dim]sync bridge: started[/dim]") + # Auto-detect adapter if not specified from specfact_cli.sync.bridge_probe import BridgeProbe @@ -1899,6 +1908,9 @@ def sync_callback(changes: list[FileChange]) -> None: overwrite=overwrite, adapter_type=adapter_type, ) + if is_debug_mode(): + debug_log_operation("command", "sync bridge", "success", extra={"adapter": adapter, "bundle": bundle}) + debug_print("[dim]sync bridge: success[/dim]") record({"sync_completed": True}) @@ -1961,6 +1973,15 @@ def sync_repository( Example: specfact sync repository --repo . --confidence 0.5 """ + if is_debug_mode(): + debug_log_operation( + "command", + "sync repository", + "started", + extra={"repo": str(repo), "target": str(target) if target else None, "watch": watch}, + ) + debug_print("[dim]sync repository: started[/dim]") + from specfact_cli.sync.repository_sync import RepositorySync telemetry_metadata = { @@ -2061,6 +2082,14 @@ def sync_callback(changes: list[FileChange]) -> None: task = progress.add_task("Tracking deviations...", total=None) progress.update(task, description=f"✓ Found {len(result.deviations)} deviations") + if is_debug_mode(): + debug_log_operation( + "command", + "sync repository", + "success", + extra={"code_changes": len(result.code_changes)}, + ) + debug_print("[dim]sync repository: success[/dim]") # Record sync results record( { @@ -2182,6 +2211,14 @@ def sync_intelligent( specfact sync intelligent my-bundle --repo . --watch specfact sync intelligent my-bundle --repo . 
--code-to-spec auto --spec-to-code llm-prompt --tests specmatic """ + if is_debug_mode(): + debug_log_operation( + "command", + "sync intelligent", + "started", + extra={"bundle": bundle, "repo": str(repo), "watch": watch}, + ) + debug_print("[dim]sync intelligent: started[/dim]") from specfact_cli.utils.structure import SpecFactStructure @@ -2308,4 +2345,7 @@ def sync_callback(_changes: list) -> None: else: perform_sync() + if is_debug_mode(): + debug_log_operation("command", "sync intelligent", "success", extra={"bundle": bundle}) + debug_print("[dim]sync intelligent: success[/dim]") record({"sync_completed": True}) diff --git a/src/specfact_cli/commands/update.py b/src/specfact_cli/commands/update.py index 20fa499a..86ecef30 100644 --- a/src/specfact_cli/commands/update.py +++ b/src/specfact_cli/commands/update.py @@ -21,6 +21,7 @@ from rich.prompt import Confirm from specfact_cli import __version__ +from specfact_cli.runtime import debug_log_operation, debug_print, is_debug_mode from specfact_cli.utils.metadata import update_metadata from specfact_cli.utils.startup_checks import check_pypi_version @@ -208,15 +209,40 @@ def upgrade( # Check and install without confirmation specfact upgrade --yes """ + if is_debug_mode(): + debug_log_operation( + "command", + "upgrade", + "started", + extra={"check_only": check_only, "yes": yes}, + ) + debug_print("[dim]upgrade: started[/dim]") + # Check for updates console.print("[cyan]Checking for updates...[/cyan]") version_result = check_pypi_version() if version_result.error: + if is_debug_mode(): + debug_log_operation( + "command", + "upgrade", + "failed", + error=version_result.error or "Unknown error", + extra={"reason": "check_error"}, + ) console.print(f"[red]Error checking for updates: {version_result.error}[/red]") sys.exit(1) if not version_result.update_available: + if is_debug_mode(): + debug_log_operation( + "command", + "upgrade", + "success", + extra={"reason": "up_to_date", "version": 
version_result.current_version}, + ) + debug_print("[dim]upgrade: success (up to date)[/dim]") console.print(f"[green]✓ You're up to date![/green] (version {version_result.current_version})") # Update metadata even if no update available from datetime import datetime @@ -260,9 +286,20 @@ def upgrade( success = install_update(method, yes=yes) if success: + if is_debug_mode(): + debug_log_operation("command", "upgrade", "success", extra={"reason": "installed"}) + debug_print("[dim]upgrade: success[/dim]") console.print("\n[green]✓ Update complete![/green]") console.print("[dim]Run 'specfact --version' to verify the new version.[/dim]") else: + if is_debug_mode(): + debug_log_operation( + "command", + "upgrade", + "failed", + error="Update was not installed", + extra={"reason": "install_failed"}, + ) console.print("\n[yellow]Update was not installed.[/yellow]") console.print("[dim]You can manually update using the command shown above.[/dim]") sys.exit(1) diff --git a/src/specfact_cli/commands/validate.py b/src/specfact_cli/commands/validate.py index 55f97f51..8a27ac83 100644 --- a/src/specfact_cli/commands/validate.py +++ b/src/specfact_cli/commands/validate.py @@ -13,7 +13,7 @@ from beartype import beartype from icontract import require -from specfact_cli.runtime import get_configured_console +from specfact_cli.runtime import debug_log_operation, debug_print, get_configured_console, is_debug_mode from specfact_cli.validators.sidecar.crosshair_summary import format_summary_line from specfact_cli.validators.sidecar.models import SidecarConfig from specfact_cli.validators.sidecar.orchestrator import initialize_sidecar_workspace, run_sidecar_validation @@ -142,6 +142,15 @@ def init( **Next steps:** After initialization, run `specfact validate sidecar run` to execute validation. 
""" + if is_debug_mode(): + debug_log_operation( + "command", + "validate sidecar init", + "started", + extra={"bundle_name": bundle_name, "repo_path": str(repo_path)}, + ) + debug_print("[dim]validate sidecar init: started[/dim]") + config = SidecarConfig.create(bundle_name, repo_path) console.print(f"[bold]Initializing sidecar workspace for bundle: {bundle_name}[/bold]") @@ -152,6 +161,14 @@ def init( if config.django_settings_module: console.print(f" Django settings: {config.django_settings_module}") else: + if is_debug_mode(): + debug_log_operation( + "command", + "validate sidecar init", + "failed", + error="Failed to initialize sidecar workspace", + extra={"reason": "init_failed", "bundle_name": bundle_name}, + ) console.print("[red]✗[/red] Failed to initialize sidecar workspace") raise typer.Exit(1) @@ -197,10 +214,25 @@ def run( ``` **Output:** + - Validation results displayed in console - Reports saved to `.specfact/projects//reports/sidecar/` - Progress indicators for long-running operations """ + if is_debug_mode(): + debug_log_operation( + "command", + "validate sidecar run", + "started", + extra={ + "bundle_name": bundle_name, + "repo_path": str(repo_path), + "run_crosshair": run_crosshair, + "run_specmatic": run_specmatic, + }, + ) + debug_print("[dim]validate sidecar run: started[/dim]") + config = SidecarConfig.create(bundle_name, repo_path) config.tools.run_crosshair = run_crosshair config.tools.run_specmatic = run_specmatic @@ -271,3 +303,12 @@ def run( success = value.get("success", False) status = "[green]✓[/green]" if success else "[red]✗[/red]" console.print(f" {status} {key}") + + if is_debug_mode(): + debug_log_operation( + "command", + "validate sidecar run", + "success", + extra={"bundle_name": bundle_name, "routes_extracted": results.get("routes_extracted", 0)}, + ) + debug_print("[dim]validate sidecar run: success[/dim]") diff --git a/src/specfact_cli/common/logger_setup.py b/src/specfact_cli/common/logger_setup.py index 
36b5d2aa..bea8e829 100644 --- a/src/specfact_cli/common/logger_setup.py +++ b/src/specfact_cli/common/logger_setup.py @@ -78,6 +78,74 @@ def get_runtime_logs_dir() -> str: return runtime_logs_dir +@beartype +@ensure(lambda result: isinstance(result, str) and len(result) > 0, "Must return non-empty string path") +def get_specfact_home_logs_dir() -> str: + """ + Get the path to the user-level debug logs directory (~/.specfact/logs) and ensure it exists. + + Used when --debug is enabled to write debug output to a persistent location. + Creates the directory with mode 0o755 on first use. + + Returns: + str: Path to ~/.specfact/logs (expanded from HOME). + """ + logs_dir = os.path.join(os.path.expanduser("~"), ".specfact", "logs") + with contextlib.suppress(PermissionError): + os.makedirs(logs_dir, mode=0o755, exist_ok=True) + return os.path.abspath(logs_dir) + + +# Rich markup pattern: [tag] or [/tag] (e.g. [dim], [/dim], [bold]) +_RICH_MARKUP_PATTERN = re.compile(r"\[/?[^\]]*\]") + + +@beartype +@require(lambda text: isinstance(text, str), "Text must be string") +@ensure(lambda result: isinstance(result, str), "Must return string") +def plain_text_for_debug_log(text: str) -> str: + """ + Convert Rich-marked or other formatted text to plain text suitable for debug log files. + + Strips Rich markup (e.g. [dim], [/dim], [bold]) and normalizes whitespace so the + log file contains readable plain text without console formatting codes. + + Use this when writing debug log content that may contain Rich markup, so callers + can pass the same string to console (with markup) and to file (plain) without + maintaining two versions. + + Args: + text: String that may contain Rich markup or extra whitespace. + + Returns: + Plain-text string with markup removed and whitespace normalized. 
+ """ + stripped = _RICH_MARKUP_PATTERN.sub("", text) + return " ".join(stripped.split()) + + +@beartype +@ensure(lambda result: isinstance(result, str), "Must return string") +def format_debug_log_message(*args: Any, **kwargs: Any) -> str: + """ + Format print-style arguments into a single plain-text line for debug log files. + + Use this when writing debug log content that mirrors console.print(*args, **kwargs). + Strips Rich markup and normalizes whitespace so call sites do not duplicate + formatting or markup-stripping logic. kwargs are ignored for file output but + accepted for signature compatibility with print. + + Args: + *args: Objects to stringify and join (same as passed to console.print). + **kwargs: Ignored for file output; present for compatibility with print. + + Returns: + Plain-text string suitable for writing to the debug log file. + """ + line = " ".join(str(a) for a in args) + return plain_text_for_debug_log(line) if line else "" + + class MessageFlowFormatter(logging.Formatter): """ Custom formatter that recognizes message flow patterns and formats them accordingly diff --git a/src/specfact_cli/runtime.py b/src/specfact_cli/runtime.py index e124ec86..cb8a8402 100644 --- a/src/specfact_cli/runtime.py +++ b/src/specfact_cli/runtime.py @@ -7,19 +7,33 @@ from __future__ import annotations +import inspect +import json +import logging import os import sys from enum import Enum +from logging.handlers import RotatingFileHandler +from typing import Any from beartype import beartype from icontract import ensure from rich.console import Console +from specfact_cli.common.logger_setup import ( + LoggerSetup, + format_debug_log_message, + get_specfact_home_logs_dir, +) from specfact_cli.modes import OperationalMode from specfact_cli.utils.structured_io import StructuredFormat from specfact_cli.utils.terminal import detect_terminal_capabilities, get_console_config +DEBUG_LOG_DATEFMT = "%Y-%m-%d %H:%M:%S" +DEBUG_LOG_FORMAT = "%(asctime)s | %(message)s" + + 
class TerminalMode(str, Enum): """Terminal output modes for Rich Console and Progress.""" @@ -34,6 +48,8 @@ class TerminalMode(str, Enum): _non_interactive_override: bool | None = None _debug_mode: bool = False _console_cache: dict[TerminalMode, Console] = {} +_debug_file_handler: RotatingFileHandler | None = None +_debug_logger: logging.Logger | None = None @beartype @@ -175,18 +191,126 @@ def get_configured_console() -> Console: return _console_cache[mode] +def _get_debug_caller() -> str: + """Return module:function for the caller of debug_print/debug_log_operation (first frame outside runtime).""" + for frame_info in inspect.stack(): + module = inspect.getmodule(frame_info.frame) + if module is None: + continue + name = getattr(module, "__name__", "") or "" + if "specfact_cli.runtime" in name: + continue + func = frame_info.function + return f"{name}:{func}" + return "unknown" + + +def _ensure_debug_log_file() -> None: + """Initialize debug log file under ~/.specfact/logs when debug is on (lazy, once per run).""" + global _debug_file_handler, _debug_logger + if _debug_logger is not None: + return + try: + logs_dir = get_specfact_home_logs_dir() + log_path = os.path.join(logs_dir, "specfact-debug.log") + _debug_file_handler = RotatingFileHandler( + log_path, + maxBytes=5 * 1024 * 1024, + backupCount=5, + mode="a", + encoding="utf-8", + ) + _debug_file_handler.setLevel(logging.DEBUG) + _debug_file_handler.setFormatter(logging.Formatter(DEBUG_LOG_FORMAT, datefmt=DEBUG_LOG_DATEFMT)) + _debug_logger = logging.getLogger("specfact.debug") + _debug_logger.setLevel(logging.DEBUG) + _debug_logger.propagate = False + _debug_logger.handlers.clear() + _debug_logger.addHandler(_debug_file_handler) + except (OSError, PermissionError): + _debug_file_handler = None + _debug_logger = None + + @beartype -def debug_print(*args, **kwargs) -> None: +def init_debug_log_file() -> None: + """ + Ensure debug log file is initialized when debug mode is on. 
+ + Call this after set_debug_mode(True) so that the first debug_print or + debug_log_operation writes to ~/.specfact/logs/specfact-debug.log immediately. + """ + if _debug_mode: + _ensure_debug_log_file() + + +def _append_debug_log(*args: Any, **kwargs: Any) -> None: + """Write print-style message to the debug log file. No-op if debug off or file unavailable.""" + if not _debug_mode: + return + _ensure_debug_log_file() + if _debug_logger is None: + return + plain = format_debug_log_message(*args, **kwargs) + if plain: + caller = _get_debug_caller() + _debug_logger.debug("%s | %s", caller, plain) + + +@beartype +def debug_print(*args: Any, **kwargs: Any) -> None: """ Print debug messages only if debug mode is enabled. This function behaves like console.print() but only outputs when --debug flag is set. - Use this for diagnostic information, URL construction details, authentication status, etc. + Also writes the same content to ~/.specfact/logs/specfact-debug.log when debug is on. Args: *args: Arguments to pass to console.print() **kwargs: Keyword arguments to pass to console.print() """ if _debug_mode: - console = get_configured_console() - console.print(*args, **kwargs) + get_configured_console().print(*args, **kwargs) + _append_debug_log(*args, **kwargs) + + +@beartype +def debug_log_operation( + operation: str, + target: str, + status: str, + error: str | None = None, + extra: dict[str, object] | None = None, + caller: str | None = None, +) -> None: + """ + Log structured operation metadata to the debug log file when debug mode is enabled. + + No-op when debug is off. When debug is on, writes a structured line (operation, target, + status, error, extra, caller) to ~/.specfact/logs/specfact-debug.log. Redacts target/extra + via LoggerSetup.redact_secrets. Caller (module/method) is included when provided or inferred. + + Args: + operation: Operation type (e.g. file_read, api_request). + target: Target path or URL (will be redacted). 
+ status: Status or result (e.g. success, 200, failure, prepared, finished, failed). + error: Optional error message. + extra: Optional dict of extra fields (will be redacted); for API: payload, response, reason. + caller: Optional module:function for context; if None, inferred from call stack. + """ + if not _debug_mode: + return + _ensure_debug_log_file() + if _debug_logger is None: + return + payload: dict[str, object] = { + "operation": operation, + "target": LoggerSetup.redact_secrets(target), + "status": status, + "caller": caller if caller is not None else _get_debug_caller(), + } + if error is not None: + payload["error"] = error + if extra is not None: + payload["extra"] = LoggerSetup.redact_secrets(extra) + _debug_logger.debug("debug_log_operation %s", json.dumps(payload, default=str)) diff --git a/tests/e2e/test_openspec_bridge_workflow.py b/tests/e2e/test_openspec_bridge_workflow.py index befaa03c..ee7c4aa1 100644 --- a/tests/e2e/test_openspec_bridge_workflow.py +++ b/tests/e2e/test_openspec_bridge_workflow.py @@ -6,6 +6,8 @@ from __future__ import annotations +import subprocess +import sys from pathlib import Path from textwrap import dedent @@ -322,28 +324,28 @@ def test_openspec_cross_repo_workflow(self, tmp_path: Path) -> None: main_repo = tmp_path / "main-repo" main_repo.mkdir() - # Sync with external base path - result = runner.invoke( - app, - [ - "sync", - "bridge", - "--repo", - str(main_repo), - "--adapter", - "openspec", - "--mode", - "read-only", - "--external-base-path", - str(external_repo), - ], + # Sync with external base path. Use subprocess to avoid "I/O operation on + # closed file" when Click's isolation streams are closed by Azure Identity + # or Rich during the command (runner.invoke then fails in its finally block). 
+        args = [
+            "sync",
+            "bridge",
+            "--repo",
+            str(main_repo),
+            "--adapter",
+            "openspec",
+            "--mode",
+            "read-only",
+            "--external-base-path",
+            str(external_repo),
+        ]
+        proc = subprocess.run(
+            [sys.executable, "-m", "specfact_cli", *args],
+            capture_output=True,
+            text=True,
+            timeout=60,
         )
-
-        # Access stdout immediately to prevent I/O operation on closed file error
-        _ = result.stdout
-
-        # Should succeed
-        assert result.exit_code == 0
+        assert proc.returncode == 0, f"stdout: {proc.stdout!r}\nstderr: {proc.stderr!r}"
 
         # Verify bundle was created in main repo
         bundle_dir = main_repo / SpecFactStructure.PROJECTS / "main"
diff --git a/tests/e2e/test_semgrep_integration_e2e.py b/tests/e2e/test_semgrep_integration_e2e.py
index 3462d66c..7d98db0d 100644
--- a/tests/e2e/test_semgrep_integration_e2e.py
+++ b/tests/e2e/test_semgrep_integration_e2e.py
@@ -18,6 +18,7 @@
 class TestSemgrepIntegrationE2E:
     """End-to-end tests for Semgrep integration in CodeAnalyzer."""
 
+    @pytest.mark.timeout(30)
     def test_semgrep_detects_fastapi_routes(self):
         """Test that Semgrep detects FastAPI routes and enhances features."""
         with tempfile.TemporaryDirectory() as tmpdir:
@@ -73,6 +74,7 @@ def delete_user(user_id: int):
             # API endpoint detection adds +0.1 to confidence
             assert feature.confidence >= 0.3
 
+    @pytest.mark.timeout(30)
     def test_semgrep_detects_flask_routes(self):
         """Test that Semgrep detects Flask routes and enhances features."""
         with tempfile.TemporaryDirectory() as tmpdir:
diff --git a/tests/unit/specfact_cli/common/__init__.py b/tests/unit/specfact_cli/common/__init__.py
new file mode 100644
index 00000000..48f176ec
--- /dev/null
+++ b/tests/unit/specfact_cli/common/__init__.py
@@ -0,0 +1 @@
+"""Unit tests for specfact_cli.common."""
diff --git a/tests/unit/specfact_cli/common/test_logger_setup.py b/tests/unit/specfact_cli/common/test_logger_setup.py
new file mode 100644
index 00000000..98f4d332
--- /dev/null
+++ b/tests/unit/specfact_cli/common/test_logger_setup.py
@@ -0,0 +1,100 @@
+"""
+Unit tests for specfact_cli.common.logger_setup.
+"""
+
+from __future__ import annotations
+
+import os
+from pathlib import Path
+
+import pytest
+
+from specfact_cli.common.logger_setup import (
+    format_debug_log_message,
+    get_specfact_home_logs_dir,
+    plain_text_for_debug_log,
+)
+
+
+class TestGetSpecfactHomeLogsDir:
+    """Tests for get_specfact_home_logs_dir()."""
+
+    def test_returns_non_empty_string(self) -> None:
+        """Result is a non-empty string path."""
+        result = get_specfact_home_logs_dir()
+        assert isinstance(result, str)
+        assert len(result) > 0
+
+    def test_path_contains_specfact_logs(self) -> None:
+        """Returned path ends with .specfact/logs or contains .specfact and logs."""
+        result = get_specfact_home_logs_dir()
+        assert ".specfact" in result
+        assert "logs" in result
+
+    def test_creates_directory_on_first_use(self, tmp_path: Path) -> None:
+        """Directory is created when function is called (temp HOME)."""
+        home = tmp_path / "fake_home"
+        home.mkdir()
+        logs_expected = home / ".specfact" / "logs"
+        assert not logs_expected.exists()
+        with pytest.MonkeyPatch.context() as m:
+            # Bind the real expanduser at definition time (default arg); referencing
+            # os.path.expanduser inside the lambda body would resolve to the patched
+            # lambda itself and recurse infinitely for any argument other than "~".
+            m.setattr(
+                os.path,
+                "expanduser",
+                lambda x, _real=os.path.expanduser: str(home) if x == "~" else _real(x),
+            )
+            result = get_specfact_home_logs_dir()
+            assert os.path.isdir(result)
+            assert result == str(logs_expected.resolve())
+
+    def test_idempotent_second_call(self, tmp_path: Path) -> None:
+        """Second call returns same path and does not fail."""
+        home = tmp_path / "fake_home"
+        home.mkdir()
+        with pytest.MonkeyPatch.context() as m:
+            m.setattr(
+                os.path,
+                "expanduser",
+                lambda x, _real=os.path.expanduser: str(home) if x == "~" else _real(x),
+            )
+            first = get_specfact_home_logs_dir()
+            second = get_specfact_home_logs_dir()
+            assert first == second
+            assert os.path.isdir(first)
+
+
+class TestPlainTextForDebugLog:
+    """Tests for plain_text_for_debug_log()."""
+
+    def test_strips_rich_markup(self) -> None:
+        """Rich markup like [dim], [/dim], [bold] is removed."""
+        assert plain_text_for_debug_log("hello [dim]world[/dim]") == "hello world"
+        assert plain_text_for_debug_log("[bold]x[/bold]") == "x"
+
+    def test_normalizes_whitespace(self) -> None:
+        """Multiple spaces and newlines are collapsed to single space."""
+        assert plain_text_for_debug_log("a b c") == "a b c"
+
+    def test_returns_plain_string_unchanged(self) -> None:
+        """Plain text is returned with only whitespace normalized."""
+        assert plain_text_for_debug_log("plain message") == "plain message"
+
+
+class TestFormatDebugLogMessage:
+    """Tests for format_debug_log_message()."""
+
+    def test_joins_args_and_strips_markup(self) -> None:
+        """*args are joined and Rich markup is stripped."""
+        assert format_debug_log_message("hello", "[dim]world[/dim]") == "hello world"
+
+    def test_single_arg(self) -> None:
+        """Single argument is stringified and stripped."""
+        assert format_debug_log_message("[bold]x[/bold]") == "x"
+
+    def test_empty_args_returns_empty_string(self) -> None:
+        """No args returns empty string."""
+        assert format_debug_log_message() == ""
+
+    def test_kwargs_ignored(self) -> None:
+        """**kwargs do not affect output (signature compatibility with print)."""
+        assert format_debug_log_message("msg", style="bold") == "msg"
diff --git a/tests/unit/test_runtime.py b/tests/unit/test_runtime.py
index 0bedc885..02f2afb2 100644
--- a/tests/unit/test_runtime.py
+++ b/tests/unit/test_runtime.py
@@ -5,10 +5,13 @@
 from __future__ import annotations
 
 import os
+import re
+from pathlib import Path
 from unittest.mock import patch
 
 from specfact_cli.runtime import (
     TerminalMode,
+    debug_log_operation,
     debug_print,
     get_configured_console,
     get_terminal_mode,
@@ -139,3 +142,48 @@ def test_debug_print_when_disabled(self) -> None:
         set_debug_mode(False)
         # Should not raise exception, but output should be suppressed
         debug_print("test message")
+
+    def test_debug_print_writes_to_file_when_debug_on(self, tmp_path: Path) -> None:
+        """When debug is on, debug_print also writes to debug log file."""
+        with (
+            patch("specfact_cli.runtime.get_specfact_home_logs_dir", return_value=str(tmp_path)),
+            patch("specfact_cli.runtime._debug_logger", None),
+            patch("specfact_cli.runtime._debug_file_handler", None),
+        ):
+            import specfact_cli.runtime as runtime_mod
+
+            runtime_mod._debug_logger = None
+            runtime_mod._debug_file_handler = None
+            set_debug_mode(True)
+            debug_print("hello debug")
+            log_file = tmp_path / "specfact-debug.log"
+            assert log_file.exists()
+            content = log_file.read_text()
+            assert "hello debug" in content
+            # Formatter adds timestamp (YYYY-MM-DD HH:MM:SS) and caller (module:function)
+            assert re.search(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \| .+ \| hello debug", content)
+
+    def test_debug_log_operation_no_op_when_debug_off(self) -> None:
+        """debug_log_operation does nothing when debug is off."""
+        set_debug_mode(False)
+        debug_log_operation("file_read", "/tmp/foo", "success")
+        # No exception; no file written (we don't create log dir when off)
+
+    def test_debug_log_operation_writes_when_debug_on(self, tmp_path: Path) -> None:
+        """When debug is on, debug_log_operation writes structured line to file."""
+        with (
+            patch("specfact_cli.runtime.get_specfact_home_logs_dir", return_value=str(tmp_path)),
+        ):
+            import specfact_cli.runtime as runtime_mod
+
+            runtime_mod._debug_logger = None
+            runtime_mod._debug_file_handler = None
+            set_debug_mode(True)
+            debug_log_operation("api_request", "https://example.com", "200", error=None, extra=None)
+            log_file = tmp_path / "specfact-debug.log"
+            assert log_file.exists()
+            content = log_file.read_text()
+            assert "debug_log_operation" in content
+            assert "api_request" in content
+            assert "200" in content
+            assert '"caller"' in content