diff --git a/.eslintrc.json b/.eslintrc.json index 5643685..dbd94ba 100644 --- a/.eslintrc.json +++ b/.eslintrc.json @@ -13,12 +13,18 @@ "linebreak-style": ["error", "unix"], "quotes": ["error", "single"], "semi": ["error", "always"], - "no-unused-vars": ["warn", { "argsIgnorePattern": "^_", "varsIgnorePattern": "^_", "caughtErrorsIgnorePattern": "^_" }], + "no-unused-vars": ["error", { "argsIgnorePattern": "^_", "varsIgnorePattern": "^_", "caughtErrorsIgnorePattern": "^_" }], "no-console": ["warn", { "allow": ["warn", "error"] }], "no-trailing-spaces": ["error"], "eol-last": ["error", "always"], "comma-dangle": ["error", "only-multiline"], "object-curly-spacing": ["error", "always"], "array-bracket-spacing": ["error", "never"] - } + }, + "overrides": [ + { + "files": ["tests/**/*.js", "examples/**/*.js", "debug/**/*.js", "scripts/**/*.{js,mjs,cjs}"], + "rules": { "no-console": "off" } + } + ] } \ No newline at end of file diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 6a2573d..a7fae7b 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -13,7 +13,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest, windows-latest] - node: [18, 20, 22] + node: [20, 22, 24] steps: - uses: actions/checkout@v5 - uses: actions/setup-node@v5 diff --git a/AGENTS.md b/AGENTS.md index 411043b..27a4b49 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,7 +1,7 @@ # GitNexus โ€” Code Intelligence -This project is indexed by GitNexus as **claude-code-ssh** (1326 symbols, 3627 relationships, 110 execution flows). Use the GitNexus MCP tools to understand code, assess impact, and navigate safely. +This project is indexed by GitNexus as **claude-code-ssh** (1340 symbols, 3668 relationships, 111 execution flows). Use the GitNexus MCP tools to understand code, assess impact, and navigate safely. > If any GitNexus tool warns the index is stale, run `npx gitnexus analyze` in terminal first. 
diff --git a/CLAUDE.md b/CLAUDE.md index 4e481b9..eb444af 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -97,7 +97,7 @@ The server exposes these tools to Claude Code and OpenAI Codex: - `ssh_health_check`: Comprehensive server health check (CPU, RAM, Disk, Network) - `ssh_service_status`: Check status of services (nginx, mysql, docker, etc.) - `ssh_process_manager`: List, monitor, or kill processes -- `ssh_alert_setup`: Configure health monitoring alerts and thresholds +- `ssh_alert_setup`: Configure CPU/memory/disk thresholds per server; `check` action compares live metrics to thresholds ### Database Management (v2.3+) - `ssh_db_dump`: Create database dumps (MySQL, PostgreSQL, MongoDB) @@ -207,7 +207,7 @@ Configuration is stored in `~/.config/claude-code/claude_code_config.json` # GitNexus โ€” Code Intelligence -This project is indexed by GitNexus as **claude-code-ssh** (1326 symbols, 3627 relationships, 110 execution flows). Use the GitNexus MCP tools to understand code, assess impact, and navigate safely. +This project is indexed by GitNexus as **claude-code-ssh** (1340 symbols, 3668 relationships, 111 execution flows). Use the GitNexus MCP tools to understand code, assess impact, and navigate safely. > If any GitNexus tool warns the index is stale, run `npx gitnexus analyze` in terminal first. 
diff --git a/debug/test-efaje-connection.js b/debug/test-efaje-connection.js index 6f35343..9d5469d 100644 --- a/debug/test-efaje-connection.js +++ b/debug/test-efaje-connection.js @@ -1,7 +1,6 @@ #!/usr/bin/env node import { Client } from 'ssh2'; -import fs from 'fs'; import path from 'path'; import { fileURLToPath } from 'url'; import dotenv from 'dotenv'; @@ -83,4 +82,4 @@ conn.connect({ debug: (info) => { console.log('[SSH2 DEBUG]', info); } -}); \ No newline at end of file +}); diff --git a/debug/test-host-verification.js b/debug/test-host-verification.js index 9b46e34..b448a8d 100644 --- a/debug/test-host-verification.js +++ b/debug/test-host-verification.js @@ -1,7 +1,7 @@ #!/usr/bin/env node import SSHManager from '../src/ssh-manager.js'; -import { isHostKnown, getCurrentHostKey, removeHostKey } from '../src/ssh-key-manager.js'; +import { isHostKnown, getCurrentHostKey } from '../src/ssh-key-manager.js'; import dotenv from 'dotenv'; import path from 'path'; import { fileURLToPath } from 'url'; @@ -12,7 +12,6 @@ const __dirname = path.dirname(__filename); dotenv.config({ path: path.join(__dirname, '..', '.env') }); // Test configuration -const testServer = 'efaje_staging'; const config = { host: process.env.SSH_SERVER_EFAJE_STAGING_HOST || '35.198.113.119', port: parseInt(process.env.SSH_SERVER_EFAJE_STAGING_PORT) || 14072, @@ -65,4 +64,4 @@ try { } console.log(''); -console.log('โœ… All tests passed!'); \ No newline at end of file +console.log('โœ… All tests passed!'); diff --git a/debug/test-logger.js b/debug/test-logger.js index cec4ac5..b219dcb 100755 --- a/debug/test-logger.js +++ b/debug/test-logger.js @@ -3,13 +3,13 @@ /** * Test script for the logger system * Run with different environment variables to test different modes: - * + * * SSH_VERBOSE=true node debug/test-logger.js * SSH_LOG_LEVEL=DEBUG node debug/test-logger.js * SSH_LOG_LEVEL=ERROR node debug/test-logger.js */ -import { logger, LOG_LEVELS } from '../src/logger.js'; +import { logger } 
from '../src/logger.js'; console.log('๐Ÿงช Testing Logger System'); console.log('========================'); @@ -79,8 +79,8 @@ setTimeout(() => { history.forEach(entry => { console.log(` - [${entry.timestamp}] ${entry.server}: ${entry.command?.substring(0, 50)} - ${entry.success ? 'โœ…' : 'โŒ'}`); }); - + console.log('\nโœ… Logger test complete!'); console.log(`Log file: ${process.env.SSH_LOG_FILE || '.ssh-manager.log'}`); - console.log(`History file: .ssh-command-history.json`); -}, 500); \ No newline at end of file + console.log('History file: .ssh-command-history.json'); +}, 500); diff --git a/debug/test-mcp.js b/debug/test-mcp.js index d40fca4..c33c297 100644 --- a/debug/test-mcp.js +++ b/debug/test-mcp.js @@ -37,7 +37,7 @@ server.on('error', (error) => { // Wait for server to start setTimeout(() => { console.log('\n๐Ÿ“‹ Sending initialization request...\n'); - + // Send initialization request const initRequest = { jsonrpc: '2.0', @@ -52,22 +52,22 @@ setTimeout(() => { }, id: 1 }; - + server.stdin.write(JSON.stringify(initRequest) + '\n'); - + // Wait and then request tools list setTimeout(() => { console.log('\n๐Ÿ”ง Requesting tools list...\n'); - + const toolsRequest = { jsonrpc: '2.0', method: 'tools/list', params: {}, id: 2 }; - + server.stdin.write(JSON.stringify(toolsRequest) + '\n'); - + // Give time for response then exit setTimeout(() => { console.log('\nโœ… Test complete. 
Shutting down...'); @@ -75,4 +75,4 @@ setTimeout(() => { process.exit(0); }, 2000); }, 1000); -}, 500); \ No newline at end of file +}, 500); diff --git a/debug/test-ssh-command.js b/debug/test-ssh-command.js index 7fae79e..840b475 100644 --- a/debug/test-ssh-command.js +++ b/debug/test-ssh-command.js @@ -53,13 +53,13 @@ setTimeout(() => { }, id: ++requestId }; - + server.stdin.write(JSON.stringify(initRequest) + '\n'); - + // Test ssh_list_servers setTimeout(() => { console.log('\n๐Ÿ“‹ Testing ssh_list_servers...\n'); - + const listRequest = { jsonrpc: '2.0', method: 'tools/call', @@ -69,13 +69,13 @@ setTimeout(() => { }, id: ++requestId }; - + server.stdin.write(JSON.stringify(listRequest) + '\n'); - + // Test ssh_execute setTimeout(() => { console.log('\n๐Ÿš€ Testing ssh_execute (ls -la)...\n'); - + const execRequest = { jsonrpc: '2.0', method: 'tools/call', @@ -88,13 +88,13 @@ setTimeout(() => { }, id: ++requestId }; - + server.stdin.write(JSON.stringify(execRequest) + '\n'); - + // Test ssh_execute with working directory setTimeout(() => { console.log('\n๐Ÿ“ Testing ssh_execute with working directory...\n'); - + const execCwdRequest = { jsonrpc: '2.0', method: 'tools/call', @@ -108,9 +108,9 @@ setTimeout(() => { }, id: ++requestId }; - + server.stdin.write(JSON.stringify(execCwdRequest) + '\n'); - + // Exit after tests setTimeout(() => { console.log('\nโœ… All tests complete. 
Shutting down...'); @@ -120,4 +120,4 @@ setTimeout(() => { }, 2000); }, 2000); }, 1000); -}, 500); \ No newline at end of file +}, 500); diff --git a/docs/TOOL_MANAGEMENT.md b/docs/TOOL_MANAGEMENT.md index 208e014..8248fac 100644 --- a/docs/TOOL_MANAGEMENT.md +++ b/docs/TOOL_MANAGEMENT.md @@ -88,7 +88,7 @@ System health checks and monitoring: - `ssh_process_manager` - List, monitor, or kill processes - `ssh_monitor` - Real-time system resource monitoring - `ssh_tail` - Tail log files in real-time -- `ssh_alert_setup` - Configure health monitoring alerts and thresholds +- `ssh_alert_setup` - Configure CPU/memory/disk thresholds; `check` compares live health_check to thresholds **When to use**: Enable for server administration, DevOps work, or troubleshooting. diff --git a/examples/backup-workflow.js b/examples/backup-workflow.js index 1c8692d..ed8f265 100644 --- a/examples/backup-workflow.js +++ b/examples/backup-workflow.js @@ -1,9 +1,12 @@ +// @ts-nocheck /** * Backup Workflow Examples for claude-code-ssh * * This file demonstrates various backup and restore workflows * that can be executed through Claude Code or OpenAI Codex. + * The declarations below are illustrative; they are not executed here. 
*/ +/* eslint-disable no-unused-vars, no-undef */ // ============================================================================ // EXAMPLE 1: Simple MySQL Backup Before Deployment @@ -255,7 +258,7 @@ Complete deployment workflow with backup safety net async function preDeploymentWorkflow() { // Step 1: Create backup - console.log("Creating pre-deployment backup..."); + console.log('Creating pre-deployment backup...'); const backup = await createBackup({ server: 'production', type: 'mysql', @@ -266,28 +269,28 @@ async function preDeploymentWorkflow() { console.log(`Backup created: ${backup.backup_id}`); // Step 2: Deploy changes - console.log("Deploying new version..."); + console.log('Deploying new version...'); await deploy({ server: 'production', branch: 'main' }); // Step 3: Run health check - console.log("Running health checks..."); + console.log('Running health checks...'); const health = await healthCheck({ server: 'production' }); // Step 4: If deployment fails, restore backup if (!health.success) { - console.error("Deployment failed! Rolling back..."); + console.error('Deployment failed! 
Rolling back...'); await restoreBackup({ server: 'production', backupId: backup.backup_id }); - console.log("Rollback completed"); + console.log('Rollback completed'); } else { - console.log("Deployment successful!"); + console.log('Deployment successful!'); } } @@ -316,7 +319,7 @@ async function disasterRecovery() { }); if (!yesterdayBackup) { - throw new Error("No backup found from yesterday"); + throw new Error('No backup found from yesterday'); } // Step 3: Restore @@ -326,7 +329,7 @@ async function disasterRecovery() { backupId: yesterdayBackup.id }); - console.log("Recovery completed successfully"); + console.log('Recovery completed successfully'); } // ============================================================================ @@ -400,33 +403,31 @@ const monthlyCompliance = { // CRON SCHEDULE REFERENCE // ============================================================================ -/* -Common cron schedules: - -Daily: - - "0 2 * * *" // Every day at 2 AM - - "0 0 * * *" // Every day at midnight - -Hourly: - - "0 * * * *" // Every hour at minute 0 - - "0 */6 * * *" // Every 6 hours - -Weekly: - - "0 0 * * 0" // Every Sunday at midnight - - "0 3 * * 1" // Every Monday at 3 AM - -Monthly: - - "0 0 1 * *" // 1st of month at midnight - - "0 2 15 * *" // 15th of month at 2 AM - -Weekdays: - - "0 1 * * 1-5" // Mon-Fri at 1 AM - -Custom: - - "*/30 * * * *" // Every 30 minutes - - "0 */4 * * *" // Every 4 hours - - "0 9-17 * * *" // Every hour from 9 AM to 5 PM -*/ +// Common cron schedules: +// +// Daily: +// "0 2 * * *" -> Every day at 2 AM +// "0 0 * * *" -> Every day at midnight +// +// Hourly: +// "0 * * * *" -> Every hour at minute 0 +// "0 */6 * * *" -> Every 6 hours +// +// Weekly: +// "0 0 * * 0" -> Every Sunday at midnight +// "0 3 * * 1" -> Every Monday at 3 AM +// +// Monthly: +// "0 0 1 * *" -> 1st of month at midnight +// "0 2 15 * *" -> 15th of month at 2 AM +// +// Weekdays: +// "0 1 * * 1-5" -> Mon-Fri at 1 AM +// +// Custom: +// "*/30 * * * *" -> 
Every 30 minutes +// "0 */4 * * *" -> Every 4 hours +// "0 9-17 * * *" -> Every hour from 9 AM to 5 PM // ============================================================================ // NOTES @@ -466,7 +467,7 @@ Best Practices: - Test disaster recovery procedures */ -module.exports = { +export { mysqlBackup, postgresBackup, filesBackup, diff --git a/package.json b/package.json index 85a63a6..f34061f 100644 --- a/package.json +++ b/package.json @@ -18,7 +18,7 @@ "test:tools": "node tests/test-tool-registry.js", "test:all": "npm test && ./scripts/validate.sh", "validate": "./scripts/validate.sh", - "lint": "eslint src/*.js", + "lint": "eslint 'src/**/*.js'", "format": "prettier --write 'src/**/*.js' 'tests/**/*.js'" }, "keywords": [ @@ -54,6 +54,6 @@ "prettier": "^3.2.4" }, "engines": { - "node": ">=18.0.0" + "node": ">=20.19.0" } } diff --git a/src/backup-manager.js b/src/backup-manager.js deleted file mode 100644 index 4107bbc..0000000 --- a/src/backup-manager.js +++ /dev/null @@ -1,468 +0,0 @@ -/** - * Backup Manager for claude-code-ssh - * Handles creation, listing, restoration, and scheduling of backups - * Supports databases (MySQL, PostgreSQL, MongoDB) and file backups - */ - -import path from 'path'; -import crypto from 'crypto'; -import { logger } from './logger.js'; - -// Backup types -export const BACKUP_TYPES = { - MYSQL: 'mysql', - POSTGRESQL: 'postgresql', - MONGODB: 'mongodb', - FILES: 'files', - FULL: 'full' -}; - -// Default backup directory -export const DEFAULT_BACKUP_DIR = '/var/backups/ssh-manager'; - -/** - * Generate unique backup ID - */ -export function generateBackupId(type, name) { - const timestamp = new Date().toISOString().replace(/[:.]/g, '-'); - const random = crypto.randomBytes(4).toString('hex'); - return `${type}_${name}_${timestamp}_${random}`; -} - -/** - * Get backup metadata file path - */ -export function getBackupMetadataPath(backupId, backupDir = DEFAULT_BACKUP_DIR) { - return path.join(backupDir, `${backupId}.meta.json`); -} 
- -/** - * Get backup file path - */ -export function getBackupFilePath(backupId, backupDir = DEFAULT_BACKUP_DIR, extension = '.gz') { - return path.join(backupDir, `${backupId}${extension}`); -} - -/** - * Build MySQL dump command - */ -export function buildMySQLDumpCommand(options) { - const { - database, - user, - password, - host = 'localhost', - port = 3306, - outputFile, - singleTransaction = true, - compress = true - } = options; - - let command = 'mysqldump'; - - // Connection parameters - if (user) command += ` -u${user}`; - if (password) command += ` -p'${password}'`; - if (host) command += ` -h ${host}`; - if (port) command += ` -P ${port}`; - - // Dump options - if (singleTransaction) command += ' --single-transaction'; - command += ' --routines --triggers'; - - // Database name - command += ` ${database}`; - - // Output handling - if (compress) { - command += ` | gzip > "${outputFile}"`; - } else { - command += ` > "${outputFile}"`; - } - - return command; -} - -/** - * Build PostgreSQL dump command - */ -export function buildPostgreSQLDumpCommand(options) { - const { - database, - user, - password, - host = 'localhost', - port = 5432, - outputFile, - compress = true - } = options; - - // PostgreSQL uses PGPASSWORD environment variable - let command = ''; - if (password) { - command = `PGPASSWORD='${password}' `; - } - - command += 'pg_dump'; - - // Connection parameters - if (user) command += ` -U ${user}`; - if (host) command += ` -h ${host}`; - if (port) command += ` -p ${port}`; - - // Dump options - command += ' --format=custom --clean --if-exists'; - - // Database name - command += ` ${database}`; - - // Output handling - if (compress) { - command += ` | gzip > "${outputFile}"`; - } else { - command += ` > "${outputFile}"`; - } - - return command; -} - -/** - * Build MongoDB dump command - */ -export function buildMongoDBDumpCommand(options) { - const { - database, - user, - password, - host = 'localhost', - port = 27017, - outputDir, - compress 
= true - } = options; - - let command = 'mongodump'; - - // Connection parameters - if (host) command += ` --host ${host}`; - if (port) command += ` --port ${port}`; - if (user) command += ` --username ${user}`; - if (password) command += ` --password '${password}'`; - - // Database selection - if (database) command += ` --db ${database}`; - - // Output directory - command += ` --out "${outputDir}"`; - - // Compress the output directory - if (compress) { - const archiveName = `${outputDir}.tar.gz`; - command += ` && tar -czf "${archiveName}" -C "$(dirname ${outputDir})" "$(basename ${outputDir})"`; - command += ` && rm -rf "${outputDir}"`; - } - - return command; -} - -/** - * Build files backup command (tar + gzip) - */ -export function buildFilesBackupCommand(options) { - const { - paths, - outputFile, - exclude = [], - compress = true - } = options; - - if (!Array.isArray(paths) || paths.length === 0) { - throw new Error('paths must be a non-empty array'); - } - - let command = 'tar'; - - // Compression flag - if (compress) { - command += ' -czf'; - } else { - command += ' -cf'; - } - - // Output file - command += ` "${outputFile}"`; - - // Exclude patterns - for (const pattern of exclude) { - command += ` --exclude="${pattern}"`; - } - - // Paths to backup - command += ` ${paths.map(p => `"${p}"`).join(' ')}`; - - return command; -} - -/** - * Build backup restore command based on type - */ -export function buildRestoreCommand(backupType, backupFile, options = {}) { - switch (backupType) { - case BACKUP_TYPES.MYSQL: - return buildMySQLRestoreCommand(backupFile, options); - case BACKUP_TYPES.POSTGRESQL: - return buildPostgreSQLRestoreCommand(backupFile, options); - case BACKUP_TYPES.MONGODB: - return buildMongoDBRestoreCommand(backupFile, options); - case BACKUP_TYPES.FILES: - return buildFilesRestoreCommand(backupFile, options); - default: - throw new Error(`Unknown backup type: ${backupType}`); - } -} - -/** - * Build MySQL restore command - */ -function 
buildMySQLRestoreCommand(backupFile, options) { - const { - database, - user, - password, - host = 'localhost', - port = 3306 - } = options; - - let command = ''; - - // Decompress if needed - if (backupFile.endsWith('.gz')) { - command = `gunzip -c "${backupFile}" | `; - } else { - command = `cat "${backupFile}" | `; - } - - command += 'mysql'; - - // Connection parameters - if (user) command += ` -u${user}`; - if (password) command += ` -p'${password}'`; - if (host) command += ` -h ${host}`; - if (port) command += ` -P ${port}`; - if (database) command += ` ${database}`; - - return command; -} - -/** - * Build PostgreSQL restore command - */ -function buildPostgreSQLRestoreCommand(backupFile, options) { - const { - database, - user, - password, - host = 'localhost', - port = 5432 - } = options; - - let command = ''; - if (password) { - command = `PGPASSWORD='${password}' `; - } - - command += 'pg_restore'; - - // Connection parameters - if (user) command += ` -U ${user}`; - if (host) command += ` -h ${host}`; - if (port) command += ` -p ${port}`; - if (database) command += ` -d ${database}`; - - // Restore options - command += ' --clean --if-exists'; - - // Handle compressed files - if (backupFile.endsWith('.gz')) { - command = `gunzip -c "${backupFile}" | ${command}`; - } else { - command += ` "${backupFile}"`; - } - - return command; -} - -/** - * Build MongoDB restore command - */ -function buildMongoDBRestoreCommand(backupFile, options) { - const { - user, - password, - host = 'localhost', - port = 27017, - drop = true - } = options; - - let command = ''; - - // Extract if compressed - if (backupFile.endsWith('.tar.gz')) { - const extractDir = backupFile.replace('.tar.gz', ''); - command = `tar -xzf "${backupFile}" -C "$(dirname ${backupFile})" && `; - command += 'mongorestore'; - - if (drop) command += ' --drop'; - if (host) command += ` --host ${host}`; - if (port) command += ` --port ${port}`; - if (user) command += ` --username ${user}`; - if (password) 
command += ` --password '${password}'`; - - command += ` "${extractDir}"`; - command += ` && rm -rf "${extractDir}"`; - } else { - command = 'mongorestore'; - if (drop) command += ' --drop'; - if (host) command += ` --host ${host}`; - if (port) command += ` --port ${port}`; - if (user) command += ` --username ${user}`; - if (password) command += ` --password '${password}'`; - command += ` "${backupFile}"`; - } - - return command; -} - -/** - * Build files restore command - */ -function buildFilesRestoreCommand(backupFile, options) { - const { targetPath = '/' } = options; - - let command = 'tar'; - - // Auto-detect compression - if (backupFile.endsWith('.gz') || backupFile.endsWith('.tgz')) { - command += ' -xzf'; - } else { - command += ' -xf'; - } - - command += ` "${backupFile}"`; - command += ` -C "${targetPath}"`; - - return command; -} - -/** - * Create backup metadata object - */ -export function createBackupMetadata(backupId, type, options = {}) { - return { - id: backupId, - type, - created_at: new Date().toISOString(), - server: options.server || 'unknown', - database: options.database || null, - paths: options.paths || [], - size: null, // Will be filled after backup - compressed: options.compress !== false, - retention: options.retention || 7, // days - status: 'pending', - error: null - }; -} - -/** - * Build command to save metadata to remote server - */ -export function buildSaveMetadataCommand(metadata, metadataPath) { - const jsonData = JSON.stringify(metadata, null, 2); - // Escape single quotes in JSON for shell - const escapedJson = jsonData.replace(/'/g, '\'\\\'\''); - return `echo '${escapedJson}' > "${metadataPath}"`; -} - -/** - * Build command to list backups from remote server - */ -export function buildListBackupsCommand(backupDir = DEFAULT_BACKUP_DIR, type = null) { - let command = `find "${backupDir}" -name "*.meta.json" -type f`; - - if (type) { - command += ` | grep "${type}_"`; - } - - // Read and parse each metadata file - command 
+= ' | while read -r file; do cat "$file"; echo "---"; done'; - - return command; -} - -/** - * Parse list backups output - */ -export function parseBackupsList(output) { - if (!output || !output.trim()) { - return []; - } - - const backups = []; - const metadataBlocks = output.split('---').filter(b => b.trim()); - - for (const block of metadataBlocks) { - try { - const metadata = JSON.parse(block.trim()); - backups.push(metadata); - } catch (error) { - logger.warn('Failed to parse backup metadata', { error: error.message, block }); - } - } - - // Sort by created_at descending - return backups.sort((a, b) => new Date(b.created_at) - new Date(a.created_at)); -} - -/** - * Build cleanup old backups command (based on retention) - */ -export function buildCleanupCommand(backupDir = DEFAULT_BACKUP_DIR, retentionDays = 7) { - // Find backup files older than retention period and delete them - return `find "${backupDir}" -name "*_*_*" -type f -mtime +${retentionDays} -delete`; -} - -/** - * Build cron schedule command - */ -export function buildCronScheduleCommand(schedule, backupCommand, cronComment) { - // Add cron job with comment - const cronLine = `${schedule} ${backupCommand} # ${cronComment}`; - return `(crontab -l 2>/dev/null; echo '${cronLine}') | crontab -`; -} - -/** - * Parse cron list output - */ -export function parseCronJobs(output) { - if (!output || !output.trim()) { - return []; - } - - const jobs = []; - const lines = output.split('\n'); - - for (const line of lines) { - if (line.trim() && !line.startsWith('#') && line.includes('ssh-manager-backup')) { - const parts = line.split('#'); - const schedule = parts[0].trim(); - const comment = parts[1] ? 
parts[1].trim() : ''; - - jobs.push({ - schedule, - comment, - command: schedule.split(/\s+/).slice(5).join(' ') - }); - } - } - - return jobs; -} diff --git a/src/config-loader.js b/src/config-loader.js index 91e2d46..cd54ae6 100644 --- a/src/config-loader.js +++ b/src/config-loader.js @@ -237,10 +237,13 @@ export class ConfigLoader { async saveToCodexConfig(codexConfigPath = path.join(os.homedir(), '.codex', 'config.toml')) { let config = {}; - // Load existing config if it exists - if (fs.existsSync(codexConfigPath)) { + // Load existing config if it exists (read directly, ignore ENOENT -- avoids + // the check-then-act race existsSync + readFileSync would introduce). + try { const content = fs.readFileSync(codexConfigPath, 'utf8'); config = TOML.parse(content); + } catch (e) { + if (e.code !== 'ENOENT') throw e; } // Add MCP server configuration diff --git a/src/config.js b/src/config.js index 31b19fc..4dd1b0b 100644 --- a/src/config.js +++ b/src/config.js @@ -1,70 +1,55 @@ -// Configuration constants for claude-code-ssh - -// Output limits to prevent Claude Code crashes -export const OUTPUT_LIMITS = { - // Maximum length of stdout/stderr in responses (characters) - MAX_OUTPUT_LENGTH: process.env.MCP_SSH_MAX_OUTPUT_LENGTH - ? parseInt(process.env.MCP_SSH_MAX_OUTPUT_LENGTH) - : 10000, - - // Maximum length for log file tailing - MAX_TAIL_LINES: process.env.MCP_SSH_MAX_TAIL_LINES - ? parseInt(process.env.MCP_SSH_MAX_TAIL_LINES) - : 100, - - // Maximum length for rsync verbose output - MAX_RSYNC_OUTPUT: process.env.MCP_SSH_MAX_RSYNC_OUTPUT - ? parseInt(process.env.MCP_SSH_MAX_RSYNC_OUTPUT) - : 5000, -}; - -// Timeout configuration -export const TIMEOUTS = { - // Default command execution timeout (milliseconds) - DEFAULT_COMMAND_TIMEOUT: process.env.MCP_SSH_DEFAULT_TIMEOUT - ? parseInt(process.env.MCP_SSH_DEFAULT_TIMEOUT) - : 120000, // 2 minutes - - // Maximum allowed command timeout (milliseconds) - MAX_COMMAND_TIMEOUT: process.env.MCP_SSH_MAX_TIMEOUT - ? 
parseInt(process.env.MCP_SSH_MAX_TIMEOUT) - : 300000, // 5 minutes - - // Connection timeout (milliseconds) - CONNECTION_TIMEOUT: process.env.MCP_SSH_CONNECTION_TIMEOUT - ? parseInt(process.env.MCP_SSH_CONNECTION_TIMEOUT) - : 1800000, // 30 minutes - - // Keepalive interval (milliseconds) - KEEPALIVE_INTERVAL: process.env.MCP_SSH_KEEPALIVE_INTERVAL - ? parseInt(process.env.MCP_SSH_KEEPALIVE_INTERVAL) - : 60000, // 1 minute -}; - -// Response formatting -export const RESPONSE_FORMAT = { - // Whether to use compact JSON (no formatting) - COMPACT_JSON: process.env.MCP_SSH_COMPACT_JSON === 'true', +/** + * Operator-tunable runtime limits. + * + * Exposed as env vars so a user can trim Claude Code's context load or turn + * up detail temporarily without editing code: + * + * MCP_SSH_MAX_OUTPUT_LENGTH default 10_000 -- stdout/stderr truncation cap + * MCP_SSH_MAX_TAIL_LINES default 500 -- ssh_tail_read ring buffer cap + * MCP_SSH_MAX_RSYNC_OUTPUT default 5_000 -- rsync stderr truncation cap + * MCP_SSH_COMPACT_JSON default false -- if true, emit minified JSON + * MCP_SSH_DEBUG default false -- if true, logger.debug fires + * + * Values are read at import-time. To change at runtime, restart the server. 
+ */ + +function intFromEnv(name, defaultValue, { min = 1, max = 10_000_000 } = {}) { + const raw = process.env[name]; + if (raw == null || raw === '') return defaultValue; + const n = Number.parseInt(raw, 10); + if (!Number.isFinite(n) || n < min || n > max) return defaultValue; + return n; +} - // Whether to include debug information in responses - INCLUDE_DEBUG_INFO: process.env.MCP_SSH_DEBUG === 'true', -}; +function boolFromEnv(name, defaultValue) { + const raw = process.env[name]; + if (raw == null) return defaultValue; + return /^(1|true|yes|on)$/i.test(String(raw).trim()); +} -// Helper function to truncate output +export const OUTPUT_LIMITS = Object.freeze({ + MAX_OUTPUT_LENGTH: intFromEnv('MCP_SSH_MAX_OUTPUT_LENGTH', 10_000, { min: 100, max: 10_000_000 }), + MAX_TAIL_LINES: intFromEnv('MCP_SSH_MAX_TAIL_LINES', 500, { min: 1, max: 1_000_000 }), + MAX_RSYNC_OUTPUT: intFromEnv('MCP_SSH_MAX_RSYNC_OUTPUT', 5_000, { min: 100, max: 10_000_000 }), +}); + +export const RESPONSE_FORMAT = Object.freeze({ + COMPACT_JSON: boolFromEnv('MCP_SSH_COMPACT_JSON', false), + DEBUG: boolFromEnv('MCP_SSH_DEBUG', false), +}); + +/** + * Convenience truncation used by tool handlers that don't already flow through + * output-formatter.truncateHeadTail. Keeps head + tail, elides middle. Returns + * the input unchanged when under the cap. + */ export function truncateOutput(text, maxLength = OUTPUT_LIMITS.MAX_OUTPUT_LENGTH) { if (!text) return ''; - - if (text.length <= maxLength) return text; - - const truncated = text.length - maxLength; - return text.substring(0, maxLength) + `\n\n... [${truncated} characters truncated]`; -} - -// Helper function to format JSON response -export function formatJSONResponse(data) { - return JSON.stringify( - data, - null, - RESPONSE_FORMAT.COMPACT_JSON ? 
0 : 2 - ); + const s = String(text); + if (s.length <= maxLength) return s; + const keep = Math.max(1, Math.floor(maxLength / 2)); + const head = s.slice(0, keep); + const tail = s.slice(-keep); + const elided = s.length - head.length - tail.length; + return `${head}\n\n... [${elided} characters elided] ...\n\n${tail}`; } diff --git a/src/database-manager.js b/src/database-manager.js deleted file mode 100644 index fd0af6b..0000000 --- a/src/database-manager.js +++ /dev/null @@ -1,552 +0,0 @@ -/** - * Database Manager for claude-code-ssh - * Provides database operations for MySQL, PostgreSQL, and MongoDB - */ - -// Supported database types -export const DB_TYPES = { - MYSQL: 'mysql', - POSTGRESQL: 'postgresql', - MONGODB: 'mongodb' -}; - -// Default ports -export const DB_PORTS = { - mysql: 3306, - postgresql: 5432, - mongodb: 27017 -}; - -/** - * Build MySQL dump command - */ -export function buildMySQLDumpCommand(options) { - const { - database, - user, - password, - host = 'localhost', - port = 3306, - outputFile, - compress = true, - tables = null - } = options; - - let command = 'mysqldump'; - - if (user) command += ` -u${user}`; - if (password) command += ` -p'${password}'`; - if (host) command += ` -h ${host}`; - if (port) command += ` -P ${port}`; - - command += ' --single-transaction --routines --triggers'; - command += ` ${database}`; - - if (tables && Array.isArray(tables)) { - command += ` ${tables.join(' ')}`; - } - - if (compress) { - command += ` | gzip > "${outputFile}"`; - } else { - command += ` > "${outputFile}"`; - } - - return command; -} - -/** - * Build PostgreSQL dump command - */ -export function buildPostgreSQLDumpCommand(options) { - const { - database, - user, - password, - host = 'localhost', - port = 5432, - outputFile, - compress = true, - tables = null - } = options; - - let command = ''; - if (password) { - command = `PGPASSWORD='${password}' `; - } - - command += 'pg_dump'; - if (user) command += ` -U ${user}`; - if (host) command 
+= ` -h ${host}`; - if (port) command += ` -p ${port}`; - command += ' --format=custom --clean --if-exists'; - - if (tables && Array.isArray(tables)) { - for (const table of tables) { - command += ` -t ${table}`; - } - } - - command += ` ${database}`; - - if (compress) { - command += ` | gzip > "${outputFile}"`; - } else { - command += ` > "${outputFile}"`; - } - - return command; -} - -/** - * Build MongoDB dump command - */ -export function buildMongoDBDumpCommand(options) { - const { - database, - user, - password, - host = 'localhost', - port = 27017, - outputDir, - compress = true, - collections = null - } = options; - - let command = 'mongodump'; - if (host) command += ` --host ${host}`; - if (port) command += ` --port ${port}`; - if (user) command += ` --username ${user}`; - if (password) command += ` --password '${password}'`; - if (database) command += ` --db ${database}`; - - if (collections && Array.isArray(collections)) { - for (const collection of collections) { - command += ` --collection ${collection}`; - } - } - - command += ` --out "${outputDir}"`; - - if (compress) { - command += ` && tar -czf "${outputDir}.tar.gz" -C "$(dirname ${outputDir})" "$(basename ${outputDir})"`; - command += ` && rm -rf "${outputDir}"`; - } - - return command; -} - -/** - * Build MySQL import command - */ -export function buildMySQLImportCommand(options) { - const { - database, - user, - password, - host = 'localhost', - port = 3306, - inputFile - } = options; - - let command = ''; - - if (inputFile.endsWith('.gz')) { - command = `gunzip -c "${inputFile}" | `; - } else { - command = `cat "${inputFile}" | `; - } - - command += 'mysql'; - if (user) command += ` -u${user}`; - if (password) command += ` -p'${password}'`; - if (host) command += ` -h ${host}`; - if (port) command += ` -P ${port}`; - command += ` ${database}`; - - return command; -} - -/** - * Build PostgreSQL import command - */ -export function buildPostgreSQLImportCommand(options) { - const { - database, - 
user, - password, - host = 'localhost', - port = 5432, - inputFile - } = options; - - let command = ''; - if (password) { - command = `PGPASSWORD='${password}' `; - } - - command += 'pg_restore'; - if (user) command += ` -U ${user}`; - if (host) command += ` -h ${host}`; - if (port) command += ` -p ${port}`; - command += ' --clean --if-exists'; - command += ` -d ${database}`; - - if (inputFile.endsWith('.gz')) { - command = `gunzip -c "${inputFile}" | ${command}`; - } else { - command += ` "${inputFile}"`; - } - - return command; -} - -/** - * Build MongoDB restore command - */ -export function buildMongoDBRestoreCommand(options) { - const { - user, - password, - host = 'localhost', - port = 27017, - inputPath, - drop = true - } = options; - - let command = ''; - - if (inputPath.endsWith('.tar.gz')) { - const extractDir = inputPath.replace('.tar.gz', ''); - command = `tar -xzf "${inputPath}" -C "$(dirname ${inputPath})" && `; - command += 'mongorestore'; - if (drop) command += ' --drop'; - if (host) command += ` --host ${host}`; - if (port) command += ` --port ${port}`; - if (user) command += ` --username ${user}`; - if (password) command += ` --password '${password}'`; - command += ` "${extractDir}"`; - command += ` && rm -rf "${extractDir}"`; - } else { - command = 'mongorestore'; - if (drop) command += ' --drop'; - if (host) command += ` --host ${host}`; - if (port) command += ` --port ${port}`; - if (user) command += ` --username ${user}`; - if (password) command += ` --password '${password}'`; - command += ` "${inputPath}"`; - } - - return command; -} - -/** - * Build MySQL list databases command - */ -export function buildMySQLListDatabasesCommand(options) { - const { user, password, host = 'localhost', port = 3306 } = options; - - let command = 'mysql'; - if (user) command += ` -u${user}`; - if (password) command += ` -p'${password}'`; - if (host) command += ` -h ${host}`; - if (port) command += ` -P ${port}`; - command += ' -e "SHOW DATABASES;" | tail -n 
+2'; - - return command; -} - -/** - * Build MySQL list tables command - */ -export function buildMySQLListTablesCommand(options) { - const { database, user, password, host = 'localhost', port = 3306 } = options; - - let command = 'mysql'; - if (user) command += ` -u${user}`; - if (password) command += ` -p'${password}'`; - if (host) command += ` -h ${host}`; - if (port) command += ` -P ${port}`; - command += ` -e "USE ${database}; SHOW TABLES;" | tail -n +2`; - - return command; -} - -/** - * Build PostgreSQL list databases command - */ -export function buildPostgreSQLListDatabasesCommand(options) { - const { user, password, host = 'localhost', port = 5432 } = options; - - let command = ''; - if (password) { - command = `PGPASSWORD='${password}' `; - } - - command += 'psql'; - if (user) command += ` -U ${user}`; - if (host) command += ` -h ${host}`; - if (port) command += ` -p ${port}`; - command += ' -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;" | sed \'/^$/d\' | sed \'s/^[ \\t]*//\''; - - return command; -} - -/** - * Build PostgreSQL list tables command - */ -export function buildPostgreSQLListTablesCommand(options) { - const { database, user, password, host = 'localhost', port = 5432 } = options; - - let command = ''; - if (password) { - command = `PGPASSWORD='${password}' `; - } - - command += 'psql'; - if (user) command += ` -U ${user}`; - if (host) command += ` -h ${host}`; - if (port) command += ` -p ${port}`; - command += ` -d ${database}`; - command += ' -t -c "SELECT tablename FROM pg_tables WHERE schemaname = \'public\';" | sed \'/^$/d\' | sed \'s/^[ \\t]*//\''; - - return command; -} - -/** - * Build MongoDB list databases command - */ -export function buildMongoDBListDatabasesCommand(options) { - const { user, password, host = 'localhost', port = 27017 } = options; - - let command = 'mongo'; - if (host) command += ` --host ${host}`; - if (port) command += ` --port ${port}`; - if (user) command += ` --username ${user}`; - if 
(password) command += ` --password '${password}'`; - command += ' --quiet --eval "db.adminCommand(\'listDatabases\').databases.forEach(function(d){print(d.name)})"'; - - return command; -} - -/** - * Build MongoDB list collections command - */ -export function buildMongoDBListCollectionsCommand(options) { - const { database, user, password, host = 'localhost', port = 27017 } = options; - - let command = 'mongo'; - if (host) command += ` --host ${host}`; - if (port) command += ` --port ${port}`; - if (user) command += ` --username ${user}`; - if (password) command += ` --password '${password}'`; - command += ` ${database}`; - command += ' --quiet --eval "db.getCollectionNames().forEach(function(c){print(c)})"'; - - return command; -} - -/** - * Build MySQL query command (SELECT only) - */ -export function buildMySQLQueryCommand(options) { - const { database, query, user, password, host = 'localhost', port = 3306, format = 'json' } = options; - - // Validate query is SELECT only - if (!isSafeQuery(query)) { - throw new Error('Only SELECT queries are allowed'); - } - - let command = 'mysql'; - if (user) command += ` -u${user}`; - if (password) command += ` -p'${password}'`; - if (host) command += ` -h ${host}`; - if (port) command += ` -P ${port}`; - command += ` ${database}`; - - if (format === 'json') { - // Use JSON output if MySQL 5.7.8+ - command += ` -e "${query}" --batch --skip-column-names | awk 'BEGIN{print "["} {if(NR>1)print ","; printf "{\\"row\\":%d,\\"data\\":\\"%s\\"}", NR, $0} END{print "]"}'`; - } else { - command += ` -e "${query}"`; - } - - return command; -} - -/** - * Build PostgreSQL query command (SELECT only) - */ -export function buildPostgreSQLQueryCommand(options) { - const { database, query, user, password, host = 'localhost', port = 5432 } = options; - - if (!isSafeQuery(query)) { - throw new Error('Only SELECT queries are allowed'); - } - - let command = ''; - if (password) { - command = `PGPASSWORD='${password}' `; - } - - command += 
'psql'; - if (user) command += ` -U ${user}`; - if (host) command += ` -h ${host}`; - if (port) command += ` -p ${port}`; - command += ` -d ${database}`; - command += ` -c "${query}"`; - - return command; -} - -/** - * Build MongoDB query command - */ -export function buildMongoDBQueryCommand(options) { - const { database, collection, query, user, password, host = 'localhost', port = 27017 } = options; - - let command = 'mongo'; - if (host) command += ` --host ${host}`; - if (port) command += ` --port ${port}`; - if (user) command += ` --username ${user}`; - if (password) command += ` --password '${password}'`; - command += ` ${database}`; - command += ` --quiet --eval "db.${collection}.find(${query || '{}'}).forEach(printjson)"`; - - return command; -} - -/** - * Validate query is safe (SELECT only) - */ -export function isSafeQuery(query) { - const trimmedQuery = query.trim().toLowerCase(); - - // Must start with SELECT - if (!trimmedQuery.startsWith('select')) { - return false; - } - - // Block dangerous keywords - const dangerousKeywords = [ - 'insert', 'update', 'delete', 'drop', 'create', 'alter', - 'truncate', 'grant', 'revoke', 'exec', 'execute' - ]; - - for (const keyword of dangerousKeywords) { - if (trimmedQuery.includes(keyword)) { - return false; - } - } - - return true; -} - -/** - * Parse database list output - */ -export function parseDatabaseList(output, type) { - const lines = output.trim().split('\n').filter(l => l.trim()); - - // Filter out system databases - return lines.filter(db => { - const dbLower = db.toLowerCase(); - if (type === DB_TYPES.MYSQL) { - return !['information_schema', 'performance_schema', 'mysql', 'sys'].includes(dbLower); - } else if (type === DB_TYPES.POSTGRESQL) { - return !['template0', 'template1', 'postgres'].includes(dbLower); - } else if (type === DB_TYPES.MONGODB) { - return !['admin', 'config', 'local'].includes(dbLower); - } - return true; - }); -} - -/** - * Parse table/collection list output - */ -export function 
parseTableList(output) { - return output.trim().split('\n').filter(l => l.trim()); -} - -/** - * Estimate dump size command - */ -export function buildEstimateSizeCommand(type, database, options = {}) { - const { user, password, host = 'localhost', port } = options; - - switch (type) { - case DB_TYPES.MYSQL: { - let command = 'mysql'; - if (user) command += ` -u${user}`; - if (password) command += ` -p'${password}'`; - if (host) command += ` -h ${host}`; - if (port) command += ` -P ${port}`; - command += ` -e "SELECT SUM(data_length + index_length) FROM information_schema.TABLES WHERE table_schema='${database}';" | tail -n 1`; - return command; - } - - case DB_TYPES.POSTGRESQL: { - let command = ''; - if (password) { - command = `PGPASSWORD='${password}' `; - } - command += 'psql'; - if (user) command += ` -U ${user}`; - if (host) command += ` -h ${host}`; - if (port) command += ` -p ${port}`; - command += ` -d ${database}`; - command += ` -t -c "SELECT pg_database_size('${database}');" | sed 's/^[ \\t]*//'`; - return command; - } - - case DB_TYPES.MONGODB: { - let command = 'mongo'; - if (host) command += ` --host ${host}`; - if (port) command += ` --port ${port}`; - if (user) command += ` --username ${user}`; - if (password) command += ` --password '${password}'`; - command += ` ${database}`; - command += ' --quiet --eval "db.stats().dataSize"'; - return command; - } - - default: - throw new Error(`Unknown database type: ${type}`); - } -} - -/** - * Parse size output to bytes - */ -export function parseSize(output) { - const size = parseInt(output.trim()); - return isNaN(size) ? 
0 : size; -} - -/** - * Format bytes to human readable - */ -export function formatBytes(bytes) { - if (bytes === 0) return '0 Bytes'; - const k = 1024; - const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB']; - const i = Math.floor(Math.log(bytes) / Math.log(k)); - return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i]; -} - -/** - * Get database connection info - */ -export function getConnectionInfo(type, options) { - const { host = 'localhost', port, user, database } = options; - const defaultPort = DB_PORTS[type]; - - return { - type, - host, - port: port || defaultPort, - user: user || 'default', - database: database || 'all' - }; -} diff --git a/src/health-monitor.js b/src/health-monitor.js deleted file mode 100644 index 8272101..0000000 --- a/src/health-monitor.js +++ /dev/null @@ -1,428 +0,0 @@ -/** - * Health Monitor for claude-code-ssh - * Provides system health checks, service monitoring, and process management - */ - -import { logger } from './logger.js'; - -// Health status levels -export const HEALTH_STATUS = { - HEALTHY: 'healthy', - WARNING: 'warning', - CRITICAL: 'critical', - UNKNOWN: 'unknown' -}; - -// Common services to monitor -export const COMMON_SERVICES = { - nginx: { systemd: 'nginx', sysv: 'nginx' }, - apache: { systemd: 'apache2', sysv: 'apache2' }, - mysql: { systemd: 'mysql', sysv: 'mysql' }, - postgresql: { systemd: 'postgresql', sysv: 'postgresql' }, - mongodb: { systemd: 'mongod', sysv: 'mongod' }, - redis: { systemd: 'redis', sysv: 'redis-server' }, - docker: { systemd: 'docker', sysv: 'docker' }, - ssh: { systemd: 'sshd', sysv: 'ssh' } -}; - -/** - * Build command to check CPU usage - */ -export function buildCPUCheckCommand() { - // Get CPU usage using top, show idle percentage, calculate used - return 'top -bn1 | grep "Cpu(s)" | sed "s/.*, *\\([0-9.]*\\)%* id.*/\\1/" | awk \'{print 100 - $1}\''; -} - -/** - * Build command to check memory usage - */ -export function buildMemoryCheckCommand() { - // Returns: total, 
used, free, available in MB and percentage - return 'free -m | awk \'NR==2{printf "{\\"total\\":%s,\\"used\\":%s,\\"free\\":%s,\\"percent\\":%.2f}", $2,$3,$4,$3*100/$2}\''; -} - -/** - * Build command to check disk usage - */ -export function buildDiskCheckCommand(mountPoint = '/') { - // Returns JSON with disk usage for specific mount point or all - if (mountPoint === 'all') { - return 'df -h | awk \'NR>1 {gsub(/%/,"",$5); printf "{\\"mount\\":\\"%s\\",\\"size\\":\\"%s\\",\\"used\\":\\"%s\\",\\"avail\\":\\"%s\\",\\"percent\\":%s}\\n", $6,$2,$3,$4,$5}\''; - } - return `df -h "${mountPoint}" | awk 'NR>1 {gsub(/%/,"",$5); printf "{\\"mount\\":\\"%s\\",\\"size\\":\\"%s\\",\\"used\\":\\"%s\\",\\"avail\\":\\"%s\\",\\"percent\\":%s}", $6,$2,$3,$4,$5}'`; -} - -/** - * Build command to check network statistics - */ -export function buildNetworkCheckCommand() { - // Get basic network stats (RX/TX bytes) - return 'cat /proc/net/dev | awk \'NR>2 {printf "{\\"interface\\":\\"%s\\",\\"rx_bytes\\":%s,\\"tx_bytes\\":%s}\\n", $1,$2,$10}\' | grep -v "lo:"'; -} - -/** - * Build command to check load average - */ -export function buildLoadAverageCommand() { - return 'uptime | awk -F\'load average:\' \'{print $2}\' | sed \'s/^[ \\t]*//\''; -} - -/** - * Build command to check system uptime - */ -export function buildUptimeCommand() { - return 'uptime -p 2>/dev/null || uptime | awk \'{print $3,$4}\' | sed \'s/,//\''; -} - -/** - * Parse CPU usage output - */ -export function parseCPUUsage(output) { - const usage = parseFloat(output.trim()); - return { - usage: usage.toFixed(2), - percent: usage, - status: usage > 90 ? HEALTH_STATUS.CRITICAL : usage > 70 ? HEALTH_STATUS.WARNING : HEALTH_STATUS.HEALTHY - }; -} - -/** - * Parse memory usage output - */ -export function parseMemoryUsage(output) { - try { - const mem = JSON.parse(output.trim()); - return { - total_mb: mem.total, - used_mb: mem.used, - free_mb: mem.free, - percent: parseFloat(mem.percent), - status: mem.percent > 90 ? 
HEALTH_STATUS.CRITICAL : mem.percent > 80 ? HEALTH_STATUS.WARNING : HEALTH_STATUS.HEALTHY - }; - } catch (error) { - logger.warn('Failed to parse memory output', { error: error.message }); - return { status: HEALTH_STATUS.UNKNOWN }; - } -} - -/** - * Parse disk usage output - */ -export function parseDiskUsage(output) { - const lines = output.trim().split('\n').filter(l => l); - const disks = []; - - for (const line of lines) { - try { - const disk = JSON.parse(line); - disk.status = disk.percent > 90 ? HEALTH_STATUS.CRITICAL : disk.percent > 80 ? HEALTH_STATUS.WARNING : HEALTH_STATUS.HEALTHY; - disks.push(disk); - } catch (error) { - logger.warn('Failed to parse disk line', { line, error: error.message }); - } - } - - return disks; -} - -/** - * Parse network statistics - */ -export function parseNetworkStats(output) { - const lines = output.trim().split('\n').filter(l => l); - const interfaces = []; - - for (const line of lines) { - try { - const iface = JSON.parse(line); - // Convert bytes to MB - iface.rx_mb = (iface.rx_bytes / 1024 / 1024).toFixed(2); - iface.tx_mb = (iface.tx_bytes / 1024 / 1024).toFixed(2); - interfaces.push(iface); - } catch (error) { - logger.warn('Failed to parse network line', { line, error: error.message }); - } - } - - return interfaces; -} - -/** - * Determine overall health status - */ -export function determineOverallHealth(cpu, memory, disks) { - const statuses = [cpu.status, memory.status, ...disks.map(d => d.status)]; - - if (statuses.includes(HEALTH_STATUS.CRITICAL)) { - return HEALTH_STATUS.CRITICAL; - } - if (statuses.includes(HEALTH_STATUS.WARNING)) { - return HEALTH_STATUS.WARNING; - } - if (statuses.includes(HEALTH_STATUS.UNKNOWN)) { - return HEALTH_STATUS.UNKNOWN; - } - return HEALTH_STATUS.HEALTHY; -} - -/** - * Build command to check service status (systemd or sysv) - */ -export function buildServiceStatusCommand(serviceName) { - // Try systemd first, fallback to sysv - return ` - if command -v systemctl >/dev/null 2>&1; 
then - systemctl is-active ${serviceName} 2>/dev/null >/dev/null && echo "ACTIVE" || echo "INACTIVE" - systemctl is-enabled ${serviceName} 2>/dev/null >/dev/null && echo "ENABLED" || echo "DISABLED" - systemctl status ${serviceName} 2>/dev/null | grep "Main PID" | awk '{print $3}' | cut -d'(' -f1 - systemctl status ${serviceName} 2>/dev/null | grep "Active:" | sed 's/.*Active: //' | awk '{print $1,$2,$3}' - elif command -v service >/dev/null 2>&1; then - service ${serviceName} status >/dev/null 2>&1 && echo "ACTIVE" || echo "INACTIVE" - echo "UNKNOWN" - pgrep -f ${serviceName} | head -1 || echo "" - echo "sysv" - else - echo "UNKNOWN" - echo "UNKNOWN" - echo "" - echo "no-init-system" - fi - `.trim(); -} - -/** - * Parse service status output - */ -export function parseServiceStatus(output, serviceName) { - const lines = output.trim().split('\n'); - const [status, enabled, pid, details] = lines; - - return { - name: serviceName, - status: status === 'ACTIVE' ? 'running' : 'stopped', - enabled: enabled === 'ENABLED' ? 'yes' : enabled === 'DISABLED' ? 'no' : 'unknown', - pid: pid && pid !== '' ? parseInt(pid) : null, - details: details || 'unknown', - health: status === 'ACTIVE' ? HEALTH_STATUS.HEALTHY : HEALTH_STATUS.CRITICAL - }; -} - -/** - * Build command to list running processes - */ -export function buildProcessListCommand(options = {}) { - const { - sortBy = 'cpu', // cpu, memory, pid - limit = 20, - filter = null - } = options; - - let sortFlag = sortBy === 'memory' ? '-m' : '-c'; // -c for CPU, -m for memory - let command = `ps aux --sort=${sortFlag === '-c' ? 
'-pcpu' : '-pmem'} | head -n ${limit + 1}`; - - if (filter) { - command += ` | grep -i "${filter}"`; - } - - // Format output as JSON-like structure - command += ' | awk \'NR>1 {printf "{\\"user\\":\\"%s\\",\\"pid\\":%s,\\"cpu\\":%.1f,\\"mem\\":%.1f,\\"vsz\\":%s,\\"rss\\":%s,\\"stat\\":\\"%s\\",\\"start\\":\\"%s\\",\\"time\\":\\"%s\\",\\"command\\":\\"%s\\"}\\n", $1,$2,$3,$4,$5,$6,$8,$9,$10,substr($0,index($0,$11))}\''; - - return command; -} - -/** - * Parse process list output - */ -export function parseProcessList(output) { - const lines = output.trim().split('\n').filter(l => l); - const processes = []; - - for (const line of lines) { - try { - const proc = JSON.parse(line); - processes.push(proc); - } catch (error) { - logger.warn('Failed to parse process line', { line, error: error.message }); - } - } - - return processes; -} - -/** - * Build command to kill a process - */ -export function buildKillProcessCommand(pid, signal = 'TERM') { - // Validate PID is numeric - if (!Number.isInteger(pid) || pid <= 0) { - throw new Error(`Invalid PID: ${pid}`); - } - - const validSignals = ['TERM', 'KILL', 'HUP', 'INT', 'QUIT']; - if (!validSignals.includes(signal)) { - throw new Error(`Invalid signal: ${signal}. 
Valid signals: ${validSignals.join(', ')}`); - } - - return `kill -${signal} ${pid}`; -} - -/** - * Build command to get process info - */ -export function buildProcessInfoCommand(pid) { - return `ps -p ${pid} -o user,pid,pcpu,pmem,vsz,rss,stat,start,time,cmd --no-headers | awk '{printf "{\\"user\\":\\"%s\\",\\"pid\\":%s,\\"cpu\\":%.1f,\\"mem\\":%.1f,\\"vsz\\":%s,\\"rss\\":%s,\\"stat\\":\\"%s\\",\\"start\\":\\"%s\\",\\"time\\":\\"%s\\",\\"command\\":\\"%s\\"}", $1,$2,$3,$4,$5,$6,$7,$8,$9,substr($0,index($0,$10))}'`; -} - -/** - * Create alert configuration - */ -export function createAlertConfig(thresholds) { - const defaults = { - cpu: 80, - memory: 90, - disk: 85, - enabled: true - }; - - return { - ...defaults, - ...thresholds, - created_at: new Date().toISOString() - }; -} - -/** - * Build command to save alert config - */ -export function buildSaveAlertConfigCommand(config, configPath = '/etc/ssh-manager-alerts.json') { - const jsonData = JSON.stringify(config, null, 2); - const escapedJson = jsonData.replace(/'/g, '\'\\\'\''); - return `echo '${escapedJson}' > "${configPath}"`; -} - -/** - * Build command to load alert config - */ -export function buildLoadAlertConfigCommand(configPath = '/etc/ssh-manager-alerts.json') { - return `cat "${configPath}" 2>/dev/null || echo '{}'`; -} - -/** - * Check if thresholds are exceeded - */ -export function checkAlertThresholds(metrics, thresholds) { - const alerts = []; - - if (thresholds.cpu && metrics.cpu && metrics.cpu.percent > thresholds.cpu) { - alerts.push({ - type: 'cpu', - severity: 'warning', - message: `CPU usage (${metrics.cpu.percent}%) exceeds threshold (${thresholds.cpu}%)`, - value: metrics.cpu.percent, - threshold: thresholds.cpu - }); - } - - if (thresholds.memory && metrics.memory && metrics.memory.percent > thresholds.memory) { - alerts.push({ - type: 'memory', - severity: 'warning', - message: `Memory usage (${metrics.memory.percent}%) exceeds threshold (${thresholds.memory}%)`, - value: 
metrics.memory.percent, - threshold: thresholds.memory - }); - } - - if (thresholds.disk && metrics.disks) { - for (const disk of metrics.disks) { - if (disk.percent > thresholds.disk) { - alerts.push({ - type: 'disk', - severity: 'warning', - message: `Disk usage on ${disk.mount} (${disk.percent}%) exceeds threshold (${thresholds.disk}%)`, - mount: disk.mount, - value: disk.percent, - threshold: thresholds.disk - }); - } - } - } - - return alerts; -} - -/** - * Build comprehensive health check command - */ -export function buildComprehensiveHealthCheckCommand() { - return ` - echo "=== CPU ===" - ${buildCPUCheckCommand()} - echo "=== MEMORY ===" - ${buildMemoryCheckCommand()} - echo "=== DISK ===" - ${buildDiskCheckCommand('all')} - echo "=== LOAD ===" - ${buildLoadAverageCommand()} - echo "=== UPTIME ===" - ${buildUptimeCommand()} - echo "=== NETWORK ===" - ${buildNetworkCheckCommand()} - `.trim(); -} - -/** - * Parse comprehensive health check output - */ -export function parseComprehensiveHealthCheck(output) { - const sections = output.split('=== ').filter(s => s); - const result = {}; - - for (const section of sections) { - const [name, ...content] = section.split('\n'); - const data = content.join('\n').trim(); - - switch (name.toLowerCase().trim()) { - case 'cpu ===': - result.cpu = parseCPUUsage(data); - break; - case 'memory ===': - result.memory = parseMemoryUsage(data); - break; - case 'disk ===': - result.disks = parseDiskUsage(data); - break; - case 'load ===': - result.load_average = data; - break; - case 'uptime ===': - result.uptime = data; - break; - case 'network ===': - result.network = parseNetworkStats(data); - break; - } - } - - // Determine overall health - if (result.cpu && result.memory && result.disks) { - result.overall_status = determineOverallHealth(result.cpu, result.memory, result.disks); - } - - return result; -} - -/** - * Get common service names for detection - */ -export function getCommonServices() { - return 
Object.keys(COMMON_SERVICES); -} - -/** - * Resolve service name (handle both systemd and sysv names) - */ -export function resolveServiceName(shortName) { - const service = COMMON_SERVICES[shortName.toLowerCase()]; - return service ? service.systemd : shortName; -} diff --git a/src/index.js b/src/index.js index e44eab8..575899b 100755 --- a/src/index.js +++ b/src/index.js @@ -48,111 +48,17 @@ import { deleteGroup, addServersToGroup, removeServersFromGroup, - listGroups, - executeOnGroup, - EXECUTION_STRATEGIES + listGroups } from './server-groups.js'; -import { - createTunnel, - getTunnel, - listTunnels, - closeTunnel, - closeServerTunnels, - TUNNEL_TYPES -} from './tunnel-manager.js'; -import { - getHostKeyFingerprint, - isHostKnown, - getCurrentHostKey, - removeHostKey, - addHostKey, - updateHostKey, - hasHostKeyChanged, - listKnownHosts, - detectSSHKeyError, - extractHostFromSSHError -} from './ssh-key-manager.js'; -import { - BACKUP_TYPES, - DEFAULT_BACKUP_DIR, - generateBackupId, - getBackupMetadataPath, - getBackupFilePath, - buildMySQLDumpCommand, - buildPostgreSQLDumpCommand, - buildMongoDBDumpCommand, - buildFilesBackupCommand, - buildRestoreCommand, - createBackupMetadata, - buildSaveMetadataCommand, - buildListBackupsCommand, - parseBackupsList, - buildCleanupCommand, - buildCronScheduleCommand, - parseCronJobs -} from './backup-manager.js'; -import { - HEALTH_STATUS, - COMMON_SERVICES, - buildCPUCheckCommand, - buildMemoryCheckCommand, - buildDiskCheckCommand, - buildNetworkCheckCommand, - buildLoadAverageCommand, - buildUptimeCommand, - parseCPUUsage, - parseMemoryUsage, - parseDiskUsage, - parseNetworkStats, - determineOverallHealth, - buildServiceStatusCommand, - parseServiceStatus, - buildProcessListCommand, - parseProcessList, - buildKillProcessCommand, - buildProcessInfoCommand, - createAlertConfig, - buildSaveAlertConfigCommand, - buildLoadAlertConfigCommand, - checkAlertThresholds, - buildComprehensiveHealthCheckCommand, - 
parseComprehensiveHealthCheck, - getCommonServices, - resolveServiceName -} from './health-monitor.js'; -import { - DB_TYPES, - DB_PORTS, - buildMySQLDumpCommand as buildDBMySQLDumpCommand, - buildPostgreSQLDumpCommand as buildDBPostgreSQLDumpCommand, - buildMongoDBDumpCommand as buildDBMongoDBDumpCommand, - buildMySQLImportCommand, - buildPostgreSQLImportCommand, - buildMongoDBRestoreCommand, - buildMySQLListDatabasesCommand, - buildMySQLListTablesCommand, - buildPostgreSQLListDatabasesCommand, - buildPostgreSQLListTablesCommand, - buildMongoDBListDatabasesCommand, - buildMongoDBListCollectionsCommand, - buildMySQLQueryCommand, - buildPostgreSQLQueryCommand, - buildMongoDBQueryCommand, - isSafeQuery, - parseDatabaseList, - parseTableList, - buildEstimateSizeCommand, - parseSize, - formatBytes, - getConnectionInfo -} from './database-manager.js'; import { loadToolConfig, isToolEnabled } from './tool-config-manager.js'; +import { withAnnotations } from './tool-annotations.js'; // Modularized tool handlers (src/tools/*.js) -- 10/10 "gamechanger" versions import { handleSshExecute, handleSshExecuteSudo, handleSshExecuteGroup } from './tools/exec-tools.js'; import { handleSshUpload, handleSshDownload, handleSshSync, handleSshDiff, handleSshEdit } from './tools/transfer-tools.js'; import { handleSshTail, handleSshTailStart, handleSshTailRead, handleSshTailStop } from './tools/tail-tools.js'; import { handleSshHealthCheck, handleSshMonitor, handleSshServiceStatus, handleSshProcessManager } from './tools/monitoring-tools.js'; +import { handleSshAlertSetup } from './tools/alerts-tools.js'; import { handleSshDbQuery, handleSshDbList, handleSshDbDump, handleSshDbImport } from './tools/db-tools.js'; import { handleSshBackupCreate, handleSshBackupList, handleSshBackupRestore, handleSshBackupSchedule } from './tools/backup-tools.js'; import { handleSshDeploy } from './tools/deploy-tools.js'; @@ -503,13 +409,17 @@ async function getConnection(serverName) { return 
connections.get(normalizedName); } -// Create MCP server +// Create MCP server. Version pulled from package.json so the wire version +// never drifts from the released build. +const __pkgDir = path.dirname(fileURLToPath(import.meta.url)); +const __pkgJson = JSON.parse(fs.readFileSync(path.join(__pkgDir, '..', 'package.json'), 'utf8')); +const SERVER_VERSION = __pkgJson.version; const server = new McpServer({ name: 'claude-code-ssh', - version: '3.2.2', + version: SERVER_VERSION, }); -logger.info('MCP Server initialized', { version: '3.2.2' }); +logger.info('MCP Server initialized', { version: SERVER_VERSION }); /** * Helper function to conditionally register tools based on configuration @@ -518,12 +428,23 @@ logger.info('MCP Server initialized', { version: '3.2.2' }); * @param {Function} handler - Tool handler function */ function registerToolConditional(toolName, schema, handler) { - if (isToolEnabled(toolName)) { - server.registerTool(toolName, schema, handler); - logger.debug(`Registered tool: ${toolName}`); - } else { + if (!isToolEnabled(toolName)) { logger.debug(`Skipped disabled tool: ${toolName}`); + return; } + // Thread MCP cancellation through to the tool handler. The SDK delivers an + // AbortSignal at `extra.signal`; tools surface it as `args.abortSignal` + // (which streamExecCommand already accepts) so long-running remote commands + // stop when the client hits Esc instead of running to completion on the + // target host. + const wrapped = async (args, extra) => { + const mergedArgs = extra && extra.signal + ? 
{ ...args, abortSignal: extra.signal } + : args; + return handler(mergedArgs, extra); + }; + server.registerTool(toolName, withAnnotations(toolName, schema), wrapped); + logger.debug(`Registered tool: ${toolName}`); } // Register available tools @@ -538,7 +459,9 @@ registerToolConditional( description: 'Execute command on remote SSH server (streaming, UTF-8 safe, ANSI-clean markdown)', inputSchema: { server: z.string().describe('Server name from configuration'), - command: z.string().describe('Command to execute'), + // Cap at 512 KB -- keeps us well under every UNIX ARG_MAX (typical 128KB-2MB) + // even after shQuote expansion, which can ~2x the size for quote-heavy input. + command: z.string().min(1).max(524_288).describe('Command to execute (max 512 KB)'), cwd: z.string().optional().describe('Working directory (uses default_dir if configured)'), timeout: z.number().optional().describe('Command timeout in ms (default 120000, max 300000)'), format: z.enum(['markdown', 'json']).optional().describe('Output format') @@ -758,7 +681,8 @@ registerToolConditional( type: 'text', text: `[err] Error retrieving history: ${error.message}` } - ] + ], + isError: true }; } } @@ -823,17 +747,6 @@ registerToolConditional( }) ); -// Helper function to format duration -function formatDuration(seconds) { - if (seconds < 60) { - return `${seconds}s`; - } else if (seconds < 3600) { - return `${Math.floor(seconds / 60)}m ${seconds % 60}s`; - } else { - return `${Math.floor(seconds / 3600)}h ${Math.floor((seconds % 3600) / 60)}m`; - } -} - // Server Group Management Tools registerToolConditional( @@ -990,7 +903,8 @@ registerToolConditional( type: 'text', text: `[err] Group management error: ${error.message}` } - ] + ], + isError: true }; } } @@ -1123,6 +1037,7 @@ registerToolConditional( text: `[err] Deployment failed: ${error.message}`, }, ], + isError: true, }; } } @@ -1245,6 +1160,7 @@ registerToolConditional( text: `[err] Command alias operation failed: ${error.message}`, }, ], + 
isError: true, }; } } @@ -1308,7 +1224,7 @@ registerToolConditional( content: [ { type: 'text', - text: `[err] Hook disabled: ${hook}`, + text: `[ok] Hook disabled: ${hook}`, }, ], }; @@ -1337,6 +1253,7 @@ registerToolConditional( text: `[err] Hook operation failed: ${error.message}`, }, ], + isError: true, }; } } @@ -1417,6 +1334,7 @@ registerToolConditional( text: `[err] Profile operation failed: ${error.message}`, }, ], + isError: true, }; } } @@ -1534,6 +1452,7 @@ registerToolConditional( text: `[err] Connection management failed: ${error.message}`, }, ], + isError: true, }; } } @@ -1546,7 +1465,7 @@ registerToolConditional( description: 'Create SSH tunnel (DNS+TCP reachability preview, typed state)', inputSchema: { server: z.string().describe('Server name or alias'), - type: z.enum(['local', 'remote', 'dynamic']).describe('Tunnel type'), + type: z.enum(['local', 'remote', 'dynamic']).describe('local port forward, remote reverse tunnel, or dynamic SOCKS5 proxy'), localHost: z.string().optional().describe('Local host (alias for local_host)'), local_host: z.string().optional().describe('Local host'), localPort: z.number().optional().describe('Local port (alias for local_port)'), @@ -1703,6 +1622,7 @@ registerToolConditional( text: `[err] Alias operation failed: ${error.message}`, }, ], + isError: true, }; } } @@ -1903,181 +1823,18 @@ registerToolConditional( registerToolConditional( 'ssh_alert_setup', { - description: 'Configure health monitoring alerts and thresholds', + description: 'Configure and check health-threshold alerts. Stores config per server on the operator machine; `check` compares current health_check metrics to thresholds. 
No background runner -- wire `check` into cron or ssh_hooks for continuous monitoring.', inputSchema: { server: z.string().describe('Server name'), - action: z.enum(['set', 'get', 'check']) - .describe('Action: set thresholds, get config, or check current metrics against thresholds'), - cpuThreshold: z.number().optional() - .describe('CPU usage threshold percentage (e.g., 80)'), - memoryThreshold: z.number().optional() - .describe('Memory usage threshold percentage (e.g., 90)'), - diskThreshold: z.number().optional() - .describe('Disk usage threshold percentage (e.g., 85)'), - enabled: z.boolean().optional() - .describe('Enable or disable alerts (default: true)') + action: z.enum(['set', 'get', 'check']).describe('set thresholds, get config, or check current metrics against thresholds'), + cpuThreshold: z.number().min(0).max(100).optional().describe('CPU usage threshold percent (0-100)'), + memoryThreshold: z.number().min(0).max(100).optional().describe('Memory usage threshold percent (0-100)'), + diskThreshold: z.number().min(0).max(100).optional().describe('Disk usage threshold percent applied to every mount (0-100)'), + enabled: z.boolean().optional().describe('Enable or disable alert evaluation (default true)'), + format: z.enum(['markdown', 'json']).optional().describe('Output format'), } }, - async ({ server: serverName, action, cpuThreshold, memoryThreshold, diskThreshold, enabled = true }) => { - try { - const ssh = await getConnection(serverName); - const configPath = '/etc/ssh-manager-alerts.json'; - - logger.info(`Alert setup action: ${action}`, { - server: serverName - }); - - let response; - - switch (action) { - case 'set': { - // Create alert configuration - const config = createAlertConfig({ - cpu: cpuThreshold, - memory: memoryThreshold, - disk: diskThreshold, - enabled - }); - - // Save to server - const saveCommand = buildSaveAlertConfigCommand(config, configPath); - const saveResult = await ssh.execCommand(saveCommand); - - if (saveResult.code 
!== 0) { - throw new Error(`Failed to save alert config: ${saveResult.stderr}`); - } - - response = { - server: serverName, - action: 'set', - config, - config_path: configPath, - success: true - }; - - logger.info('Alert thresholds configured', { - server: serverName, - thresholds: config - }); - break; - } - - case 'get': { - // Load configuration - const loadCommand = buildLoadAlertConfigCommand(configPath); - const result = await ssh.execCommand(loadCommand); - - let config = {}; - if (result.stdout && result.stdout.trim()) { - try { - config = JSON.parse(result.stdout); - } catch (e) { - config = { error: 'Failed to parse config' }; - } - } - - response = { - server: serverName, - action: 'get', - config, - config_path: configPath - }; - break; - } - - case 'check': { - // Load thresholds - const loadCommand = buildLoadAlertConfigCommand(configPath); - const loadResult = await ssh.execCommand(loadCommand); - - let thresholds = {}; - if (loadResult.stdout && loadResult.stdout.trim()) { - try { - thresholds = JSON.parse(loadResult.stdout); - } catch (e) { - throw new Error('No alert configuration found. Use action=set to configure.'); - } - } else { - throw new Error('No alert configuration found. 
Use action=set to configure.'); - } - - if (!thresholds.enabled) { - response = { - server: serverName, - action: 'check', - message: 'Alerts are disabled', - thresholds - }; - break; - } - - // Get current metrics - const healthCommand = buildComprehensiveHealthCheckCommand(); - const healthResult = await ssh.execCommand(healthCommand); - - if (healthResult.code !== 0) { - throw new Error('Failed to get current metrics'); - } - - const metrics = parseComprehensiveHealthCheck(healthResult.stdout); - - // Check thresholds - const alerts = checkAlertThresholds(metrics, thresholds); - - response = { - server: serverName, - action: 'check', - thresholds, - current_metrics: { - cpu: metrics.cpu, - memory: metrics.memory, - disks: metrics.disks - }, - alerts, - alert_count: alerts.length, - status: alerts.length === 0 ? 'ok' : 'alerts_triggered' - }; - - if (alerts.length > 0) { - logger.warn('Health alerts triggered', { - server: serverName, - alert_count: alerts.length, - alerts - }); - } - break; - } - - default: - throw new Error(`Unknown action: ${action}`); - } - - return { - content: [ - { - type: 'text', - text: JSON.stringify(response, null, 2) - } - ] - }; - - } catch (error) { - logger.error('Alert setup failed', { - server: serverName, - action, - error: error.message - }); - - return { - content: [ - { - type: 'text', - text: `[err] Alert setup failed: ${error.message}` - } - ] - }; - } - } + async (args) => handleSshAlertSetup({ getConnection, args }) ); // ============================================================================ diff --git a/src/logger.js b/src/logger.js index 5d46257..a43cc4a 100644 --- a/src/logger.js +++ b/src/logger.js @@ -37,9 +37,13 @@ const ICONS = { class Logger { constructor() { - // Set log level from environment variable - const envLevel = process.env.SSH_LOG_LEVEL?.toUpperCase() || 'INFO'; - this.currentLevel = LOG_LEVELS[envLevel] ?? LOG_LEVELS.INFO; + // Set log level. 
Priority: SSH_LOG_LEVEL > MCP_SSH_DEBUG (shorthand for + // LEVEL=DEBUG) > default INFO. + const envLevel = process.env.SSH_LOG_LEVEL?.toUpperCase(); + const debugShorthand = /^(1|true|yes|on)$/i.test(process.env.MCP_SSH_DEBUG || ''); + this.currentLevel = envLevel != null + ? (LOG_LEVELS[envLevel] ?? LOG_LEVELS.INFO) + : (debugShorthand ? LOG_LEVELS.DEBUG : LOG_LEVELS.INFO); // Enable verbose mode from environment this.verbose = process.env.SSH_VERBOSE === 'true'; diff --git a/src/output-formatter.js b/src/output-formatter.js index 1b4688f..f1ae5bb 100644 --- a/src/output-formatter.js +++ b/src/output-formatter.js @@ -6,8 +6,14 @@ * no emoji, single-char dividers. * * Callers build an ExecResult, then choose MCP content via makeMcpContent(). + * + * Truncation cap defaults to OUTPUT_LIMITS.MAX_OUTPUT_LENGTH (tunable via + * MCP_SSH_MAX_OUTPUT_LENGTH env var). Tool handlers may still pass an + * explicit maxLen -- it overrides the env default. */ +import { OUTPUT_LIMITS } from './config.js'; + // ANSI CSI / OSC stripping. Covers color, cursor, title sequences. // eslint-disable-next-line no-control-regex const ANSI_RE = /\x1b\[[0-9;?]*[ -/]*[@-~]|\x1b\][^\x07]*(?:\x07|\x1b\\)/g; @@ -21,13 +27,32 @@ export function stripAnsi(s) { return String(s).replace(ANSI_RE, ''); } +/** + * Escape a cell value for a GitHub-Flavored Markdown table. + * + * NOT a security sanitizer -- we escape only what breaks the table layout: + * backslash first (so it doesn't double-up our own escape), then pipe + * (column delimiter), then newlines (collapsed to spaces so a single + * value can't break the row). + * + * Callers pass strings that are already untrusted remote content -- the + * table is rendered into a chat client's markdown view, not executed. + */ +export function escapeMdCell(s) { + if (s == null) return ''; + return String(s) + .replace(/\\/g, '\\\\') + .replace(/\|/g, '\\|') + .replace(/\r?\n/g, ' '); +} + /** * Truncate a string keeping head and tail, middle elided. 
* Returns { text, originalBytes, truncatedBytes }. * - If input fits, truncatedBytes = 0 and text is unchanged. * - Else keeps max/2 chars from head and from tail. */ -export function truncateHeadTail(s, max = 10_000) { +export function truncateHeadTail(s, max = OUTPUT_LIMITS.MAX_OUTPUT_LENGTH) { const input = s == null ? '' : String(s); const originalBytes = input.length; if (originalBytes <= max) { @@ -58,7 +83,7 @@ export function formatExecResult({ stderr, code, durationMs, - maxLen = 10_000, + maxLen = OUTPUT_LIMITS.MAX_OUTPUT_LENGTH, }) { const out = truncateHeadTail(stripAnsi(stdout), maxLen); const err = truncateHeadTail(stripAnsi(stderr), maxLen); diff --git a/src/profile-loader.js b/src/profile-loader.js index 4c3a308..51c72e9 100644 --- a/src/profile-loader.js +++ b/src/profile-loader.js @@ -146,12 +146,6 @@ export function setActiveProfile(profileName) { export function createProfile(name, config) { try { const profilePath = path.join(PROFILES_DIR, `${name}.json`); - - // Check if profile already exists - if (fs.existsSync(profilePath)) { - throw new Error(`Profile '${name}' already exists`); - } - const profile = { name: name, description: config.description || `Custom profile: ${name}`, @@ -159,10 +153,22 @@ export function createProfile(name, config) { hooks: config.hooks || {} }; - fs.writeFileSync(profilePath, JSON.stringify(profile, null, 2)); + // Atomic "create if not exists": open with wx flag so the kernel refuses + // to clobber an existing file. Eliminates the check-then-act race that + // existsSync() + writeFileSync() would introduce. 
+ const fd = fs.openSync(profilePath, 'wx'); + try { + fs.writeSync(fd, JSON.stringify(profile, null, 2)); + } finally { + fs.closeSync(fd); + } return true; } catch (error) { - console.error(`Error creating profile: ${error.message}`); + if (error.code === 'EEXIST') { + console.error(`Error creating profile: Profile '${name}' already exists`); + } else { + console.error(`Error creating profile: ${error.message}`); + } return false; } } diff --git a/src/server-groups.js b/src/server-groups.js index 83d82e3..caaf435 100644 --- a/src/server-groups.js +++ b/src/server-groups.js @@ -115,7 +115,7 @@ class ServerGroups { // This will be populated from the main server config const servers = []; - for (const [key, value] of Object.entries(process.env)) { + for (const key of Object.keys(process.env)) { if (key.startsWith('SSH_SERVER_') && key.endsWith('_HOST')) { const serverName = key.replace('SSH_SERVER_', '').replace('_HOST', '').toLowerCase(); servers.push(serverName); diff --git a/src/session-manager.js b/src/session-manager.js deleted file mode 100644 index 8f4435f..0000000 --- a/src/session-manager.js +++ /dev/null @@ -1,411 +0,0 @@ -/** - * SSH Session Manager - * Manages persistent SSH sessions with state and context - */ - -import { v4 as uuidv4 } from 'uuid'; -import { logger } from './logger.js'; - -// Map to store active sessions -const sessions = new Map(); - -// Session states -export const SESSION_STATES = { - INITIALIZING: 'initializing', - READY: 'ready', - BUSY: 'busy', - ERROR: 'error', - CLOSED: 'closed' -}; - -class SSHSession { - constructor(id, serverName, ssh) { - this.id = id; - this.serverName = serverName; - this.ssh = ssh; - this.state = SESSION_STATES.INITIALIZING; - this.context = { - cwd: null, - env: {}, - history: [], - variables: {} - }; - this.createdAt = new Date(); - this.lastActivity = new Date(); - this.shell = null; - this.outputBuffer = ''; - this.errorBuffer = ''; - } - - /** - * Initialize the session with a shell - */ - async 
initialize() { - try { - logger.info(`Initializing SSH session ${this.id}`, { - server: this.serverName - }); - - // Start an interactive shell - this.shell = await this.ssh.requestShell({ - term: 'xterm-256color', - cols: 80, - rows: 24 - }); - - // Setup event handlers - this.shell.on('data', (data) => { - this.outputBuffer += data.toString(); - this.lastActivity = new Date(); - - // Log output in verbose mode - if (logger.verbose) { - logger.debug(`Session ${this.id} output`, { - data: data.toString().substring(0, 200) - }); - } - }); - - this.shell.on('close', () => { - logger.info(`Session ${this.id} shell closed`); - this.state = SESSION_STATES.CLOSED; - this.cleanup(); - }); - - this.shell.stderr.on('data', (data) => { - this.errorBuffer += data.toString(); - logger.warn(`Session ${this.id} stderr`, { - error: data.toString() - }); - }); - - // Wait for shell prompt - await this.waitForPrompt(); - - // Allow context queries through standard execute flow - this.state = SESSION_STATES.READY; - - // Get initial working directory - await this.updateContext(); - - logger.info(`Session ${this.id} initialized`, { - server: this.serverName, - cwd: this.context.cwd - }); - - } catch (error) { - this.state = SESSION_STATES.ERROR; - logger.error(`Failed to initialize session ${this.id}`, { - error: error.message - }); - throw error; - } - } - - /** - * Wait for shell prompt - */ - async waitForPrompt(timeout = 5000) { - const startTime = Date.now(); - - while (Date.now() - startTime < timeout) { - // Check if we have a prompt (ends with $ or # typically) - if (this.outputBuffer.match(/[$#>]\s*$/)) { - return true; - } - - // Wait a bit - await new Promise(resolve => setTimeout(resolve, 100)); - } - - throw new Error('Timeout waiting for shell prompt'); - } - - /** - * Update session context (pwd, env) - */ - async updateContext() { - try { - // Get current directory - const pwdResult = await this.execute('pwd', { silent: true }); - if (pwdResult.success) { - 
this.context.cwd = pwdResult.output.trim(); - } - - // Get environment variables (selective) - const envResult = await this.execute('echo $PATH:$USER:$HOME', { silent: true }); - if (envResult.success) { - const [path, user, home] = envResult.output.trim().split(':'); - this.context.env = { PATH: path, USER: user, HOME: home }; - } - } catch (error) { - logger.warn(`Failed to update context for session ${this.id}`, { - error: error.message - }); - } - } - - /** - * Execute a command in the session - */ - async execute(command, options = {}) { - if (this.state !== SESSION_STATES.READY) { - throw new Error(`Session ${this.id} is not ready (state: ${this.state})`); - } - - this.state = SESSION_STATES.BUSY; - this.lastActivity = new Date(); - - try { - // Clear buffers - this.outputBuffer = ''; - this.errorBuffer = ''; - - // Add to history unless silent - if (!options.silent) { - this.context.history.push({ - command, - timestamp: new Date(), - cwd: this.context.cwd - }); - - logger.info(`Session ${this.id} executing`, { - command: command.substring(0, 100), - server: this.serverName - }); - } - - // Send command - this.shell.write(command + '\n'); - - // Wait for command to complete - await this.waitForPrompt(options.timeout || 30000); - - // Parse output (remove command echo and prompt) - let output = this.outputBuffer; - - // Remove the command echo (first line) - const lines = output.split('\n'); - if (lines[0].includes(command)) { - lines.shift(); - } - - // Remove the prompt (last line) - const lastLine = lines[lines.length - 1]; - if (lastLine.match(/[$#>]\s*$/)) { - lines.pop(); - } - - output = lines.join('\n').trim(); - - // Check for command success (basic heuristic) - const success = !this.errorBuffer && !output.includes('command not found'); - - // Update context if command might have changed it - if (command.startsWith('cd ') || command.startsWith('export ')) { - await this.updateContext(); - } - - this.state = SESSION_STATES.READY; - - return { - 
success, - output, - error: this.errorBuffer, - session: this.id - }; - - } catch (error) { - this.state = SESSION_STATES.ERROR; - logger.error(`Session ${this.id} execution failed`, { - command, - error: error.message - }); - throw error; - } - } - - /** - * Set session variable - */ - setVariable(name, value) { - this.context.variables[name] = value; - this.lastActivity = new Date(); - } - - /** - * Get session variable - */ - getVariable(name) { - return this.context.variables[name]; - } - - /** - * Get session info - */ - getInfo() { - return { - id: this.id, - server: this.serverName, - state: this.state, - cwd: this.context.cwd, - env: this.context.env, - created: this.createdAt, - lastActivity: this.lastActivity, - historyCount: this.context.history.length, - variables: Object.keys(this.context.variables) - }; - } - - /** - * Close the session - */ - close() { - logger.info(`Closing session ${this.id}`); - - if (this.shell) { - this.shell.write('exit\n'); - this.shell.end(); - this.shell = null; - } - - this.state = SESSION_STATES.CLOSED; - this.cleanup(); - } - - /** - * Cleanup resources - */ - cleanup() { - sessions.delete(this.id); - this.outputBuffer = ''; - this.errorBuffer = ''; - this.context.history = []; - } -} - -/** - * Create a new SSH session - */ -export async function createSession(serverName, ssh) { - const sessionId = `ssh_${Date.now()}_${uuidv4().substring(0, 8)}`; - - const session = new SSHSession(sessionId, serverName, ssh); - sessions.set(sessionId, session); - - try { - await session.initialize(); - - logger.info('SSH session created', { - id: sessionId, - server: serverName - }); - - return session; - } catch (error) { - sessions.delete(sessionId); - throw error; - } -} - -/** - * Get an existing session - */ -export function getSession(sessionId) { - const session = sessions.get(sessionId); - - if (!session) { - throw new Error(`Session ${sessionId} not found`); - } - - if (session.state === SESSION_STATES.CLOSED) { - throw new 
Error(`Session ${sessionId} is closed`); - } - - return session; -} - -/** - * List all active sessions - */ -export function listSessions() { - const activeSessions = []; - - for (const [id, session] of sessions.entries()) { - if (session.state !== SESSION_STATES.CLOSED) { - activeSessions.push(session.getInfo()); - } - } - - return activeSessions; -} - -/** - * Close a session - */ -export function closeSession(sessionId) { - const session = sessions.get(sessionId); - - if (!session) { - throw new Error(`Session ${sessionId} not found`); - } - - session.close(); - return true; -} - -/** - * Close all sessions for a server - */ -export function closeServerSessions(serverName) { - let closedCount = 0; - - for (const [id, session] of sessions.entries()) { - if (session.serverName === serverName) { - session.close(); - closedCount++; - } - } - - return closedCount; -} - -/** - * Cleanup old sessions - */ -export function cleanupSessions(maxAge = 30 * 60 * 1000) { // 30 minutes default - const now = Date.now(); - let cleanedCount = 0; - - for (const [id, session] of sessions.entries()) { - const age = now - session.lastActivity.getTime(); - - if (age > maxAge) { - logger.info(`Cleaning up inactive session ${id}`, { - age: Math.floor(age / 1000) + 's' - }); - session.close(); - cleanedCount++; - } - } - - return cleanedCount; -} - -// Periodic cleanup of inactive sessions -setInterval(() => { - const cleaned = cleanupSessions(); - if (cleaned > 0) { - logger.info(`Cleaned up ${cleaned} inactive sessions`); - } -}, 5 * 60 * 1000); // Every 5 minutes - -export default { - createSession, - getSession, - listSessions, - closeSession, - closeServerSessions, - cleanupSessions, - SESSION_STATES -}; diff --git a/src/ssh-key-manager.js b/src/ssh-key-manager.js index 0b85e64..eecebf8 100644 --- a/src/ssh-key-manager.js +++ b/src/ssh-key-manager.js @@ -154,9 +154,11 @@ export function removeHostKey(host, port = 22) { */ export async function addHostKey(host, port = 22, keyData = 
null) { try { - // Backup current known_hosts - if (fs.existsSync(KNOWN_HOSTS_PATH)) { + // Backup current known_hosts (ignore if not present -- atomic, no TOCTOU). + try { fs.copyFileSync(KNOWN_HOSTS_PATH, KNOWN_HOSTS_BACKUP); + } catch (e) { + if (e.code !== 'ENOENT') throw e; } // If no key data provided, fetch it @@ -168,11 +170,9 @@ export async function addHostKey(host, port = 22, keyData = null) { keyData = fingerprints.map(fp => fp.fullKey).join('\n'); } - // Ensure .ssh directory exists + // Ensure .ssh directory exists (mkdir recursive is idempotent, no TOCTOU). const sshDir = path.dirname(KNOWN_HOSTS_PATH); - if (!fs.existsSync(sshDir)) { - fs.mkdirSync(sshDir, { mode: 0o700, recursive: true }); - } + fs.mkdirSync(sshDir, { mode: 0o700, recursive: true }); // Append to known_hosts fs.appendFileSync(KNOWN_HOSTS_PATH, keyData + '\n'); diff --git a/src/ssh-manager.js b/src/ssh-manager.js index 3399395..e99448f 100644 --- a/src/ssh-manager.js +++ b/src/ssh-manager.js @@ -1,10 +1,8 @@ import { Client } from 'ssh2'; import fs from 'fs'; import os from 'os'; -import { promisify } from 'util'; import crypto from 'crypto'; -import { isHostKnown, getCurrentHostKey, addHostKey, updateHostKey } from './ssh-key-manager.js'; -import { configLoader } from './config-loader.js'; +import { isHostKnown, getCurrentHostKey, addHostKey } from './ssh-key-manager.js'; import { logger } from './logger.js'; class SSHManager { @@ -246,67 +244,6 @@ class SSHManager { }); } - async execCommandStream(command, options = {}) { - if (!this.connected) { - throw new Error('Not connected to SSH server'); - } - - const { cwd, onStdout, onStderr } = options; - const fullCommand = cwd ? 
`cd ${cwd} && ${command}` : command; - - return new Promise((resolve, reject) => { - this.client.exec(fullCommand, (err, stream) => { - if (err) { - reject(err); - return; - } - - let stdout = ''; - let stderr = ''; - - stream.on('close', (code, signal) => { - resolve({ - stdout, - stderr, - code: code || 0, - signal, - stream - }); - }); - - stream.on('data', (data) => { - const chunk = data.toString(); - stdout += chunk; - if (onStdout) onStdout(chunk); - }); - - stream.stderr.on('data', (data) => { - const chunk = data.toString(); - stderr += chunk; - if (onStderr) onStderr(chunk); - }); - - stream.on('error', reject); - }); - }); - } - - async requestShell(options = {}) { - if (!this.connected) { - throw new Error('Not connected to SSH server'); - } - - return new Promise((resolve, reject) => { - this.client.shell(options, (err, stream) => { - if (err) { - reject(err); - return; - } - resolve(stream); - }); - }); - } - async getSFTP() { if (this._sftpHandle) return this._sftpHandle; @@ -460,23 +397,6 @@ class SSHManager { }); } - async putFiles(files, options = {}) { - const sftp = await this.getSFTP(); - const results = []; - - for (const file of files) { - try { - await this.putFile(file.local, file.remote); - results.push({ ...file, success: true }); - } catch (error) { - results.push({ ...file, success: false, error: error.message }); - if (options.stopOnError) break; - } - } - - return results; - } - isConnected() { return this.connected && this.client && !this.client.destroyed; } diff --git a/src/structured-result.js b/src/structured-result.js index 0a0b0b3..5dc9ffc 100644 --- a/src/structured-result.js +++ b/src/structured-result.js @@ -28,16 +28,44 @@ export function ok(tool, data, meta = {}) { /** * Build an error result. `error` may be Error, string, or object. 
+ * Normalizes shapes so the rendered error text is always useful: + * - Error: -> e.message (keeps stack in `stack` field for debugging) + * - string -> passthrough + * - object -> JSON.stringify (falls back to String() if cyclic) + * - null -> 'unknown error' */ export function fail(tool, error, meta = {}) { - return { + let message; + let stack; + if (error == null) { + message = 'unknown error'; + } else if (error instanceof Error) { + message = error.message || error.name || 'Error'; + stack = error.stack; + } else if (typeof error === 'string') { + message = error; + } else if (typeof error === 'object') { + if (typeof error.message === 'string' && error.message) { + message = error.message; + } else { + try { message = JSON.stringify(error); } + catch (_) { message = String(error); } + } + } else { + message = String(error); + } + const out = { success: false, tool, server: meta.server ?? null, data: null, meta: strip(meta, ['server']), - error: String(error && error.message ? error.message : error), + error: message, }; + if (stack && process.env.MCP_SSH_INCLUDE_STACK === '1') { + out.error_stack = stack; + } + return out; } /** diff --git a/src/tool-annotations.js b/src/tool-annotations.js new file mode 100644 index 0000000..fe33e5c --- /dev/null +++ b/src/tool-annotations.js @@ -0,0 +1,128 @@ +/** + * Per-tool MCP annotations + human titles. + * + * MCP 2025-06-18 lets tool hosts (Claude Code, Codex, etc.) render better UX + * and make smarter auto-approve decisions when tools self-declare intent: + * + * - readOnlyHint : tool never mutates remote state (safe to auto-run) + * - destructiveHint : tool performs destructive ops (caller should confirm) + * - idempotentHint : running twice has same effect as running once + * - openWorldHint : tool interacts with systems outside the declared server + * + * `title` is the human-readable tool name shown in Claude Code's /mcp palette; + * `description` (already set at registration) is for the LLM. 
+ * + * If a tool is absent from this map, it registers with no annotations and no + * title -- that's the "unknown / not annotated" case that clients should treat + * conservatively. + */ + +export const TOOL_ANNOTATIONS = { + // Core + ssh_execute: { + title: 'Execute Remote Command', + annotations: { destructiveHint: true, openWorldHint: true }, + }, + ssh_upload: { + title: 'Upload File to Server', + annotations: { destructiveHint: true, idempotentHint: true, openWorldHint: true }, + }, + ssh_download: { + title: 'Download File from Server', + annotations: { readOnlyHint: true, openWorldHint: true }, + }, + ssh_sync: { + title: 'Rsync Files', + annotations: { destructiveHint: true, idempotentHint: true, openWorldHint: true }, + }, + ssh_list_servers: { + title: 'List Configured Servers', + annotations: { readOnlyHint: true, idempotentHint: true }, + }, + + // Sessions + ssh_session_start: { title: 'Start Interactive Session', annotations: { openWorldHint: true } }, + ssh_session_send: { title: 'Send Command to Session', annotations: { destructiveHint: true, openWorldHint: true } }, + ssh_session_list: { title: 'List Sessions', annotations: { readOnlyHint: true, idempotentHint: true } }, + ssh_session_close: { title: 'Close Session', annotations: { idempotentHint: true } }, + ssh_session_replay: { title: 'Replay Session History', annotations: { readOnlyHint: true, idempotentHint: true } }, + ssh_session_memory: { title: 'Session State Snapshot', annotations: { readOnlyHint: true, idempotentHint: true } }, + + // Monitoring + ssh_health_check: { title: 'Server Health Check', annotations: { readOnlyHint: true, idempotentHint: true, openWorldHint: true } }, + ssh_service_status: { title: 'Check Service Status', annotations: { readOnlyHint: true, idempotentHint: true, openWorldHint: true } }, + ssh_process_manager: { title: 'Manage Remote Processes', annotations: { destructiveHint: true, openWorldHint: true } }, + ssh_monitor: { title: 'Resource Monitor Snapshot', 
annotations: { readOnlyHint: true, idempotentHint: true, openWorldHint: true } }, + ssh_tail: { title: 'Tail Log File', annotations: { readOnlyHint: true, idempotentHint: true, openWorldHint: true } }, + ssh_alert_setup: { + title: 'Configure Health Alerts', + // set/get mutate local config; check is read-only against the remote. + // Taken together, not readOnly and not destructive -- just stateful config. + annotations: { idempotentHint: true }, + }, + + // Backup + ssh_backup_create: { title: 'Create Backup', annotations: { destructiveHint: true, openWorldHint: true } }, + ssh_backup_list: { title: 'List Backups', annotations: { readOnlyHint: true, idempotentHint: true, openWorldHint: true } }, + ssh_backup_restore: { title: 'Restore Backup', annotations: { destructiveHint: true, openWorldHint: true } }, + ssh_backup_schedule: { title: 'Schedule Backup (cron)', annotations: { destructiveHint: true, openWorldHint: true } }, + + // Database + ssh_db_dump: { title: 'Dump Database', annotations: { destructiveHint: true, openWorldHint: true } }, + ssh_db_import: { title: 'Import Database Dump', annotations: { destructiveHint: true, openWorldHint: true } }, + ssh_db_list: { title: 'List Databases / Tables', annotations: { readOnlyHint: true, idempotentHint: true, openWorldHint: true } }, + ssh_db_query: { title: 'Run Read-Only Query', annotations: { readOnlyHint: true, openWorldHint: true } }, + + // Deploy + ssh_deploy: { title: 'Deploy Artifact', annotations: { destructiveHint: true, openWorldHint: true } }, + ssh_deploy_artifact: { title: 'Deploy Artifact (alias)', annotations: { destructiveHint: true, openWorldHint: true } }, + ssh_execute_sudo: { title: 'Execute With Sudo', annotations: { destructiveHint: true, openWorldHint: true } }, + ssh_execute_group: { title: 'Execute Across Group', annotations: { destructiveHint: true, openWorldHint: true } }, + + // Admin / config + ssh_alias: { title: 'Manage Server Aliases', annotations: { idempotentHint: true } }, + 
ssh_command_alias: { title: 'Manage Command Aliases', annotations: { idempotentHint: true } }, + ssh_hooks: { title: 'Manage Automation Hooks', annotations: { idempotentHint: true } }, + ssh_profile: { title: 'Manage Active Profile', annotations: { idempotentHint: true } }, + ssh_group_manage: { title: 'Manage Server Groups', annotations: { idempotentHint: true } }, + ssh_connection_status: { title: 'Connection Pool Status', annotations: { readOnlyHint: true, idempotentHint: true } }, + ssh_history: { title: 'Command History', annotations: { readOnlyHint: true, idempotentHint: true } }, + + // Tunnels + ssh_tunnel_create: { title: 'Create SSH Tunnel', annotations: { destructiveHint: true, openWorldHint: true } }, + ssh_tunnel_list: { title: 'List Tunnels', annotations: { readOnlyHint: true, idempotentHint: true } }, + ssh_tunnel_close: { title: 'Close Tunnel', annotations: { idempotentHint: true } }, + + // Host keys / auth + ssh_key_manage: { title: 'Manage SSH Host Keys', annotations: { idempotentHint: true, openWorldHint: true } }, + + // Gamechanger + ssh_cat: { title: 'View Remote File', annotations: { readOnlyHint: true, idempotentHint: true, openWorldHint: true } }, + ssh_systemctl: { title: 'Systemd Unit Control', annotations: { destructiveHint: true, openWorldHint: true } }, + ssh_journalctl: { title: 'Systemd Journal Query', annotations: { readOnlyHint: true, idempotentHint: true, openWorldHint: true } }, + ssh_docker: { title: 'Docker Control', annotations: { destructiveHint: true, openWorldHint: true } }, + ssh_port_test: { title: 'Port / TLS / HTTP Probe', annotations: { readOnlyHint: true, idempotentHint: true, openWorldHint: true } }, + ssh_diff: { title: 'Diff Two Files', annotations: { readOnlyHint: true, idempotentHint: true, openWorldHint: true } }, + ssh_edit: { title: 'Atomic File Edit', annotations: { destructiveHint: true, openWorldHint: true } }, + ssh_tail_start: { title: 'Start Live Tail', annotations: { openWorldHint: true } }, + 
ssh_tail_read: { title: 'Read Live Tail Buffer', annotations: { readOnlyHint: true, idempotentHint: true } }, + ssh_tail_stop: { title: 'Stop Live Tail', annotations: { idempotentHint: true } }, + ssh_plan: { title: 'Plan + Approve Execution', annotations: { destructiveHint: true, openWorldHint: true } }, +}; + +/** + * Merge MCP annotations + title into a registerTool schema for the given + * tool. If we don't know the tool, returns the schema unchanged. + */ +export function withAnnotations(toolName, schema) { + const ann = TOOL_ANNOTATIONS[toolName]; + if (!ann) return schema; + return { + ...schema, + ...(ann.title && !schema.title ? { title: ann.title } : {}), + // Spread table defaults first, then caller-provided annotations so the + // caller can selectively override defaults (e.g. flip openWorldHint off + // for a future tool where it doesn't apply). + ...(ann.annotations ? { annotations: { ...ann.annotations, ...(schema.annotations || {}) } } : {}), + }; +} diff --git a/src/tool-exec-stream.js b/src/tool-exec-stream.js deleted file mode 100644 index e03fde1..0000000 --- a/src/tool-exec-stream.js +++ /dev/null @@ -1,87 +0,0 @@ -/** - * End-to-end glue for the `ssh_execute_stream` tool handler. - * - * Pure function: takes an ssh2-shaped client, runs streamExecCommand, formats - * with formatExecResult, emits MCP content. Keeps the handler thin and testable. - */ - -import { streamExecCommand } from './stream-exec.js'; -import { - formatExecResult, - makeMcpContent, -} from './output-formatter.js'; - -/** - * Run a streaming exec and return an MCP tool response. 
- * - * @param {Object} args - * @param {Object} args.client ssh2 Client (or shape-compatible) - * @param {string} args.server logical server name for the header - * @param {string} args.command remote command - * @param {string} [args.cwd] working directory (shell-quoted internally) - * @param {number} [args.timeoutMs=120000] - * @param {number} [args.maxLen=10000] per-stream render cap - * @param {number} [args.debounceMs=50] - * @param {'markdown'|'json'|'both'} [args.format='markdown'] - * @param {Function} [args.onChunk] forwarded to streamExecCommand - * @returns {Promise<{content: Array, isError?: boolean}>} - */ -export async function runStreamedExec({ - client, - server, - command, - cwd, - timeoutMs = 120_000, - maxLen = 10_000, - debounceMs = 50, - format = 'markdown', - onChunk, -}) { - const startedAt = Date.now(); - let result; - let error; - try { - result = await streamExecCommand(client, command, { - cwd, - timeoutMs, - debounceMs, - onChunk, - }); - } catch (e) { - error = e; - } - const durationMs = Date.now() - startedAt; - - if (error) { - const exec = formatExecResult({ - server, - command, - cwd, - stdout: '', - stderr: String(error.message || error), - code: -1, - durationMs, - maxLen, - }); - return { - content: makeMcpContent(exec, { format }), - isError: true, - }; - } - - const exec = formatExecResult({ - server, - command, - cwd, - stdout: result.stdout, - stderr: result.stderr, - code: result.code, - durationMs, - maxLen, - }); - return { - content: makeMcpContent(exec, { format }), - // Non-zero exit is NOT a tool-level isError -- the command ran, just failed. - // Claude can read exit_code from the JSON or badge from the markdown. - }; -} diff --git a/src/tools/alerts-tools.js b/src/tools/alerts-tools.js new file mode 100644 index 0000000..0aac6e7 --- /dev/null +++ b/src/tools/alerts-tools.js @@ -0,0 +1,229 @@ +/** + * ssh_alert_setup -- threshold-based alerting on top of ssh_health_check. 
+ * + * Three actions: + * - set: persist {cpu, memory, disk, enabled} for a server to the + * operator-local store (~/.ssh-manager/alerts/.json). + * - get: read the persisted config for a server. + * - check: run ssh_health_check, compare each metric against the persisted + * thresholds, return {alerts:[...], alert_count, status}. + * + * We deliberately store thresholds on the operator's machine (not on the + * target host). Reasons: + * + * - Multiple operators targeting the same fleet don't overwrite each + * other's thresholds. + * - No sudo / remote filesystem writes are needed to configure alerts. + * - Thresholds survive target-host reboots or disk changes. + * + * This is a *setup + on-demand check* tool. There is no background runner -- + * operators wire `check` into cron, CI, or an `ssh_hooks` action to get + * continuous monitoring behavior. That choice keeps the MCP server stateless + * and composable. + */ + +import fs from 'fs'; +import os from 'os'; +import path from 'path'; +import { ok, fail, toMcp, defaultRender } from '../structured-result.js'; +import { handleSshHealthCheck } from './monitoring-tools.js'; + +const ALERTS_DIR = path.join(os.homedir(), '.ssh-manager', 'alerts'); + +// Defaults are deliberately conservative -- set() stores thresholds explicitly, +// we don't silently infer "you probably meant 80%". +const VALID_ACTIONS = new Set(['set', 'get', 'check']); + +function configPathFor(server) { + // Server names are already normalized to [a-z0-9_-] via server-aliases; guard + // anyway so a crafted name can't escape ALERTS_DIR. 
+ const safe = String(server).toLowerCase().replace(/[^a-z0-9_-]/g, '_'); + return path.join(ALERTS_DIR, `${safe}.json`); +} + +function readConfig(server) { + const p = configPathFor(server); + if (!fs.existsSync(p)) return null; + try { + const raw = fs.readFileSync(p, 'utf8'); + const parsed = JSON.parse(raw); + if (parsed && typeof parsed === 'object' && typeof parsed.version === 'number') { + return parsed; + } + } catch (_) { /* corrupt file -> treat as missing */ } + return null; +} + +function writeConfig(server, cfg) { + fs.mkdirSync(ALERTS_DIR, { recursive: true }); + const p = configPathFor(server); + // Atomic write: tmp + rename so a crash mid-write can't corrupt the file. + const tmp = `${p}.tmp`; + fs.writeFileSync(tmp, JSON.stringify(cfg, null, 2), { mode: 0o600 }); + fs.renameSync(tmp, p); + return p; +} + +function evaluateThresholds(metrics, cfg) { + const alerts = []; + // CPU usage: metrics.cpu.usage_percent (see monitoring-tools.parseTopCpu). + const cpuPct = Number(metrics?.cpu?.usage_percent); + if (Number.isFinite(cpuPct) && Number.isFinite(cfg.cpuThreshold) && cpuPct >= cfg.cpuThreshold) { + alerts.push({ + metric: 'cpu', observed: cpuPct, threshold: cfg.cpuThreshold, + message: `CPU at ${cpuPct.toFixed(1)}% >= threshold ${cfg.cpuThreshold}%`, + }); + } + // Memory used%: parseFreeMem returns { total_bytes, used_bytes, free_bytes, used_percent }. + const memPct = Number(metrics?.memory?.used_percent); + if (Number.isFinite(memPct) && Number.isFinite(cfg.memoryThreshold) && memPct >= cfg.memoryThreshold) { + alerts.push({ + metric: 'memory', observed: memPct, threshold: cfg.memoryThreshold, + message: `memory at ${memPct.toFixed(1)}% >= threshold ${cfg.memoryThreshold}%`, + }); + } + // Disk: parseDf returns an array of { filesystem, mount, used_percent, ... }. 
+ if (Array.isArray(metrics?.disk) && Number.isFinite(cfg.diskThreshold)) { + for (const fs_ of metrics.disk) { + const pct = Number(fs_?.used_percent); + if (Number.isFinite(pct) && pct >= cfg.diskThreshold) { + alerts.push({ + metric: 'disk', mount: fs_.mount || fs_.filesystem, + observed: pct, threshold: cfg.diskThreshold, + message: `disk ${fs_.mount || fs_.filesystem} at ${pct.toFixed(1)}% >= threshold ${cfg.diskThreshold}%`, + }); + } + } + } + return alerts; +} + +function renderAlertResult(result) { + if (!result.success) return defaultRender(result); + const d = result.data; + const lines = []; + const srv = result.server ? ` | \`${result.server}\`` : ''; + lines.push(`[ok] **ssh_alert_setup**${srv} | action: \`${d.action}\``); + if (d.action === 'set') { + lines.push(''); + lines.push(`thresholds saved to \`${d.config_path}\``); + lines.push('```json'); + lines.push(JSON.stringify(d.config, null, 2)); + lines.push('```'); + } else if (d.action === 'get') { + lines.push(''); + if (d.config) { + lines.push('```json'); + lines.push(JSON.stringify(d.config, null, 2)); + lines.push('```'); + } else { + lines.push('_no alert config_ -- run with `action=set` first'); + } + } else if (d.action === 'check') { + lines.push(''); + lines.push(`status: \`${d.status}\` | alerts: ${d.alert_count}`); + if (d.alerts && d.alerts.length > 0) { + for (const a of d.alerts) lines.push(`- **${a.metric}**: ${a.message}`); + } + } + return lines.join('\n'); +} + +export async function handleSshAlertSetup({ getConnection, args }) { + const { + server, action, + cpuThreshold, memoryThreshold, diskThreshold, + enabled = true, + format = 'markdown', + } = args || {}; + + if (!server) { + return toMcp(fail('ssh_alert_setup', 'server is required'), { format, renderer: renderAlertResult }); + } + if (!action || !VALID_ACTIONS.has(action)) { + return toMcp(fail('ssh_alert_setup', + `action must be one of: ${[...VALID_ACTIONS].join(', ')}`, { server }), + { format, renderer: 
renderAlertResult }); + } + + try { + if (action === 'set') { + const cfg = { + version: 1, + server, + enabled: !!enabled, + cpuThreshold: Number.isFinite(Number(cpuThreshold)) ? Number(cpuThreshold) : null, + memoryThreshold: Number.isFinite(Number(memoryThreshold)) ? Number(memoryThreshold) : null, + diskThreshold: Number.isFinite(Number(diskThreshold)) ? Number(diskThreshold) : null, + updated_at: new Date().toISOString(), + }; + const p = writeConfig(server, cfg); + return toMcp(ok('ssh_alert_setup', { + action: 'set', config: cfg, config_path: p, + }, { server }), { format, renderer: renderAlertResult }); + } + + if (action === 'get') { + const cfg = readConfig(server); + return toMcp(ok('ssh_alert_setup', { + action: 'get', config: cfg, config_path: configPathFor(server), + }, { server }), { format, renderer: renderAlertResult }); + } + + // action === 'check' + const cfg = readConfig(server); + if (!cfg) { + return toMcp(fail('ssh_alert_setup', + 'no alert configuration found for this server; run with action=set first', { server }), + { format, renderer: renderAlertResult }); + } + if (cfg.enabled === false) { + return toMcp(ok('ssh_alert_setup', { + action: 'check', status: 'disabled', + thresholds: cfg, alert_count: 0, alerts: [], + }, { server }), { format, renderer: renderAlertResult }); + } + + // Delegate to the existing health aggregator so parsers stay in one place. 
+ const hc = await handleSshHealthCheck({ getConnection, args: { server, format: 'json' } }); + if (hc.isError) { + const parsed = safeJson(hc.content?.[0]?.text); + return toMcp(fail('ssh_alert_setup', + `health_check failed: ${parsed?.error || 'unknown'}`, { server }), + { format, renderer: renderAlertResult }); + } + const parsed = safeJson(hc.content?.[0]?.text); + if (!parsed || !parsed.success) { + return toMcp(fail('ssh_alert_setup', + `health_check returned malformed payload: ${parsed?.error || 'no data'}`, { server }), + { format, renderer: renderAlertResult }); + } + + const alerts = evaluateThresholds(parsed.data, cfg); + const status = alerts.length === 0 ? 'ok' : 'alerts_triggered'; + + return toMcp(ok('ssh_alert_setup', { + action: 'check', status, thresholds: cfg, + alert_count: alerts.length, alerts, + current_metrics: { + cpu: parsed.data?.cpu || null, + memory: parsed.data?.memory || null, + disk: parsed.data?.disk || null, + }, + }, { server }), { format, renderer: renderAlertResult }); + } catch (e) { + return toMcp(fail('ssh_alert_setup', e, { server }), + { format, renderer: renderAlertResult }); + } +} + +function safeJson(s) { + if (typeof s !== 'string') return null; + try { return JSON.parse(s); } + catch (_) { return null; } +} + +// Exported for tests. +export const __internals = { + configPathFor, readConfig, writeConfig, evaluateThresholds, ALERTS_DIR, +}; diff --git a/src/tools/backup-tools.js b/src/tools/backup-tools.js index 2c35108..f702499 100644 --- a/src/tools/backup-tools.js +++ b/src/tools/backup-tools.js @@ -62,11 +62,18 @@ function defaultOutputPath(type, name, { gzip, backupDir = DEFAULT_BACKUP_DIR, b * `outputPath` must already be shQuoted by caller when fed into the command, * but we receive the raw path and shQuote internally. * - * Returns { command, envPrefix } -- envPrefix must be prepended verbatim by caller. 
+ * Returns { command, envPrefix, envCarriesSecret }: + * - command: the shell command body + * - envPrefix: `VAR=... VAR=... ` that must be prepended by caller + * - envCarriesSecret: true iff envPrefix encodes a credential that must + * NOT be written to persistent locations (crontab, scripts, logs). + * mongo's MCP_BACKUP_URI may or may not carry a password -- set based on + * whether a password was actually supplied. */ export function buildBackupCommand({ backup_type, database, paths, user, password, host, port, outputPath, gzip = true }) { const out = shQuote(outputPath); const ensureDir = `mkdir -p ${shQuote(dirnameOf(outputPath))} && `; + const hasPassword = password != null && password !== ''; switch (backup_type) { case 'mysql': { const parts = ['mysqldump', '--single-transaction', '--routines', '--triggers']; @@ -76,7 +83,7 @@ export function buildBackupCommand({ backup_type, database, paths, user, passwor if (database) parts.push(shQuote(database)); const core = `MYSQL_PWD="$MCP_BACKUP_PASS" ${parts.join(' ')}`; const cmd = gzip ? `${core} | gzip > ${out}` : `${core} > ${out}`; - return { command: ensureDir + cmd, envPrefix: envFor(password) }; + return { command: ensureDir + cmd, envPrefix: envFor(password), envCarriesSecret: hasPassword }; } case 'postgresql': { const parts = ['pg_dump', '--format=custom', '--clean', '--if-exists']; @@ -86,18 +93,20 @@ export function buildBackupCommand({ backup_type, database, paths, user, passwor if (database) parts.push(shQuote(database)); const core = `PGPASSWORD="$MCP_BACKUP_PASS" ${parts.join(' ')}`; const cmd = gzip ? `${core} | gzip > ${out}` : `${core} > ${out}`; - return { command: ensureDir + cmd, envPrefix: envFor(password) }; + return { command: ensureDir + cmd, envPrefix: envFor(password), envCarriesSecret: hasPassword }; } case 'mongodb': { // mongodump --archive (no value) writes the single-stream archive to // stdout; we then pipe / redirect to `out` (shQuoted). 
URI is carried
       // via MCP_BACKUP_URI so user/pass/host/port/db never appear in argv.
+      // The URI only carries a secret if a password was actually supplied;
+      // a plain `mongodb://localhost:27017/db` URI is fine to persist.
       const uri = buildMongoUri({ user, password, host, port, database });
       const envPrefix = `MCP_BACKUP_URI=${shQuote(uri)} `;
       const parts = ['mongodump', '--uri', '"$MCP_BACKUP_URI"', '--archive'];
       if (gzip) parts.push('--gzip');
       const cmd = `${parts.join(' ')} > ${out}`;
-      return { command: ensureDir + cmd, envPrefix };
+      return { command: ensureDir + cmd, envPrefix, envCarriesSecret: hasPassword };
     }
     case 'files': {
       const pathList = (paths || []).map(p => shQuote(p)).join(' ');
@@ -106,7 +115,7 @@ export function buildBackupCommand({ backup_type, database, paths, user, passwor
       // they gave us. This matches the spec: `tar -czf OUTPUT -C / PATH`.
       const flag = gzip ? '-czf' : '-cf';
       const cmd = `tar ${flag} ${out} -C / ${pathList}`;
-      return { command: ensureDir + cmd, envPrefix: '' };
+      return { command: ensureDir + cmd, envPrefix: '', envCarriesSecret: false };
     }
     default:
       throw new Error(`unsupported backup_type: ${backup_type}`);
@@ -161,8 +170,9 @@ async function remoteSizeBytes(client, remotePath) {
  */
 async function writeMeta(client, metaPath, metaObject, { timeout = 15_000 } = {}) {
   const json = JSON.stringify(metaObject);
-  // printf '%s' 'json' -> the JSON is a single shQuoted arg.
-  const cmd = `printf '%s' ${shQuote(json)} > ${shQuote(metaPath)}`;
+  // `printf -- '%s' ...`: `--` ends option parsing BEFORE the format string;
+  // the JSON itself is a quoted ARGUMENT, so `%` inside it is never a directive.
+  const cmd = `printf -- '%s' ${shQuote(json)} > ${shQuote(metaPath)}`;
   const r = await streamExecCommand(client, cmd, { timeoutMs: timeout });
   if (r.code !== 0) {
     throw new Error(`meta write failed: ${(r.stderr || '').trim() || `exit ${r.code}`}`);
@@ -637,17 +647,48 @@ export async function handleSshBackupSchedule({ getConnection, args }) {
   if (!VALID_TYPES.has(backup_type)) {
     return toMcp(fail('ssh_backup_schedule', `invalid backup_type: ${backup_type}`, { server }), { format });
   }
-  if (!cron || typeof cron !== 'string' || cron.trim().split(/\s+/).length < 5) {
+  if (!cron || typeof cron !== 'string') {
     return toMcp(fail('ssh_backup_schedule', 'cron is required (e.g. "0 2 * * *")', { server }), { format });
   }
+  const cronTrimmed = cron.trim();
+  // Reject embedded newlines/CR/tab -- cron must be a single line. Without this,
+  // a caller could smuggle extra crontab entries via "0 0 * * *\nMALICIOUS\n".
+  if (/[\r\n\t]/.test(cronTrimmed)) {
+    return toMcp(fail('ssh_backup_schedule',
+      'cron must be a single line (no newlines/tabs)', { server }), { format });
+  }
+  // Reject anything that looks like it came through a shell expansion break
+  // (backticks, $() etc) even though shQuote would neutralize them --
+  // defense in depth + clearer UX on malformed input.
+  if (/[`$]/.test(cronTrimmed)) {
+    return toMcp(fail('ssh_backup_schedule',
+      'cron cannot contain shell metacharacters ($, `)', { server }), { format });
+  }
+  // A cron expression is 5 (or 6 with seconds) whitespace-separated time fields.
+  // Split on literal space only -- newlines were already rejected above.
+  const fields = cronTrimmed.split(/ +/);
+  if (fields.length < 5) {
+    return toMcp(fail('ssh_backup_schedule',
+      'cron must have at least 5 time fields (e.g. 
"0 2 * * *")', { server }), { format }); + } + + // Refuse to schedule if a password was passed -- writing it into crontab + // would persist the secret in plaintext (readable to anyone who gains the + // user's shell) and contradicts the "passwords never in argv/storage" + // invariant the rest of this server holds. Callers must pre-populate + // ~/.my.cnf / ~/.pgpass / PGPASSFILE on the target host. + if (password && (backup_type === 'mysql' || backup_type === 'postgresql' || backup_type === 'mongodb')) { + return toMcp(fail('ssh_backup_schedule', + 'refusing to embed password in crontab. Pre-configure credentials on the target host ' + + '(~/.my.cnf for mysql, ~/.pgpass or PGPASSFILE for postgresql, URI without password for ' + + 'mongodb) and omit the password argument.', + { server }), { format }); + } // Build the backup command that cron will run. Output path is templated with - // $(date) so each run produces a distinct artifact. We intentionally do NOT - // embed the password into the cron line -- caller should put `password` into - // ~/.my.cnf, ~/.pgpass, or environment to pick it up at run time. - // - // Schedule is a best-effort convenience: if `password` was provided, we include - // it inline but ALSO warn in the plan. + // $(date) so each run produces a distinct artifact. envPrefix is now empty + // because we reject password up front -- if that invariant ever changes, + // this is the place to carefully route the secret to a secured file. const targetName = backup_type === 'files' ? 
((paths && paths[0]) || 'files').replace(/^\//, '').replace(/\//g, '_') || 'files' : database || backup_type; @@ -656,30 +697,41 @@ export async function handleSshBackupSchedule({ getConnection, args }) { let cmdBundle; try { cmdBundle = buildBackupCommand({ - backup_type, database, paths, user, password, host, port, + backup_type, database, paths, user, password: undefined, host, port, outputPath: scheduledOutTemplate, gzip, }); } catch (e) { return toMcp(fail('ssh_backup_schedule', e.message || String(e), { server }), { format }); } - const fullCmd = cmdBundle.envPrefix + cmdBundle.command; + if (cmdBundle.envCarriesSecret) { + // Defense in depth: after we scrubbed the password above, the builder + // still reports a secret-bearing env prefix. Bail rather than install + // something surprising in crontab. + return toMcp(fail('ssh_backup_schedule', + 'internal: build returned secret env prefix; refusing to install cron line', + { server }), { format }); + } + + // Non-secret env prefix (e.g. an anonymous mongodb URI) is kept so the cron + // job can execute correctly; the URI encodes db/host/port, not credentials. + const fullCmd = (cmdBundle.envPrefix || '') + cmdBundle.command; const marker = `# claude-code-ssh-backup:${backup_type}:${targetName}`; - const cronLine = `${cron.trim()} ${fullCmd} ${marker}`; + const cronLine = `${cronTrimmed} ${fullCmd} ${marker}`; if (isPreview) { const plan = buildPlan({ action: 'backup-schedule', target: `${server}:crontab`, effects: [ - `cron: \`${cron.trim()}\``, + `cron: \`${cronTrimmed}\``, `backup_type: \`${backup_type}\``, backup_type === 'files' ? `paths: ${(paths || []).map(p => `\`${p}\``).join(', ')}` : `database: \`${database}\``, `output template: \`${scheduledOutTemplate}\``, 'appends to user crontab; other entries preserved', - password ? 
'WARNING: password embedded in cron line -- prefer ~/.my.cnf or ~/.pgpass' : 'no password embedded', + 'credentials must be pre-configured on host (~/.my.cnf, ~/.pgpass, PGPASSFILE, or URI)', ], reversibility: 'manual', risk: 'medium', @@ -714,7 +766,7 @@ export async function handleSshBackupSchedule({ getConnection, args }) { return toMcp( ok('ssh_backup_schedule', { - cron: cron.trim(), + cron: cronTrimmed, cron_line: cronLine, marker, backup_type, diff --git a/src/tools/cat-tools.js b/src/tools/cat-tools.js index 93b5203..0f191f5 100644 --- a/src/tools/cat-tools.js +++ b/src/tools/cat-tools.js @@ -60,6 +60,7 @@ export async function handleSshCat({ getConnection, args }) { timeout = 15_000, maxLen = 10_000, format = 'markdown', + abortSignal, } = args; if (!file) { @@ -77,7 +78,7 @@ export async function handleSshCat({ getConnection, args }) { let result, error; try { - result = await streamExecCommand(client, command, { timeoutMs: timeout }); + result = await streamExecCommand(client, command, { timeoutMs: timeout, abortSignal }); } catch (e) { error = e; } const durationMs = Date.now() - startedAt; diff --git a/src/tools/db-tools.js b/src/tools/db-tools.js index 464917f..f4bde6b 100644 --- a/src/tools/db-tools.js +++ b/src/tools/db-tools.js @@ -26,6 +26,46 @@ const DEFAULT_TIMEOUT_MS = 60_000; const DEFAULT_LIMIT = 1000; const MAX_ALLOWED_LIMIT = 100_000; +/** + * Conservative SQL-identifier validator (database / user names). + * + * shQuote() is correct for SHELL quoting but a value like `app'; DROP DATABASE x; --` + * shell-unquotes to a literal SQL string and becomes an INJECTED SQL token when the + * outer `-e ''` is parsed by mysql/psql. We don't render idents as SQL string + * literals anywhere safe -- `SHOW TABLES FROM 'name'`, `pg_database_size('name')`, + * and the parameterless `mysqldump ` all treat the value as an identifier. 
+ * So we require a syntactic subset that every mainstream DBMS allows for + * database/role/schema/table names: + * [A-Za-z0-9_][A-Za-z0-9_.-]{0,63} + * Max 64 chars (MySQL cap is 64, PG is 63). `.` allowed so `schema.table` works for + * mongodump --db / pg_database_size callers that pass schema-qualified names. + * No spaces, no quotes, no semicolons, no backticks, no backslashes. + */ +const SQL_IDENT_RE = /^[A-Za-z0-9_][A-Za-z0-9_.-]{0,63}$/; +function isSafeSqlIdent(s) { + return typeof s === 'string' && SQL_IDENT_RE.test(s); +} +function rejectBadIdent(tool, field, value, { server, format }) { + return toMcp( + fail(tool, `${field} contains unsafe characters (must match [A-Za-z0-9_][A-Za-z0-9_.-]{0,63})`, { server }), + { format }, + ); +} + +/** + * Assemble a MongoDB URI for the `mongo` family so we can hand it to mongosh + * via env var instead of argv. Defaults to localhost:27017 because the SSH + * server IS typically the DB host in this deployment model. + */ +function buildMongoConnectionUri({ user, password, host = 'localhost', port = 27017, database, authSource }) { + const userinfo = user + ? (password ? `${encodeURIComponent(user)}:${encodeURIComponent(password)}@` : `${encodeURIComponent(user)}@`) + : ''; + const db = database ? `/${encodeURIComponent(database)}` : ''; + const q = authSource ? `?authSource=${encodeURIComponent(authSource)}` : ''; + return `mongodb://${userinfo}${host}:${port}${db}${q}`; +} + // -------------------------------------------------------------------------- // Helpers // -------------------------------------------------------------------------- @@ -113,19 +153,24 @@ export function buildPostgresQueryCommand({ database, query, user }) { } /** - * Build the MongoDB query command. Credentials via URI env var. - * `query` here is expected to be a JS snippet (e.g. 'db.users.find({active:true}).limit(100).toArray()'). + * Build the MongoDB query command. Credentials via SSH_MGR_DB_URI env var + * (never argv). 
`query` is a JS snippet; the db is switched via
+ * `Mongo(...).getDB(...)` inside the eval so we don't rely on mongosh's
+ * positional-URI parsing.
  */
-export function buildMongoQueryCommand({ database, query, user }) {
-  // MONGO_URL carried via env var; mongosh accepts `mongodb://user:pw@host/db`.
-  // We fall back to plain mongosh against localhost when no URL is configured.
-  const parts = ['mongosh', '--quiet'];
-  if (database) parts.push(shQuote(database));
-  parts.push('--eval', shQuote(query));
-  if (user) {
-    parts.splice(2, 0, '-u', shQuote(user), '-p', '"$SSH_MGR_DB_PASS"');
-  }
-  return parts.join(' ');
+export function buildMongoQueryCommand({ database, query, user: _user }) {
+  // Assemble a single eval that: (a) connects via URI read from env (so no
+  // credentials appear in argv on the target host), (b) selects the target
+  // database via connection.getDB() instead of a positional, (c) runs the
+  // caller's snippet with the correct `db` binding.
+  const dbLit = JSON.stringify(database || 'admin');
+  const wrappedEval =
+    'const __mgr_conn = new Mongo(process.env.SSH_MGR_DB_URI); ' +
+    `const db = __mgr_conn.getDB(${dbLit}); ` +
+    query;
+  // --nodb tells mongosh NOT to auto-connect; we build the connection in the
+  // eval so mongosh never sees a URI in argv.
+  return `mongosh --quiet --nodb --eval ${shQuote(wrappedEval)}`;
 }
 
 // --------------------------------------------------------------------------
@@ -212,7 +257,8 @@ export function buildImportCommand({ db_type, database, input_path, user }) {
   if (db_type === 'mongodb') {
     const userArgs = user ? ` -u ${shQuote(user)} -p "$SSH_MGR_DB_PASS"` : '';
     const gzipFlag = gz || String(input_path).endsWith('.archive.gz') ? 
' --gzip' : ''; - return `mongorestore${userArgs} --db ${shQuote(database)}${gzipFlag} --archive=${input_path.replace(/'/g, '\'\\\'\'')}`; + // shQuote wraps in single quotes: --archive='/path with spaces/db.archive' + return `mongorestore${userArgs} --db ${shQuote(database)}${gzipFlag} --archive=${shQuote(input_path)}`; } return ''; } @@ -264,6 +310,12 @@ export async function handleSshDbQuery({ getConnection, args }) { if (!query || typeof query !== 'string') { return toMcp(fail('ssh_db_query', 'query is required'), { format }); } + if (database != null && !isSafeSqlIdent(database)) { + return rejectBadIdent('ssh_db_query', 'database', database, { server, format }); + } + if (user != null && !isSafeSqlIdent(user)) { + return rejectBadIdent('ssh_db_query', 'user', user, { server, format }); + } const cappedLimit = safeInt(limit, { min: 1, max: MAX_ALLOWED_LIMIT, fallback: DEFAULT_LIMIT }); @@ -310,14 +362,19 @@ export async function handleSshDbQuery({ getConnection, args }) { // Build command let cmd; - if (db_type === 'mysql') cmd = buildMySqlQueryCommand({ database, query: finalQuery, user }); - else if (db_type === 'postgresql') cmd = buildPostgresQueryCommand({ database, query: finalQuery, user }); - else cmd = buildMongoQueryCommand({ database, query: finalQuery, user }); - - // Wrap with env-var password injection so the secret never touches argv. - // `SSH_MGR_DB_PASS` is set inline before the command; shell exports it to - // the child (mysql/psql/mongosh) via MYSQL_PWD/PGPASSWORD. 
- const envPrefix = `SSH_MGR_DB_PASS=${shQuote(password)} `; + let envPrefix; + if (db_type === 'mysql') { + cmd = buildMySqlQueryCommand({ database, query: finalQuery, user }); + envPrefix = `SSH_MGR_DB_PASS=${shQuote(password)} `; + } else if (db_type === 'postgresql') { + cmd = buildPostgresQueryCommand({ database, query: finalQuery, user }); + envPrefix = `SSH_MGR_DB_PASS=${shQuote(password)} `; + } else { + // mongo: URI via env so the password never enters argv on the target host. + cmd = buildMongoQueryCommand({ database, query: finalQuery, user }); + const uri = buildMongoConnectionUri({ user, password, database }); + envPrefix = `SSH_MGR_DB_URI=${shQuote(uri)} `; + } const fullCmd = envPrefix + cmd; const startedAt = Date.now(); @@ -403,6 +460,12 @@ export async function handleSshDbList({ getConnection, args }) { if (!['mysql', 'postgresql', 'mongodb'].includes(db_type)) { return toMcp(fail('ssh_db_list', `unsupported db_type: ${db_type}`), { format }); } + if (database != null && !isSafeSqlIdent(database)) { + return rejectBadIdent('ssh_db_list', 'database', database, { server, format }); + } + if (user != null && !isSafeSqlIdent(user)) { + return rejectBadIdent('ssh_db_list', 'user', user, { server, format }); + } let cmd; if (db_type === 'mysql') cmd = buildMySqlListCommand({ database, user }); @@ -475,6 +538,12 @@ export async function handleSshDbDump({ getConnection, args }) { return toMcp(fail('ssh_db_dump', `unsupported db_type: ${db_type}`), { format }); } if (!database) return toMcp(fail('ssh_db_dump', 'database is required'), { format }); + if (!isSafeSqlIdent(database)) { + return rejectBadIdent('ssh_db_dump', 'database', database, { server, format }); + } + if (user != null && !isSafeSqlIdent(user)) { + return rejectBadIdent('ssh_db_dump', 'user', user, { server, format }); + } const outPath = output_path || `/tmp/${database}-${Date.now()}.${db_type === 'mongodb' ? 'archive' : 'sql'}${gzip ? 
'.gz' : ''}`; @@ -568,6 +637,12 @@ export async function handleSshDbImport({ getConnection, args }) { return toMcp(fail('ssh_db_import', `unsupported db_type: ${db_type}`), { format }); } if (!database) return toMcp(fail('ssh_db_import', 'database is required'), { format }); + if (!isSafeSqlIdent(database)) { + return rejectBadIdent('ssh_db_import', 'database', database, { server, format }); + } + if (user != null && !isSafeSqlIdent(user)) { + return rejectBadIdent('ssh_db_import', 'user', user, { server, format }); + } if (!input_path) return toMcp(fail('ssh_db_import', 'input_path is required'), { format }); if (isPreview) { diff --git a/src/tools/deploy-tools.js b/src/tools/deploy-tools.js index 26208df..a82e367 100644 --- a/src/tools/deploy-tools.js +++ b/src/tools/deploy-tools.js @@ -57,7 +57,7 @@ import crypto from 'crypto'; import fs from 'fs'; import { streamExecCommand, shQuote } from '../stream-exec.js'; -import { ok, fail, preview, toMcp, defaultRender } from '../structured-result.js'; +import { ok, fail, preview, toMcp } from '../structured-result.js'; import { buildPlan } from '../preview-mode.js'; import { formatBytes, formatDuration } from '../output-formatter.js'; @@ -81,10 +81,6 @@ function sha256File(localPath) { }); } -function localSize(p) { - try { return fs.statSync(p).size; } catch (_) { return null; } -} - function promisifyGetSftp(client, getSftp) { // Support either an injected getSftp(client)->Promise or the raw // ssh2 Client with client.sftp(cb). @@ -288,18 +284,20 @@ export async function handleSshDeploy({ getConnection, getSftp, args }) { // 5. Health check -- only if all post-hooks passed. 
let healthCheckExit = null; - let healthStderr = ''; if (!firstFailure && health_check) { try { const hr = await streamExecCommand(client, health_check, { timeoutMs: health_timeout }); healthCheckExit = hr.code; - healthStderr = hr.stderr || ''; if (hr.code !== 0) { - firstFailure = { phase: 'health_check', reason: `health_check exited ${hr.code}`, hook: health_check }; + const stderrSnippet = (hr.stderr || '').trim().slice(0, 200); + firstFailure = { + phase: 'health_check', + reason: `health_check exited ${hr.code}${stderrSnippet ? `: ${stderrSnippet}` : ''}`, + hook: health_check + }; } } catch (e) { healthCheckExit = -1; - healthStderr = String(e.message || e); firstFailure = { phase: 'health_check', reason: `health_check errored: ${e.message || e}`, hook: health_check }; } } diff --git a/src/tools/docker-tools.js b/src/tools/docker-tools.js index 5f67f10..be2d0f7 100644 --- a/src/tools/docker-tools.js +++ b/src/tools/docker-tools.js @@ -20,9 +20,12 @@ import { streamExecCommand, shQuote } from '../stream-exec.js'; import { ok, fail, preview, toMcp } from '../structured-result.js'; import { buildPlan } from '../preview-mode.js'; -import { formatDuration } from '../output-formatter.js'; +import { formatDuration, escapeMdCell } from '../output-formatter.js'; const DEFAULT_TIMEOUT_MS = 60_000; +// `docker pull` downloads image layers over the network -- a 1 GB image on a +// 10 Mbps uplink takes ~15 min. 60 s was too tight for anything non-trivial (H2). +const PULL_TIMEOUT_MS = 600_000; const DEFAULT_TAIL_LINES = 100; const MAX_TAIL_LINES = 100_000; @@ -176,7 +179,7 @@ export function renderDocker(result) { lines.push('| --- | --- | --- | --- | --- |'); for (const c of d.containers) { const id = (c.id || '').slice(0, 12); - const ports = (c.ports || '').slice(0, 40).replace(/\|/g, '\\|'); + const ports = escapeMdCell((c.ports || '').slice(0, 40)); lines.push(`| \`${id}\` | ${c.name ?? '--'} | ${c.image ?? '--'} | ${c.status ?? 
'--'} | ${ports} |`); } } @@ -544,7 +547,7 @@ async function runPull({ getConnection, server, image, isPreview, format, onChun let result; try { result = await streamExecCommand(client, remote, { - timeoutMs: DEFAULT_TIMEOUT_MS, + timeoutMs: PULL_TIMEOUT_MS, onChunk, }); } catch (e) { diff --git a/src/tools/exec-tools.js b/src/tools/exec-tools.js index fe4fbca..0d2e7e3 100644 --- a/src/tools/exec-tools.js +++ b/src/tools/exec-tools.js @@ -15,7 +15,7 @@ */ import { streamExecCommand } from '../stream-exec.js'; -import { formatExecResult, renderMarkdown, makeMcpContent } from '../output-formatter.js'; +import { formatExecResult, makeMcpContent } from '../output-formatter.js'; import { ok, fail, preview, toMcp, defaultRender } from '../structured-result.js'; import { buildPlan } from '../preview-mode.js'; import { pMap } from '../concurrency.js'; @@ -35,6 +35,7 @@ export async function handleSshExecute({ getConnection, args }) { format = 'markdown', preview: isPreview = false, onChunk, + abortSignal, } = args; if (isPreview) { @@ -60,7 +61,7 @@ export async function handleSshExecute({ getConnection, args }) { let result, error; try { result = await streamExecCommand(client, command, { - cwd, timeoutMs: timeout, debounceMs: DEFAULT_DEBOUNCE_MS, onChunk, + cwd, timeoutMs: timeout, debounceMs: DEFAULT_DEBOUNCE_MS, onChunk, abortSignal, }); } catch (e) { error = e; } @@ -97,6 +98,7 @@ export async function handleSshExecuteSudo({ getConnection, getServerConfig, arg maxLen = DEFAULT_MAX_LEN, format = 'markdown', preview: isPreview = false, + abortSignal, } = args; // Strip leading "sudo " -- we always add it explicitly below. @@ -141,6 +143,7 @@ export async function handleSshExecuteSudo({ getConnection, getServerConfig, arg // Write password + newline to stream.stdin. `sudo -S` consumes it. // When pw is empty/undefined, send an empty line so passwordless sudo still works. 
stdin: (pw || '') + '\n', + abortSignal, }); } catch (e) { error = e; } @@ -170,6 +173,7 @@ export async function handleSshExecuteGroup({ getConnection, resolveGroup, args format = 'markdown', stopOnError = false, preview: isPreview = false, + abortSignal, } = args; const servers = await resolveGroup(group); @@ -207,7 +211,7 @@ export async function handleSshExecuteGroup({ getConnection, resolveGroup, args } try { const r = await streamExecCommand(client, command, { - cwd, timeoutMs: timeout, debounceMs: DEFAULT_DEBOUNCE_MS, + cwd, timeoutMs: timeout, debounceMs: DEFAULT_DEBOUNCE_MS, abortSignal, }); const formatted = formatExecResult({ server: srv, command, cwd, diff --git a/src/tools/journalctl-tools.js b/src/tools/journalctl-tools.js index 666eb69..4497466 100644 --- a/src/tools/journalctl-tools.js +++ b/src/tools/journalctl-tools.js @@ -19,7 +19,7 @@ import { streamExecCommand, shQuote } from '../stream-exec.js'; import { ok, fail, toMcp } from '../structured-result.js'; -import { formatDuration } from '../output-formatter.js'; +import { formatDuration, escapeMdCell } from '../output-formatter.js'; const DEFAULT_TIMEOUT_MS = 30_000; const DEFAULT_LINES = 100; @@ -184,7 +184,7 @@ export function renderJournalctl(result) { lines.push('| --- | --- | --- | --- |'); for (const e of d.entries) { const t = (e.time ?? '--').toString(); - const msg = (e.message ?? '').toString().slice(0, 120).replace(/\|/g, '\\|').replace(/\n/g, ' '); + const msg = escapeMdCell((e.message ?? '').toString().slice(0, 120)); lines.push(`| ${t} | ${e.priority ?? '--'} | ${e.unit ?? 
'--'} | ${msg} |`); } } else { diff --git a/src/tools/key-tools.js b/src/tools/key-tools.js index a5cacd6..7a5734a 100644 --- a/src/tools/key-tools.js +++ b/src/tools/key-tools.js @@ -35,7 +35,6 @@ import { ok, fail, preview, toMcp } from '../structured-result.js'; import { buildPlan } from '../preview-mode.js'; const KNOWN_HOSTS_PATH = path.join(os.homedir(), '.ssh', 'known_hosts'); -const KNOWN_HOSTS_BACKUP = path.join(os.homedir(), '.ssh', 'known_hosts.mcp-backup'); // Module-level internal store keyed by `${host}:${port}`. Separate from OpenSSH // known_hosts: lets the MCP track keys accepted during its own lifetime without diff --git a/src/tools/monitoring-tools.js b/src/tools/monitoring-tools.js index adea83d..6f49e73 100644 --- a/src/tools/monitoring-tools.js +++ b/src/tools/monitoring-tools.js @@ -13,7 +13,7 @@ import { streamExecCommand, shQuote } from '../stream-exec.js'; import { ok, fail, preview, toMcp } from '../structured-result.js'; import { buildPlan } from '../preview-mode.js'; -import { formatBytes, formatDuration } from '../output-formatter.js'; +import { formatBytes, formatDuration, escapeMdCell } from '../output-formatter.js'; const DEFAULT_TIMEOUT_MS = 30_000; @@ -494,7 +494,7 @@ export function renderServiceStatus(result) { function renderProcTable(rows) { const lines = ['| PID | USER | CPU% | MEM% | CMD |', '| --- | --- | --- | --- | --- |']; for (const p of rows) { - const cmd = (p.cmd || p.comm || '').slice(0, 80).replace(/\|/g, '\\|'); + const cmd = escapeMdCell((p.cmd || p.comm || '').slice(0, 80)); lines.push(`| ${p.pid} | ${p.user ?? 
'--'} | ${fmtPct(p.cpu_pct)} | ${fmtPct(p.mem_pct)} | \`${cmd}\` |`); } return lines.join('\n'); @@ -586,7 +586,10 @@ export async function handleSshHealthCheck({ getConnection, args }) { 'echo \'---UPTIME---\'', 'cat /proc/uptime', 'echo \'---CORES---\'', 'nproc || grep -c ^processor /proc/cpuinfo', ].join('; '); - const remote = `bash -c ${shQuote(command)}`; + // LANG=C / LC_ALL=C pins output format for parsers: avoids locale-specific + // number formatting (e.g. `1,234.5` vs `1.234,5`) and translated column + // headers on non-English hosts. + const remote = `LANG=C LC_ALL=C bash -c ${shQuote(command)}`; const startedAt = Date.now(); let client; diff --git a/src/tools/session-tools.js b/src/tools/session-tools.js index 7dec1e0..7e5e437 100644 --- a/src/tools/session-tools.js +++ b/src/tools/session-tools.js @@ -656,11 +656,44 @@ export async function handleSshSessionList({ args }) { } /** - * ssh_session_close -- idempotent close. + * ssh_session_close -- idempotent close. Special value "all" closes every + * session currently tracked (advertised in the tool schema). */ export async function handleSshSessionClose({ args }) { const { session_id, format = 'markdown' } = args || {}; + if (session_id === 'all') { + // Snapshot IDs before we start deleting so mid-iteration mutations + // can't cause us to skip entries. 
+ const ids = Array.from(sessions.keys()); + const closed = []; + const errors = []; + for (const id of ids) { + const s = sessions.get(id); + if (!s) continue; + let errored = false; + try { await s.close(); } + catch (e) { + errored = true; + errors.push({ session_id: id, error: e.message || String(e) }); + } + sessions.delete(id); + if (!errored) { + closed.push({ session_id: id, server: s.server, command_count: s.commandCount }); + } + } + return toMcp( + ok('ssh_session_close', { + session_id: 'all', + closed: true, + closed_count: closed.length, + sessions: closed, + errors, + }), + { format } + ); + } + const session = sessions.get(session_id); if (!session) { // Idempotent: repeated close on a dead/unknown id is not an error. diff --git a/src/tools/systemctl-tools.js b/src/tools/systemctl-tools.js index 1cd065a..6dcfb47 100644 --- a/src/tools/systemctl-tools.js +++ b/src/tools/systemctl-tools.js @@ -18,7 +18,7 @@ import { streamExecCommand, shQuote } from '../stream-exec.js'; import { ok, fail, preview, toMcp } from '../structured-result.js'; import { buildPlan } from '../preview-mode.js'; -import { formatBytes, formatDuration } from '../output-formatter.js'; +import { formatBytes, formatDuration, escapeMdCell } from '../output-formatter.js'; import { parseSystemctlShow, sdNum, @@ -256,7 +256,7 @@ export function renderSystemctl(result) { lines.push('| unit | load | active | sub | description |'); lines.push('| --- | --- | --- | --- | --- |'); for (const u of d.units) { - const desc = (u.description || '').slice(0, 60).replace(/\|/g, '\\|'); + const desc = escapeMdCell((u.description || '').slice(0, 60)); lines.push(`| \`${u.unit}\` | ${u.load} | ${u.active} | ${u.sub} | ${desc} |`); } } diff --git a/src/tools/tail-tools.js b/src/tools/tail-tools.js index da3ce85..85f40d6 100644 --- a/src/tools/tail-tools.js +++ b/src/tools/tail-tools.js @@ -69,6 +69,7 @@ export async function handleSshTail({ getConnection, args }) { timeout = DEFAULT_TIMEOUT_MS, maxLen = 
DEFAULT_MAX_LEN, format = 'markdown', + abortSignal, } = args || {}; if (!file) { @@ -86,7 +87,7 @@ export async function handleSshTail({ getConnection, args }) { let result, error; try { - result = await streamExecCommand(client, command, { timeoutMs: timeout }); + result = await streamExecCommand(client, command, { timeoutMs: timeout, abortSignal }); } catch (e) { error = e; } const durationMs = Date.now() - startedAt; diff --git a/src/tools/transfer-tools.js b/src/tools/transfer-tools.js index 6e9682a..56f2ac4 100644 --- a/src/tools/transfer-tools.js +++ b/src/tools/transfer-tools.js @@ -35,8 +35,6 @@ import { ok, fail, preview, toMcp, defaultRender } from '../structured-result.js import { buildPlan } from '../preview-mode.js'; import { formatBytes, formatDuration } from '../output-formatter.js'; -const DEFAULT_EXEC_TIMEOUT_MS = 120_000; - // -------------------------------------------------------------------------- // Helpers // -------------------------------------------------------------------------- @@ -192,13 +190,13 @@ export async function handleSshUpload({ getConnection, args }) { // Preview: never touch the remote beyond a stat if (isPreview) { - let stat = 'unknown'; let localSize = null; try { const st = fs.statSync(local_path); localSize = st.size; } catch (_) { /* local may not exist -- still preview */ } + let stat; try { const client = await getConnection(server); stat = await remoteStatLine(client, remote_path); @@ -306,7 +304,7 @@ export async function handleSshDownload({ getConnection, args }) { } if (isPreview) { - let stat = 'unknown'; + let stat; try { const client = await getConnection(server); stat = await remoteStatLine(client, remote_path); @@ -422,15 +420,29 @@ function renderTransferMarkdown(tool) { // ssh_sync -- rsync via spawn // -------------------------------------------------------------------------- +/** + * Resolved auth method for rsync. 
Returns the concrete strategy so the + * caller can pick a spawn shape without re-checking fields. + * Accepts both `keyPath` (canonical, matches config-loader) and `keypath` + * (historical alias). + */ +function resolveRsyncAuth(serverConfig) { + if (!serverConfig) return { kind: 'none', keyFile: null }; + const keyFile = serverConfig.keyPath || serverConfig.keypath || null; + if (keyFile) return { kind: 'key', keyFile: String(keyFile).replace(/^~/, os.homedir()) }; + if (serverConfig.password) return { kind: 'password', password: String(serverConfig.password) }; + return { kind: 'none' }; +} + /** * Build argv for rsync based on args + resolved server config. + * Returns ONLY rsync arguments -- never includes `sshpass` or a password. + * Caller is responsible for wrapping in `sshpass -e` when auth.kind === 'password'. * Exported for unit tests. */ export function buildRsyncArgv({ serverConfig, direction, localPath, remotePath, exclude = [], dry_run = false, delete: del = false, compress = true }) { + const auth = resolveRsyncAuth(serverConfig); const argv = []; - if (serverConfig && serverConfig.password && !serverConfig.keypath) { - argv.push('-p', serverConfig.password, 'rsync'); - } const opts = compress ? 
['-avz'] : ['-av']; if (dry_run) opts.push('--dry-run'); if (del) opts.push('--delete'); @@ -440,10 +452,9 @@ export function buildRsyncArgv({ serverConfig, direction, localPath, remotePath, argv.push(...opts); const sshOpts = ['-o StrictHostKeyChecking=accept-new', '-o ConnectTimeout=10']; - if (serverConfig && serverConfig.keypath) { + if (auth.kind === 'key') { sshOpts.unshift('-o BatchMode=yes'); - const keyPath = String(serverConfig.keypath).replace(/^~/, os.homedir()); - sshOpts.push(`-i ${keyPath}`); + sshOpts.push(`-i ${auth.keyFile}`); } if (serverConfig && serverConfig.port && String(serverConfig.port) !== '22') { sshOpts.push(`-p ${serverConfig.port}`); @@ -459,7 +470,7 @@ export function buildRsyncArgv({ serverConfig, direction, localPath, remotePath, return argv; } -export async function handleSshSync({ getConnection, getServerConfig, args }) { +export async function handleSshSync({ getConnection: _getConnection, getServerConfig, args }) { const { server, source, @@ -518,10 +529,23 @@ export async function handleSshSync({ getConnection, getServerConfig, args }) { try { serverConfig = await getServerConfig(server); } catch (_) { /* best-effort */ } } - const usePassword = !!(serverConfig && serverConfig.password && !serverConfig.keypath); - const rsyncCmd = usePassword ? 'sshpass' : 'rsync'; + const auth = resolveRsyncAuth(serverConfig); const rsyncArgs = buildRsyncArgv({ serverConfig, direction, localPath, remotePath, exclude, dry_run, delete: del, compress }); + // Password auth: wrap rsync in `sshpass -e rsync ...` and pass password via + // SSHPASS env var. Never put the password in argv (visible to `ps aux`). 
+ let spawnCmd; + let spawnArgs; + const spawnOpts = { stdio: ['ignore', 'pipe', 'pipe'] }; + if (auth.kind === 'password') { + spawnCmd = 'sshpass'; + spawnArgs = ['-e', 'rsync', ...rsyncArgs]; + spawnOpts.env = { ...process.env, SSHPASS: auth.password }; + } else { + spawnCmd = 'rsync'; + spawnArgs = rsyncArgs; + } + const startedAt = Date.now(); return new Promise((resolve) => { @@ -530,7 +554,7 @@ export async function handleSshSync({ getConnection, getServerConfig, args }) { let timedOut = false; let proc; try { - proc = spawnFn(rsyncCmd, rsyncArgs, { stdio: ['ignore', 'pipe', 'pipe'] }); + proc = spawnFn(spawnCmd, spawnArgs, spawnOpts); } catch (e) { return resolve(toMcp(fail('ssh_sync', `spawn failed: ${e.message || e}`, { server }), { format })); } @@ -586,7 +610,7 @@ export async function handleSshSync({ getConnection, getServerConfig, args }) { files_transferred: filesMatch ? parseInt(filesMatch[1].replace(/,/g, ''), 10) : 0, bytes_transferred: sizeMatch ? parseInt(sizeMatch[1].replace(/,/g, ''), 10) : 0, duration_ms: durationMs, - rsync_argv: [rsyncCmd, ...rsyncArgs], + rsync_argv: [spawnCmd, ...spawnArgs], }; resolve(toMcp( ok('ssh_sync', data, { server, duration_ms: durationMs }), @@ -655,16 +679,25 @@ export async function handleSshDiff({ getConnection, args }) { const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'sshdiff-')); const tmpA = path.join(tmpDir, 'a'); const tmpB = path.join(tmpDir, 'b'); + let sftpA, sftpB; try { - const sftpA = await getSftpChannel(clientA); - const sftpB = await getSftpChannel(clientB); + sftpA = await getSftpChannel(clientA); + sftpB = await getSftpChannel(clientB); await sftpFastGet(sftpA, path_a, tmpA); await sftpFastGet(sftpB, path_b, tmpB); } catch (e) { + // Close SFTP channels before returning -- OpenSSH defaults to MaxSessions=10 + // and leaking SFTP channels on cross-server diffs runs the pool dry fast (H1). 
+ if (sftpA) endSftp(sftpA); + if (sftpB) endSftp(sftpB); try { fs.rmSync(tmpDir, { recursive: true, force: true }); } catch (_) { /* ignore */ } return toMcp(fail('ssh_diff', `fetch failed: ${e.message || e}`, { server }), { format }); } + // Success path must also release the SFTP channels (H1). + if (sftpA) endSftp(sftpA); + if (sftpB) endSftp(sftpB); + const diffOut = await spawnDiffLocal(tmpA, tmpB); try { fs.rmSync(tmpDir, { recursive: true, force: true }); } catch (_) { /* ignore */ } @@ -769,7 +802,7 @@ export async function handleSshEdit({ getConnection, args }) { const checker = pickSyntaxChecker(filePath, syntax_check); if (isPreview) { - let stat = 'unknown'; + let stat; try { const client = await getConnection(server); stat = await remoteStatLine(client, filePath); @@ -815,7 +848,12 @@ export async function handleSshEdit({ getConnection, args }) { } // 2. Compute new content - const nextContent = new_content != null ? String(new_content) : applyPatches(current, patch); + let nextContent; + try { + nextContent = new_content != null ? String(new_content) : applyPatches(current, patch); + } catch (e) { + return toMcp(fail('ssh_edit', e.message || String(e), { server }), { format }); + } const encoded = encodeBase64(nextContent); // 3. Paths (randomized to avoid TOCTOU collisions) diff --git a/src/tools/tunnel-tools.js b/src/tools/tunnel-tools.js index df504df..e3decba 100644 --- a/src/tools/tunnel-tools.js +++ b/src/tools/tunnel-tools.js @@ -34,6 +34,158 @@ function newTunnelId() { return `tunnel_${Date.now().toString(36)}_${idCounter.toString(36)}`; } +// -------------------------------------------------------------------------- +// SOCKS5 protocol handler (RFC 1928, no-auth CONNECT only) +// -------------------------------------------------------------------------- + +// Reply codes -- used for the SOCKS5 response byte. 
+const SOCKS_REP = Object.freeze({ + SUCCEEDED: 0x00, + GENERAL_FAILURE: 0x01, + CONNECTION_NOT_ALLOWED: 0x02, + NETWORK_UNREACHABLE: 0x03, + HOST_UNREACHABLE: 0x04, + CONNECTION_REFUSED: 0x05, + TTL_EXPIRED: 0x06, + COMMAND_NOT_SUPPORTED: 0x07, + ADDRESS_TYPE_NOT_SUPPORTED: 0x08, +}); + +/** + * Build a SOCKS5 reply packet. `boundAddr` / `boundPort` are the server's + * local binding for the outbound connection -- we return 0.0.0.0:0 because + * we don't have a meaningful value to report (we forward via SSH). + */ +function buildSocksReply(rep, atyp = 0x01) { + // VER REP RSV ATYP BND.ADDR BND.PORT + if (atyp === 0x01) { + // IPv4 binding: 4 zero bytes + 2 zero bytes + return Buffer.from([0x05, rep, 0x00, 0x01, 0, 0, 0, 0, 0, 0]); + } + // Fallback to IPv4 zero binding for unknown reply atyps. + return Buffer.from([0x05, rep, 0x00, 0x01, 0, 0, 0, 0, 0, 0]); +} + +/** + * Parse the SOCKS5 CONNECT request buffer. Returns + * { host, port, atyp, consumed } + * or throws an Error if the request is malformed / unsupported. + * Supports: ATYP 0x01 IPv4, 0x03 domain, 0x04 IPv6. 
+ */ +export function parseSocksConnectRequest(buf) { + if (buf.length < 10) throw new Error('short CONNECT request'); + if (buf[0] !== 0x05) throw new Error(`unsupported VER ${buf[0]}`); + if (buf[1] !== 0x01) throw new Error(`only CMD=CONNECT supported, got ${buf[1]}`); + // buf[2] reserved, ignore + const atyp = buf[3]; + let host, portOffset; + if (atyp === 0x01) { + if (buf.length < 10) throw new Error('short IPv4 CONNECT'); + host = `${buf[4]}.${buf[5]}.${buf[6]}.${buf[7]}`; + portOffset = 8; + } else if (atyp === 0x03) { + const dlen = buf[4]; + if (buf.length < 5 + dlen + 2) throw new Error('short domain CONNECT'); + host = buf.slice(5, 5 + dlen).toString('ascii'); + portOffset = 5 + dlen; + } else if (atyp === 0x04) { + if (buf.length < 22) throw new Error('short IPv6 CONNECT'); + const segs = []; + for (let i = 0; i < 8; i++) segs.push(buf.readUInt16BE(4 + i * 2).toString(16)); + host = segs.join(':'); + portOffset = 20; + } else { + throw new Error(`unsupported ATYP ${atyp}`); + } + const port = buf.readUInt16BE(portOffset); + return { host, port, atyp, consumed: portOffset + 2 }; +} + +/** + * Drive one SOCKS5 session over a single client TCP socket: + * 1. greeting (methods negotiation) -- we accept only 0x00 (no auth) + * 2. CONNECT request -- open ssh.forwardOut to target + * 3. bidirectional pipe until either side closes + * + * `sshClient` is a duck-typed ssh2 Client (.forwardOut(src, srcPort, dst, + * dstPort, cb)). 
+ */ +export function handleSocks5Connection(sock, sshClient) { + let phase = 'greeting'; // greeting -> request -> streaming + let buf = Buffer.alloc(0); + const fail = (rep = SOCKS_REP.GENERAL_FAILURE) => { + try { sock.write(buildSocksReply(rep)); } catch (_) { /* ignore */ } + try { sock.end(); } catch (_) { /* ignore */ } + }; + sock.on('error', () => { try { sock.destroy(); } catch (_) { /* ignore */ } }); + sock.on('data', (chunk) => { + buf = Buffer.concat([buf, chunk]); + if (phase === 'greeting') { + if (buf.length < 2) return; + if (buf[0] !== 0x05) { fail(); return; } + const nmethods = buf[1]; + if (buf.length < 2 + nmethods) return; + const methods = buf.slice(2, 2 + nmethods); + buf = buf.slice(2 + nmethods); + if (!methods.includes(0x00)) { + // Method-not-acceptable: VER + 0xFF + try { sock.write(Buffer.from([0x05, 0xff])); } catch (_) { /* ignore */ } + try { sock.end(); } catch (_) { /* ignore */ } + return; + } + try { sock.write(Buffer.from([0x05, 0x00])); } + catch (_) { sock.destroy(); return; } + phase = 'request'; + } + if (phase === 'request') { + let req; + try { req = parseSocksConnectRequest(buf); } + catch (_) { + // Either short (need more bytes) or unsupported. Parse errors from + // short buffers look identical to unsupported -- differentiate by + // length. 
+ if (buf.length < 10) return; + fail(SOCKS_REP.COMMAND_NOT_SUPPORTED); + return; + } + buf = buf.slice(req.consumed); + phase = 'streaming'; + sshClient.forwardOut( + sock.remoteAddress || '127.0.0.1', + sock.remotePort || 0, + req.host, req.port, + (err, stream) => { + if (err) { + const msg = String(err.message || '').toLowerCase(); + let code = SOCKS_REP.GENERAL_FAILURE; + if (msg.includes('refused')) code = SOCKS_REP.CONNECTION_REFUSED; + else if (msg.includes('unreachable')) code = SOCKS_REP.HOST_UNREACHABLE; + fail(code); + return; + } + try { sock.write(buildSocksReply(SOCKS_REP.SUCCEEDED)); } + catch (_) { try { stream.destroy(); } catch (__) { /* ignore */ } return; } + // Any residual bytes the client sent before the reply need to be + // flushed into the newly-opened SSH channel (the piping begins on + // the next `sock.on('data')` only). + if (buf.length > 0) { + try { stream.write(buf); } catch (_) { /* ignore */ } + buf = Buffer.alloc(0); + } + sock.pipe(stream).pipe(sock); + const cleanup = () => { + try { stream.destroy(); } catch (_) { /* ignore */ } + try { sock.destroy(); } catch (_) { /* ignore */ } + }; + stream.on('close', cleanup); + stream.on('error', cleanup); + sock.on('close', cleanup); + } + ); + } + }); +} + /** Test-only: flush all registered tunnels. */ export function __resetTunnelStore() { for (const state of tunnels.values()) { @@ -110,11 +262,12 @@ export async function handleSshTunnelCreate(ctx = {}) { if (!Number.isFinite(lport) || lport <= 0 || lport > 65535) { return toMcp(fail('ssh_tunnel_create', 'local_port must be 1..65535', { server: server ?? null }), { format }); } - if (type !== 'dynamic') { - if (!remote_host || !remote_port) { - return toMcp(fail('ssh_tunnel_create', - `remote_host and remote_port required for type=${type}`, { server: server ?? null }), { format }); - } + // `dynamic` (SOCKS5) tunnels don't need a fixed remote_host/remote_port -- + // each SOCKS client connection carries its own target. 
Only local/remote + // require the remote endpoint upfront. + if (type !== 'dynamic' && (!remote_host || !remote_port)) { + return toMcp(fail('ssh_tunnel_create', + `remote_host and remote_port required for type=${type}`, { server: server ?? null }), { format }); } // -- preview ------------------------------------------------------- @@ -131,8 +284,9 @@ export async function handleSshTunnelCreate(ctx = {}) { effects.push(`requests remote forward ${shQuote(remote_host)}:${remote_port} from ${server}`); effects.push(`incoming connections piped to local ${bind}:${lport}`); } else { - effects.push(`opens SOCKS5 proxy on ${bind}:${lport}`); - effects.push(`all SOCKS client requests routed via ${server}`); + effects.push(`opens SOCKS5 listener on ${bind}:${lport}`); + effects.push(`each CONNECT is forwarded via ${server} (target chosen per-connection)`); + effects.push('auth: no-authentication method only (method 0x00)'); } if (probe) { effects.push(`reachability probe: dns=${probe.dns.ok ? 'ok' : 'fail'}, tcp=${probe.tcp.ok ? 'ok' : 'fail'}`); @@ -163,17 +317,11 @@ export async function handleSshTunnelCreate(ctx = {}) { }; try { - if (type === 'local' || type === 'dynamic') { + if (type === 'local') { const listener = net.createServer((sock) => { const srcAddr = sock.remoteAddress || '127.0.0.1'; const srcPort = sock.remotePort || 0; - const dstHost = type === 'local' ? remote_host : null; - const dstPort = type === 'local' ? 
Number(remote_port) : null; - if (type !== 'local') { // dynamic: no remote handler -- hook left as future work - sock.destroy(); - return; - } - client.forwardOut(srcAddr, srcPort, dstHost, dstPort, (err, stream) => { + client.forwardOut(srcAddr, srcPort, remote_host, Number(remote_port), (err, stream) => { if (err) { sock.destroy(); return; } sock.pipe(stream).pipe(sock); sock.on('close', () => { try { stream.destroy(); } catch (_) { /* ignore */ } }); @@ -183,6 +331,15 @@ export async function handleSshTunnelCreate(ctx = {}) { }); }); + await new Promise((resolve, reject) => { + listener.once('error', reject); + listener.listen(lport, bind, () => resolve()); + }); + state.listener = listener; + } else if (type === 'dynamic') { + const listener = net.createServer((sock) => { + handleSocks5Connection(sock, client); + }); await new Promise((resolve, reject) => { listener.once('error', reject); listener.listen(lport, bind, () => resolve()); diff --git a/src/tunnel-manager.js b/src/tunnel-manager.js deleted file mode 100644 index 849bada..0000000 --- a/src/tunnel-manager.js +++ /dev/null @@ -1,576 +0,0 @@ -/** - * SSH Tunnel Manager - * Manages SSH port forwarding and SOCKS proxy tunnels - */ - -import { v4 as uuidv4 } from 'uuid'; -import net from 'net'; -import { logger } from './logger.js'; - -// Map to store active tunnels -const tunnels = new Map(); - -// Tunnel types -export const TUNNEL_TYPES = { - LOCAL: 'local', // Local port forwarding (access remote service locally) - REMOTE: 'remote', // Remote port forwarding (expose local service remotely) - DYNAMIC: 'dynamic' // SOCKS proxy -}; - -// Tunnel states -export const TUNNEL_STATES = { - CONNECTING: 'connecting', - ACTIVE: 'active', - RECONNECTING: 'reconnecting', - FAILED: 'failed', - CLOSED: 'closed' -}; - -class SSHTunnel { - constructor(id, serverName, ssh, config) { - this.id = id; - this.serverName = serverName; - this.ssh = ssh; - this.type = config.type; - this.config = config; - this.state = 
TUNNEL_STATES.CONNECTING; - this.createdAt = new Date(); - this.lastActivity = new Date(); - this.connections = new Set(); - this.server = null; - this.reconnectAttempts = 0; - this.maxReconnectAttempts = 5; - this.stats = { - bytesTransferred: 0, - connectionsTotal: 0, - connectionsActive: 0, - errors: 0 - }; - } - - /** - * Start the tunnel - */ - async start() { - try { - switch (this.type) { - case TUNNEL_TYPES.LOCAL: - await this.startLocalForwarding(); - break; - - case TUNNEL_TYPES.REMOTE: - await this.startRemoteForwarding(); - break; - - case TUNNEL_TYPES.DYNAMIC: - await this.startDynamicForwarding(); - break; - - default: - throw new Error(`Unknown tunnel type: ${this.type}`); - } - - this.state = TUNNEL_STATES.ACTIVE; - this.lastActivity = new Date(); - - logger.info(`SSH tunnel ${this.id} started`, { - type: this.type, - server: this.serverName, - local: `${this.config.localHost}:${this.config.localPort}`, - remote: this.type !== TUNNEL_TYPES.DYNAMIC ? - `${this.config.remoteHost}:${this.config.remotePort}` : 'SOCKS' - }); - - } catch (error) { - this.state = TUNNEL_STATES.FAILED; - logger.error(`Failed to start tunnel ${this.id}`, { - error: error.message - }); - throw error; - } - } - - /** - * Start local port forwarding - */ - async startLocalForwarding() { - const { localHost, localPort, remoteHost, remotePort } = this.config; - - // Create local server - this.server = net.createServer(async (localSocket) => { - this.stats.connectionsTotal++; - this.stats.connectionsActive++; - this.connections.add(localSocket); - this.lastActivity = new Date(); - - logger.debug(`New connection to tunnel ${this.id}`, { - from: localSocket.remoteAddress - }); - - try { - // Forward to remote via SSH - const stream = await this.ssh.forwardOut( - localSocket.remoteAddress || '127.0.0.1', - localSocket.remotePort || 0, - remoteHost, - remotePort - ); - - // Pipe data between local and remote - localSocket.pipe(stream).pipe(localSocket); - - // Track data transfer - 
localSocket.on('data', (chunk) => { - this.stats.bytesTransferred += chunk.length; - this.lastActivity = new Date(); - }); - - stream.on('data', (chunk) => { - this.stats.bytesTransferred += chunk.length; - this.lastActivity = new Date(); - }); - - // Handle disconnection - const cleanup = () => { - this.stats.connectionsActive--; - this.connections.delete(localSocket); - localSocket.destroy(); - stream.destroy(); - }; - - localSocket.on('close', cleanup); - localSocket.on('error', cleanup); - stream.on('close', cleanup); - stream.on('error', cleanup); - - } catch (error) { - this.stats.errors++; - logger.error('Tunnel forwarding error', { - tunnel: this.id, - error: error.message - }); - localSocket.destroy(); - } - }); - - // Start listening - await new Promise((resolve, reject) => { - this.server.listen(localPort, localHost, (err) => { - if (err) reject(err); - else resolve(); - }); - }); - - logger.info('Local forwarding established', { - local: `${localHost}:${localPort}`, - remote: `${remoteHost}:${remotePort}` - }); - } - - /** - * Start remote port forwarding - */ - async startRemoteForwarding() { - const { localHost, localPort, remoteHost, remotePort } = this.config; - - // Request remote forwarding from SSH server - await new Promise((resolve, reject) => { - this.ssh.forwardIn(remoteHost, remotePort, (err) => { - if (err) reject(err); - else resolve(); - }); - }); - - // Handle incoming connections from remote - this.ssh.on('tcp connection', (info, accept, reject) => { - if (info.destPort !== remotePort) return; - - this.stats.connectionsTotal++; - this.stats.connectionsActive++; - this.lastActivity = new Date(); - - const remoteSocket = accept(); - - // Connect to local service - const localSocket = net.connect(localPort, localHost, () => { - // Pipe data between remote and local - remoteSocket.pipe(localSocket).pipe(remoteSocket); - - // Track data transfer - remoteSocket.on('data', (chunk) => { - this.stats.bytesTransferred += chunk.length; - 
this.lastActivity = new Date(); - }); - - localSocket.on('data', (chunk) => { - this.stats.bytesTransferred += chunk.length; - this.lastActivity = new Date(); - }); - }); - - // Handle errors and cleanup - const cleanup = () => { - this.stats.connectionsActive--; - remoteSocket.destroy(); - localSocket.destroy(); - }; - - localSocket.on('error', (err) => { - this.stats.errors++; - logger.error('Remote forwarding error', { - tunnel: this.id, - error: err.message - }); - cleanup(); - }); - - remoteSocket.on('close', cleanup); - localSocket.on('close', cleanup); - }); - - logger.info('Remote forwarding established', { - local: `${localHost}:${localPort}`, - remote: `${remoteHost}:${remotePort}` - }); - } - - /** - * Start dynamic port forwarding (SOCKS proxy) - */ - async startDynamicForwarding() { - const { localHost, localPort } = this.config; - - // Create SOCKS server - this.server = net.createServer(async (localSocket) => { - this.stats.connectionsTotal++; - this.stats.connectionsActive++; - this.connections.add(localSocket); - this.lastActivity = new Date(); - - let targetHost = null; - let targetPort = null; - let stream = null; - - // Simple SOCKS5 implementation (basic) - localSocket.once('data', async (chunk) => { - // Parse SOCKS request (simplified) - if (chunk[0] === 0x05) { // SOCKS5 - // Send auth method response - localSocket.write(Buffer.from([0x05, 0x00])); - - localSocket.once('data', async (chunk2) => { - // Parse connection request - if (chunk2[0] === 0x05 && chunk2[1] === 0x01) { // CONNECT - const addrType = chunk2[3]; - let offset = 4; - - if (addrType === 0x01) { // IPv4 - targetHost = `${chunk2[4]}.${chunk2[5]}.${chunk2[6]}.${chunk2[7]}`; - offset = 8; - } else if (addrType === 0x03) { // Domain - const domainLen = chunk2[4]; - targetHost = chunk2.slice(5, 5 + domainLen).toString(); - offset = 5 + domainLen; - } - - targetPort = (chunk2[offset] << 8) | chunk2[offset + 1]; - - try { - // Create SSH forwarding stream - stream = await 
this.ssh.forwardOut( - '127.0.0.1', 0, - targetHost, targetPort - ); - - // Send success response - const response = Buffer.from([ - 0x05, 0x00, 0x00, 0x01, - 0, 0, 0, 0, // Bind address (0.0.0.0) - 0, 0 // Bind port - ]); - localSocket.write(response); - - // Pipe data - localSocket.pipe(stream).pipe(localSocket); - - // Track data - localSocket.on('data', (chunk) => { - this.stats.bytesTransferred += chunk.length; - this.lastActivity = new Date(); - }); - - stream.on('data', (chunk) => { - this.stats.bytesTransferred += chunk.length; - this.lastActivity = new Date(); - }); - - } catch (error) { - // Send error response - const response = Buffer.from([ - 0x05, 0x01, 0x00, 0x01, - 0, 0, 0, 0, 0, 0 - ]); - localSocket.write(response); - localSocket.destroy(); - this.stats.errors++; - } - } - }); - } else { - // Not SOCKS5, close connection - localSocket.destroy(); - } - }); - - // Cleanup on disconnect - localSocket.on('close', () => { - this.stats.connectionsActive--; - this.connections.delete(localSocket); - if (stream) stream.destroy(); - }); - - localSocket.on('error', () => { - this.stats.errors++; - this.stats.connectionsActive--; - this.connections.delete(localSocket); - if (stream) stream.destroy(); - }); - }); - - // Start listening - await new Promise((resolve, reject) => { - this.server.listen(localPort, localHost, (err) => { - if (err) reject(err); - else resolve(); - }); - }); - - logger.info('SOCKS proxy established', { - local: `${localHost}:${localPort}` - }); - } - - /** - * Get tunnel information - */ - getInfo() { - return { - id: this.id, - server: this.serverName, - type: this.type, - state: this.state, - config: { - localHost: this.config.localHost, - localPort: this.config.localPort, - remoteHost: this.config.remoteHost, - remotePort: this.config.remotePort - }, - stats: this.stats, - created: this.createdAt, - lastActivity: this.lastActivity, - activeConnections: this.connections.size - }; - } - - /** - * Close the tunnel - */ - close() { - 
logger.info(`Closing tunnel ${this.id}`); - - this.state = TUNNEL_STATES.CLOSED; - - // Close all active connections - for (const conn of this.connections) { - conn.destroy(); - } - this.connections.clear(); - - // Close server - if (this.server) { - this.server.close(); - this.server = null; - } - - // Cancel remote forwarding if needed - if (this.type === TUNNEL_TYPES.REMOTE) { - this.ssh.unforwardIn(this.config.remoteHost, this.config.remotePort); - } - - tunnels.delete(this.id); - } - - /** - * Reconnect tunnel - */ - async reconnect() { - if (this.reconnectAttempts >= this.maxReconnectAttempts) { - logger.error(`Max reconnect attempts reached for tunnel ${this.id}`); - this.state = TUNNEL_STATES.FAILED; - return false; - } - - this.reconnectAttempts++; - this.state = TUNNEL_STATES.RECONNECTING; - - logger.info(`Reconnecting tunnel ${this.id}`, { - attempt: this.reconnectAttempts - }); - - try { - await this.start(); - this.reconnectAttempts = 0; - return true; - } catch (error) { - logger.error(`Reconnect failed for tunnel ${this.id}`, { - error: error.message - }); - - // Retry with exponential backoff - const delay = Math.min(1000 * Math.pow(2, this.reconnectAttempts), 30000); - setTimeout(() => this.reconnect(), delay); - - return false; - } - } -} - -/** - * Create a new SSH tunnel - */ -export async function createTunnel(serverName, ssh, config) { - const tunnelId = `tunnel_${Date.now()}_${uuidv4().substring(0, 8)}`; - - // Validate config - if (!config.type || !Object.values(TUNNEL_TYPES).includes(config.type)) { - throw new Error(`Invalid tunnel type: ${config.type}`); - } - - // Set defaults - config.localHost = config.localHost || '127.0.0.1'; - - if (config.type !== TUNNEL_TYPES.DYNAMIC) { - if (!config.remoteHost || !config.remotePort) { - throw new Error('Remote host and port required for port forwarding'); - } - } - - if (!config.localPort) { - throw new Error('Local port required'); - } - - const tunnel = new SSHTunnel(tunnelId, serverName, ssh, 
config); - tunnels.set(tunnelId, tunnel); - - try { - await tunnel.start(); - - logger.info('SSH tunnel created', { - id: tunnelId, - type: config.type, - server: serverName - }); - - return tunnel; - } catch (error) { - tunnels.delete(tunnelId); - throw error; - } -} - -/** - * Get an existing tunnel - */ -export function getTunnel(tunnelId) { - const tunnel = tunnels.get(tunnelId); - - if (!tunnel) { - throw new Error(`Tunnel ${tunnelId} not found`); - } - - return tunnel; -} - -/** - * List all active tunnels - */ -export function listTunnels(serverName = null) { - const activeTunnels = []; - - for (const [id, tunnel] of tunnels.entries()) { - if (tunnel.state !== TUNNEL_STATES.CLOSED) { - if (!serverName || tunnel.serverName === serverName) { - activeTunnels.push(tunnel.getInfo()); - } - } - } - - return activeTunnels; -} - -/** - * Close a tunnel - */ -export function closeTunnel(tunnelId) { - const tunnel = tunnels.get(tunnelId); - - if (!tunnel) { - throw new Error(`Tunnel ${tunnelId} not found`); - } - - tunnel.close(); - return true; -} - -/** - * Close all tunnels for a server - */ -export function closeServerTunnels(serverName) { - let closedCount = 0; - - for (const [id, tunnel] of tunnels.entries()) { - if (tunnel.serverName === serverName) { - tunnel.close(); - closedCount++; - } - } - - return closedCount; -} - -/** - * Monitor tunnel health - */ -export function monitorTunnels() { - const now = Date.now(); - const healthTimeout = 60 * 1000; // 1 minute - - for (const [id, tunnel] of tunnels.entries()) { - if (tunnel.state === TUNNEL_STATES.ACTIVE) { - const idle = now - tunnel.lastActivity.getTime(); - - // Check if tunnel is still healthy - if (idle > healthTimeout && tunnel.connections.size === 0) { - logger.debug(`Tunnel ${id} idle for ${idle}ms`); - } - - // Auto-reconnect failed tunnels - if (tunnel.state === TUNNEL_STATES.FAILED) { - tunnel.reconnect(); - } - } - } -} - -// Monitor tunnels periodically -setInterval(monitorTunnels, 30 * 1000); 
// Every 30 seconds - -export default { - createTunnel, - getTunnel, - listTunnels, - closeTunnel, - closeServerTunnels, - TUNNEL_TYPES, - TUNNEL_STATES -}; diff --git a/tests/test-alerts-tools.js b/tests/test-alerts-tools.js new file mode 100644 index 0000000..ec5953c --- /dev/null +++ b/tests/test-alerts-tools.js @@ -0,0 +1,211 @@ +#!/usr/bin/env node +/** + * Tests for src/tools/alerts-tools.js -- the re-implemented ssh_alert_setup. + * + * Covers: + * - set / get round-trips through the local config store + * - corrupt config file falls back to "no config" + * - check with disabled config returns status='disabled', no alerts + * - check with thresholds breached returns populated alerts[] + * - check with thresholds NOT breached returns status='ok' + * - invalid action rejected + * - missing server rejected + * - atomic write: tmp file cleanup on rename + * - server name traversal guard (can't escape ALERTS_DIR) + */ + +import assert from 'node:assert'; +import fs from 'node:fs'; +import path from 'node:path'; +import { handleSshAlertSetup, __internals } from '../src/tools/alerts-tools.js'; + +const { configPathFor, writeConfig, evaluateThresholds } = __internals; + +let passed = 0; +let failed = 0; +const fails = []; + +async function test(name, fn) { + try { await fn(); passed++; console.log(`[ok] ${name}`); } + catch (e) { failed++; fails.push({ name, err: e }); console.error(`[err] ${name}: ${e.message}`); } +} + +// Use a unique-per-test server name to avoid sharing state with real configs +// in ~/.ssh-manager/alerts. 
+function uniqueServer(prefix) { + return `${prefix}-${Date.now().toString(36)}-${Math.random().toString(36).slice(2, 6)}`; +} + +async function cleanupServer(name) { + try { fs.unlinkSync(configPathFor(name)); } catch (_) { /* ignore */ } +} + +// --- rejection paths ------------------------------------------------------ +await test('rejects when server missing', async () => { + const r = await handleSshAlertSetup({ getConnection: async () => ({}), args: { action: 'get', format: 'json' } }); + assert.strictEqual(r.isError, true); + const p = JSON.parse(r.content[0].text); + assert(p.error.includes('server is required')); +}); + +await test('rejects unknown action', async () => { + const r = await handleSshAlertSetup({ + getConnection: async () => ({}), + args: { server: 's', action: 'explode', format: 'json' }, + }); + assert.strictEqual(r.isError, true); + const p = JSON.parse(r.content[0].text); + assert(p.error.includes('action must be one of')); +}); + +// --- set / get round trip ------------------------------------------------- +await test('set persists thresholds; get returns them', async () => { + const srv = uniqueServer('roundtrip'); + try { + const s = await handleSshAlertSetup({ + getConnection: async () => ({}), + args: { + server: srv, action: 'set', + cpuThreshold: 80, memoryThreshold: 85, diskThreshold: 90, + format: 'json', + }, + }); + const parsedSet = JSON.parse(s.content[0].text); + assert.strictEqual(parsedSet.success, true, parsedSet.error); + assert.strictEqual(parsedSet.data.config.cpuThreshold, 80); + assert(fs.existsSync(parsedSet.data.config_path), 'config file must exist on disk'); + + const g = await handleSshAlertSetup({ + getConnection: async () => ({}), + args: { server: srv, action: 'get', format: 'json' }, + }); + const parsedGet = JSON.parse(g.content[0].text); + assert.strictEqual(parsedGet.success, true); + assert.strictEqual(parsedGet.data.config.cpuThreshold, 80); + assert.strictEqual(parsedGet.data.config.memoryThreshold, 
85); + assert.strictEqual(parsedGet.data.config.diskThreshold, 90); + assert.strictEqual(parsedGet.data.config.enabled, true); + } finally { + await cleanupServer(srv); + } +}); + +await test('corrupt config file is treated as missing, not crash', async () => { + const srv = uniqueServer('corrupt'); + try { + const cfgPath = configPathFor(srv); + fs.mkdirSync(path.dirname(cfgPath), { recursive: true }); + fs.writeFileSync(cfgPath, '{ not: json', 'utf8'); + + const g = await handleSshAlertSetup({ + getConnection: async () => ({}), + args: { server: srv, action: 'get', format: 'json' }, + }); + const parsed = JSON.parse(g.content[0].text); + assert.strictEqual(parsed.success, true); + assert.strictEqual(parsed.data.config, null); + } finally { + await cleanupServer(srv); + } +}); + +// --- check path ----------------------------------------------------------- +// NOTE: alerts-tools.js imports handleSshHealthCheck by binding at module load, +// so a module-level monkey-patch to the re-exported symbol does NOT reach +// alerts-tools.js's own imported binding. We instead test evaluateThresholds() +// directly for the threshold logic, and cover the end-to-end wire-through via +// the "disabled" path + error paths. 
+await test('check: no config yet -> structured fail', async () => { + const srv = uniqueServer('no-cfg'); + const r = await handleSshAlertSetup({ + getConnection: async () => ({}), + args: { server: srv, action: 'check', format: 'json' }, + }); + assert.strictEqual(r.isError, true); + const p = JSON.parse(r.content[0].text); + assert(p.error.includes('no alert configuration')); +}); + +await test('check: disabled config returns status=disabled, alert_count=0', async () => { + const srv = uniqueServer('disabled'); + try { + writeConfig(srv, { + version: 1, server: srv, enabled: false, + cpuThreshold: 50, memoryThreshold: 50, diskThreshold: 50, + updated_at: new Date().toISOString(), + }); + + const r = await handleSshAlertSetup({ + getConnection: async () => ({}), + args: { server: srv, action: 'check', format: 'json' }, + }); + assert(!r.isError, 'disabled must not surface as an MCP isError'); + const parsed = JSON.parse(r.content[0].text); + assert.strictEqual(parsed.success, true); + assert.strictEqual(parsed.data.status, 'disabled'); + assert.strictEqual(parsed.data.alert_count, 0); + } finally { + await cleanupServer(srv); + } +}); + +// --- evaluateThresholds unit tests --------------------------------------- +await test('evaluateThresholds: all metrics below thresholds -> no alerts', () => { + const alerts = evaluateThresholds( + { cpu: { usage_percent: 30 }, memory: { used_percent: 40 }, disk: [{ mount: '/', used_percent: 20 }] }, + { cpuThreshold: 80, memoryThreshold: 80, diskThreshold: 80 }, + ); + assert.strictEqual(alerts.length, 0); +}); + +await test('evaluateThresholds: CPU breach surfaces', () => { + const alerts = evaluateThresholds( + { cpu: { usage_percent: 95 }, memory: { used_percent: 10 }, disk: [] }, + { cpuThreshold: 80, memoryThreshold: 80, diskThreshold: 80 }, + ); + assert.strictEqual(alerts.length, 1); + assert.strictEqual(alerts[0].metric, 'cpu'); + assert.strictEqual(alerts[0].observed, 95); +}); + +await test('evaluateThresholds: 
memory breach surfaces', () => { + const alerts = evaluateThresholds( + { memory: { used_percent: 92 } }, + { memoryThreshold: 90 }, + ); + assert.strictEqual(alerts.length, 1); + assert.strictEqual(alerts[0].metric, 'memory'); +}); + +await test('evaluateThresholds: per-mount disk breach surfaces each mount', () => { + const alerts = evaluateThresholds( + { disk: [ + { mount: '/', used_percent: 50 }, + { mount: '/var', used_percent: 97 }, + { mount: '/tmp', used_percent: 99 }, + ] }, + { diskThreshold: 95 }, + ); + assert.strictEqual(alerts.length, 2); + assert.deepStrictEqual(alerts.map(a => a.mount).sort(), ['/tmp', '/var']); +}); + +await test('evaluateThresholds: missing threshold suppresses that metric', () => { + const alerts = evaluateThresholds( + { cpu: { usage_percent: 99 }, memory: { used_percent: 99 } }, + { diskThreshold: 50 }, // only disk threshold set + ); + assert.strictEqual(alerts.length, 0, 'without cpu/memory thresholds, those metrics ignore'); +}); + +// --- path traversal guard ------------------------------------------------- +await test('server name with traversal characters cannot escape ALERTS_DIR', () => { + const p = configPathFor('../../etc/passwd'); + assert(!p.includes('..'), `got ${p}`); + assert(p.endsWith('.json')); + assert(p.startsWith(__internals.ALERTS_DIR), + `path ${p} must be inside ${__internals.ALERTS_DIR}`); +}); + +console.log(`\n${passed} passed, ${failed} failed`); +if (failed > 0) { for (const f of fails) console.error(` [err] ${f.name}\n ${f.err.stack}`); process.exit(1); } diff --git a/tests/test-backup-tools.js b/tests/test-backup-tools.js index 1f08cd1..7f2ec23 100644 --- a/tests/test-backup-tools.js +++ b/tests/test-backup-tools.js @@ -43,14 +43,14 @@ await test('buildBackupCommand: mysql uses MYSQL_PWD env, not -p flag', () => { password: 'sekret', outputPath: '/backups/app.sql.gz', gzip: true, }); assert(envPrefix.includes('MCP_BACKUP_PASS='), 'env prefix contains pass'); - 
assert(envPrefix.includes("'sekret'"), 'password shQuoted in env'); + assert(envPrefix.includes('\'sekret\''), 'password shQuoted in env'); assert(!command.includes('sekret'), 'password NOT in command body'); // No password-flag: mysqldump -p or -p . (mkdir's -p is fine -- different tool.) assert(!/mysqldump[^|]*\s-p[\s'"]/.test(command), 'no mysqldump -p flag with password'); assert(command.includes('MYSQL_PWD="$MCP_BACKUP_PASS"')); assert(command.includes('mysqldump')); assert(command.includes('| gzip > ')); - assert(command.includes("/backups/app.sql.gz")); + assert(command.includes('/backups/app.sql.gz')); }); await test('buildBackupCommand: postgres uses PGPASSWORD env', () => { @@ -84,9 +84,9 @@ await test('buildBackupCommand: files uses tar with shQuote', () => { gzip: true, }); assert(command.includes('tar -czf')); - assert(command.includes("'/etc/nginx'")); + assert(command.includes('\'/etc/nginx\'')); // Injection attempt wrapped in quotes - assert(command.includes("'/var/log; rm -rf /'")); + assert(command.includes('\'/var/log; rm -rf /\'')); // Ensure the dangerous substring is NOT floating free assert(!/\s\/var\/log;\s*rm/.test(command)); }); @@ -140,9 +140,9 @@ await test('backup_create: happy path generates meta + returns typed', async () const FAKE_HASH = 'abc123abc123abc123abc123abc123abc123abc123abc123abc123abc123abcd'; const client = new FakeClient({ script: (cmd) => { if (cmd.includes('sha256sum')) return { stdout: `${FAKE_HASH}\n`, code: 0 }; - if (cmd.includes("stat -c '%s'")) return { stdout: '12345\n', code: 0 }; + if (cmd.includes('stat -c \'%s\'')) return { stdout: '12345\n', code: 0 }; return { stdout: '', code: 0 }; - }}); + } }); const r = await handleSshBackupCreate({ getConnection: async () => client, args: { @@ -203,7 +203,7 @@ await test('backup_restore: sha256 mismatch -> refuse restore', async () => { if (cmd.startsWith('cat ')) return { stdout: metaJson, code: 0 }; if (cmd.includes('sha256sum')) return { stdout: 
'DIFFERENT-HASH\n', code: 0 }; return { stdout: '', code: 0 }; - }}); + } }); const r = await handleSshBackupRestore({ getConnection: async () => client, args: { server: 's', backup_id: 'id1', verify: true, format: 'json' }, @@ -219,7 +219,7 @@ await test('backup_restore: preview loads meta and shows high-risk plan', async if (cmd.includes('find ')) return { stdout: '/b/1.tgz.meta\n', code: 0 }; if (cmd.startsWith('cat ')) return { stdout: JSON.stringify(meta), code: 0 }; return { stdout: '', code: 0 }; - }}); + } }); const r = await handleSshBackupRestore({ getConnection: async () => client, args: { server: 's', backup_id: 'id1', preview: true, format: 'json' }, @@ -256,5 +256,92 @@ await test('backup_schedule: preview shows cron plan', async () => { assert(parsed.data.plan.action.includes('schedule')); }); +await test('backup_schedule: rejects cron with embedded newline (injection guard)', async () => { + const r = await handleSshBackupSchedule({ + getConnection: async () => { throw new Error('should not reach connection'); }, + args: { + server: 's', + cron: '0 0 * * *\n* * * * * rm -rf ~', + backup_type: 'mysql', database: 'app', + preview: true, format: 'json', + }, + }); + assert.strictEqual(r.isError, true); + const parsed = JSON.parse(r.content[0].text); + assert(/single line|newline/i.test(parsed.error), `expected newline rejection, got: ${parsed.error}`); +}); + +await test('backup_schedule: rejects cron with shell metacharacters', async () => { + for (const bad of ['0 0 * * * `id`', '0 0 * * * $(whoami)']) { + const r = await handleSshBackupSchedule({ + getConnection: async () => { throw new Error('should not reach connection'); }, + args: { server: 's', cron: bad, backup_type: 'mysql', database: 'app', preview: true, format: 'json' }, + }); + assert.strictEqual(r.isError, true, `expected fail for ${JSON.stringify(bad)}`); + const parsed = JSON.parse(r.content[0].text); + assert(/shell metacharacters|\$|`/.test(parsed.error), `expected metachar 
rejection, got: ${parsed.error}`); + } +}); + +await test('backup_schedule: refuses password arg for DB backups (no plaintext secret in crontab)', async () => { + for (const dbType of ['mysql', 'postgresql', 'mongodb']) { + const r = await handleSshBackupSchedule({ + getConnection: async () => { throw new Error('should not reach connection'); }, + args: { + server: 's', cron: '0 2 * * *', + backup_type: dbType, database: 'app', user: 'u', password: 'sekret', + preview: true, format: 'json', + }, + }); + assert.strictEqual(r.isError, true, `${dbType}: expected fail response when password present`); + const parsed = JSON.parse(r.content[0].text); + assert(parsed.error.includes('refusing to embed password'), + `${dbType}: expected explicit refusal, got: ${parsed.error}`); + // Secret must not appear anywhere in the response + assert(!JSON.stringify(parsed).includes('sekret'), + `${dbType}: password leaked into response`); + } +}); + +await test('backup_schedule: preview for DB without password produces cron line with no secret', async () => { + const r = await handleSshBackupSchedule({ + getConnection: async () => { throw new Error('should not call'); }, + args: { + server: 's', cron: '30 3 * * *', + backup_type: 'mysql', database: 'app', user: 'u', + preview: true, format: 'json', + }, + }); + const parsed = JSON.parse(r.content[0].text); + assert.strictEqual(parsed.success, true, parsed.error); + assert(parsed.data.plan.cron_line); + assert(!parsed.data.plan.cron_line.includes('MCP_BACKUP_PASS='), + 'cron line must not embed password env prefix'); +}); + +await test('backup_schedule: mongodb without password installs cron line with anonymous URI (regression: B1)', async () => { + const r = await handleSshBackupSchedule({ + getConnection: async () => { throw new Error('should not call'); }, + args: { + server: 's', cron: '15 4 * * *', + backup_type: 'mongodb', database: 'app', + preview: true, format: 'json', + }, + }); + const parsed = JSON.parse(r.content[0].text); 
+ // Previously this returned the internal defense-in-depth failure because + // mongo's MCP_BACKUP_URI env prefix was treated as a secret-bearing. + // Fix: classify env prefix as secret only when password was supplied. + assert.strictEqual(parsed.success, true, parsed.error); + assert(parsed.data.plan.cron_line, 'cron line should be built'); + assert(parsed.data.plan.cron_line.includes('MCP_BACKUP_URI='), + 'mongo cron line should carry URI env var'); + // URI must have no userinfo (no `@`) because no password was supplied. + const uriMatch = parsed.data.plan.cron_line.match(/MCP_BACKUP_URI=([^ ]+)/); + assert(uriMatch, 'MCP_BACKUP_URI= present in cron line'); + assert(!uriMatch[1].includes('@'), + `anonymous mongo URI must have no userinfo; got ${uriMatch[1]}`); +}); + console.log(`\n${passed} passed, ${failed} failed`); if (failed > 0) { for (const f of fails) console.error(` [err] ${f.name}\n ${f.err.stack}`); process.exit(1); } diff --git a/tests/test-cat-tools.js b/tests/test-cat-tools.js index b00fa77..509152f 100644 --- a/tests/test-cat-tools.js +++ b/tests/test-cat-tools.js @@ -35,39 +35,39 @@ console.log('[test] Testing cat-tools\n'); // --- buildCatCommand ----------------------------------------------------- await test('buildCatCommand: default -> plain cat with quoted path', () => { - assert.strictEqual(buildCatCommand({ file: '/var/log/app.log' }), "cat '/var/log/app.log'"); + assert.strictEqual(buildCatCommand({ file: '/var/log/app.log' }), 'cat \'/var/log/app.log\''); }); await test('buildCatCommand: path with spaces and semicolons is quoted', () => { assert.strictEqual( buildCatCommand({ file: '/var/log/my app; rm -rf /' }), - "cat '/var/log/my app; rm -rf /'" + 'cat \'/var/log/my app; rm -rf /\'' ); }); await test('buildCatCommand: head mode', () => { - assert.strictEqual(buildCatCommand({ file: '/f', head: 20 }), "head -n 20 '/f'"); + assert.strictEqual(buildCatCommand({ file: '/f', head: 20 }), 'head -n 20 \'/f\''); }); await 
test('buildCatCommand: tail mode', () => { - assert.strictEqual(buildCatCommand({ file: '/f', tail: 50 }), "tail -n 50 '/f'"); + assert.strictEqual(buildCatCommand({ file: '/f', tail: 50 }), 'tail -n 50 \'/f\''); }); await test('buildCatCommand: line-range with sed', () => { - assert.strictEqual(buildCatCommand({ file: '/f', line_start: 10, line_end: 25 }), "sed -n '10,25p' '/f'"); + assert.strictEqual(buildCatCommand({ file: '/f', line_start: 10, line_end: 25 }), 'sed -n \'10,25p\' \'/f\''); }); await test('buildCatCommand: line-range with grep filter', () => { assert.strictEqual( buildCatCommand({ file: '/f', line_start: 1, line_end: 100, grep: 'ERROR' }), - "sed -n '1,100p' '/f' | grep -E 'ERROR'" + 'sed -n \'1,100p\' \'/f\' | grep -E \'ERROR\'' ); }); await test('buildCatCommand: offset+limit uses dd', () => { assert.strictEqual( buildCatCommand({ file: '/f', offset: 1024, limit: 2048 }), - "dd if='/f' bs=1 skip=1024 count=2048 2>/dev/null" + 'dd if=\'/f\' bs=1 skip=1024 count=2048 2>/dev/null' ); }); @@ -75,35 +75,35 @@ await test('buildCatCommand: injection in numbers is neutralized by Number() coe // Pass an injection attempt through head=... 
const cmd = buildCatCommand({ file: '/f', head: '10; rm -rf /' }); // Number('10; rm -rf /') -> NaN -> floor(NaN) || 10 = 10 - assert.strictEqual(cmd, "head -n 10 '/f'"); + assert.strictEqual(cmd, 'head -n 10 \'/f\''); }); await test('buildCatCommand: grep-only mode', () => { - assert.strictEqual(buildCatCommand({ file: '/f', grep: 'TODO' }), "grep -E 'TODO' '/f'"); + assert.strictEqual(buildCatCommand({ file: '/f', grep: 'TODO' }), 'grep -E \'TODO\' \'/f\''); }); await test('buildCatCommand: grep pattern with special shell chars is quoted', () => { assert.strictEqual( - buildCatCommand({ file: '/f', grep: "it's; rm -rf /" }), - "grep -E 'it'\\''s; rm -rf /' '/f'" + buildCatCommand({ file: '/f', grep: 'it\'s; rm -rf /' }), + 'grep -E \'it\'\\\'\'s; rm -rf /\' \'/f\'' ); }); await test('buildCatCommand: line_start > line_end clamps end to start', () => { assert.strictEqual( buildCatCommand({ file: '/f', line_start: 100, line_end: 10 }), - "sed -n '100,100p' '/f'" + 'sed -n \'100,100p\' \'/f\'' ); }); await test('buildCatCommand: line_start 0 or negative clamps to 1', () => { assert.strictEqual( buildCatCommand({ file: '/f', line_start: 0, line_end: 5 }), - "sed -n '1,5p' '/f'" + 'sed -n \'1,5p\' \'/f\'' ); assert.strictEqual( buildCatCommand({ file: '/f', line_start: -5, line_end: 5 }), - "sed -n '1,5p' '/f'" + 'sed -n \'1,5p\' \'/f\'' ); }); @@ -124,7 +124,7 @@ await test('handleSshCat: head mode returns formatted success', async () => { args: { server: 'prod01', file: '/var/log/app.log', head: 3 }, }); assert.strictEqual(r.isError, undefined); - assert.strictEqual(client.lastCommand, "head -n 3 '/var/log/app.log'"); + assert.strictEqual(client.lastCommand, 'head -n 3 \'/var/log/app.log\''); assert(r.content[0].text.includes('line1')); assert(r.content[0].text.includes('line3')); }); diff --git a/tests/test-command-aliases.js b/tests/test-command-aliases.js index 5c85276..ce9a9c2 100755 --- a/tests/test-command-aliases.js +++ b/tests/test-command-aliases.js @@ 
-47,15 +47,15 @@ try { console.log('Test 2: Expand command alias'); try { const aliases = loadCommandAliases(); - + // Test with a known alias from default profile if (aliases['check-memory']) { const expanded = expandCommandAlias('check-memory'); - assert(expanded === aliases['check-memory'], + assert(expanded === aliases['check-memory'], 'Should expand check-memory to its full command'); console.log(`[ok] Expanded 'check-memory' to '${expanded}'`); } - + // Test with non-alias command const nonAlias = expandCommandAlias('ls -la'); assert(nonAlias === 'ls -la', 'Non-alias commands should remain unchanged'); @@ -70,17 +70,17 @@ console.log('Test 3: Add custom alias'); try { const testAlias = 'test-alias-' + Date.now(); const testCommand = 'echo "This is a test command"'; - + addCommandAlias(testAlias, testCommand); - + const aliases = loadCommandAliases(); assert(aliases[testAlias] === testCommand, 'Custom alias should be added'); - + const expanded = expandCommandAlias(testAlias); assert(expanded === testCommand, 'Custom alias should expand correctly'); - + console.log(`[ok] Added custom alias: ${testAlias}\n`); - + // Cleanup removeCommandAlias(testAlias); } catch (error) { @@ -93,14 +93,14 @@ console.log('Test 4: Remove custom alias'); try { const testAlias = 'test-remove-' + Date.now(); const testCommand = 'echo "To be removed"'; - + // Add then remove addCommandAlias(testAlias, testCommand); removeCommandAlias(testAlias); - + const aliases = loadCommandAliases(); assert(!aliases[testAlias], 'Alias should be removed'); - + console.log('[ok] Successfully removed custom alias\n'); } catch (error) { console.error(`[err] Failed to remove alias: ${error.message}\n`); @@ -112,23 +112,23 @@ console.log('Test 5: List command aliases'); try { const list = listCommandAliases(); assert(Array.isArray(list), 'listCommandAliases should return an array'); - + if (list.length > 0) { const firstAlias = list[0]; assert(firstAlias.alias, 'Each alias should have an alias 
property'); assert(firstAlias.command, 'Each alias should have a command property'); - assert(typeof firstAlias.isFromProfile === 'boolean', + assert(typeof firstAlias.isFromProfile === 'boolean', 'Each alias should have isFromProfile boolean'); - assert(typeof firstAlias.isCustom === 'boolean', + assert(typeof firstAlias.isCustom === 'boolean', 'Each alias should have isCustom boolean'); } - + console.log(`[ok] Listed ${list.length} aliases`); - + // Show some examples const profileAliases = list.filter(a => a.isFromProfile).slice(0, 3); const customAliases = list.filter(a => a.isCustom).slice(0, 3); - + if (profileAliases.length > 0) { console.log(' Profile aliases:', profileAliases.map(a => a.alias).join(', ')); } @@ -146,19 +146,19 @@ console.log('Test 6: Suggest aliases'); try { // Add a test alias for suggestion addCommandAlias('test-suggest', 'test suggestion command'); - + const suggestions = suggestAliases('test'); assert(Array.isArray(suggestions), 'suggestAliases should return an array'); - + const testSuggestion = suggestions.find(s => s.alias === 'test-suggest'); assert(testSuggestion, 'Should find the test alias in suggestions'); - + console.log(`[ok] Found ${suggestions.length} suggestions for 'test'`); if (suggestions.length > 0) { console.log(` Examples: ${suggestions.slice(0, 3).map(s => s.alias).join(', ')}`); } console.log(); - + // Cleanup removeCommandAlias('test-suggest'); } catch (error) { @@ -171,17 +171,17 @@ console.log('Test 7: Profile alias protection'); try { const aliases = loadCommandAliases(); const profileAlias = Object.keys(aliases)[0]; // Get first alias - + if (profileAlias) { const originalCommand = aliases[profileAlias]; - + // Try to remove a profile alias (should reset to original) removeCommandAlias(profileAlias); - + const newAliases = loadCommandAliases(); - assert(newAliases[profileAlias] === originalCommand, + assert(newAliases[profileAlias] === originalCommand, 'Profile aliases should be reset, not removed'); - + 
console.log(`[ok] Profile alias '${profileAlias}' is protected from removal\n`); } } catch (error) { @@ -198,4 +198,4 @@ if (fs.existsSync(backupFile)) { fs.unlinkSync(CUSTOM_ALIASES_FILE); } -console.log('[*] All command alias tests passed!'); \ No newline at end of file +console.log('[*] All command alias tests passed!'); diff --git a/tests/test-config-loader.js b/tests/test-config-loader.js new file mode 100644 index 0000000..2386f88 --- /dev/null +++ b/tests/test-config-loader.js @@ -0,0 +1,246 @@ +#!/usr/bin/env node +/** + * Tests for src/config-loader.js -- server config source precedence. + * + * CLAUDE.md documents: env > .env > TOML. Regression here silently + * changes which host Claude connects to, so this is high-stakes. + * + * Covers: + * - TOML-only config loads and normalizes (case-insensitive names, + * key_path / keypath / ssh_key aliases, default_dir, proxy_jump). + * - .env-only config loads via SSH_SERVER_NAME_* pattern. + * - env (process.env) overrides .env which overrides TOML. + * - Server name collision across formats: same name wins per precedence, + * no duplicate entries in the map. + * - exportToToml + loadTomlConfig round-trip preserves fields. + * - getServer uses lowercase normalization. 
+ */ + +import assert from 'node:assert'; +import fs from 'node:fs'; +import os from 'node:os'; +import path from 'node:path'; +import { ConfigLoader } from '../src/config-loader.js'; + +let passed = 0; +let failed = 0; +const fails = []; + +async function test(name, fn) { + try { + await fn(); + passed++; + console.log(`[ok] ${name}`); + } catch (e) { + failed++; + fails.push({ name, err: e }); + console.error(`[err] ${name}: ${e.message}`); + } +} + +function mkTmp() { + return fs.mkdtempSync(path.join(os.tmpdir(), 'cfgload-')); +} + +function clearSshEnv() { + for (const k of Object.keys(process.env)) { + if (k.startsWith('SSH_SERVER_')) delete process.env[k]; + } +} + +// --- TOML loading -------------------------------------------------------- +await test('TOML: loads server with key_path / default_dir / proxy_jump', async () => { + clearSshEnv(); + const dir = mkTmp(); + const tomlPath = path.join(dir, 'cfg.toml'); + fs.writeFileSync(tomlPath, ` +[ssh_servers.prod] +host = "prod.example.com" +user = "deploy" +key_path = "~/.ssh/prod_rsa" +default_dir = "/srv/app" +proxy_jump = "bastion" +port = 2222 +`); + const loader = new ConfigLoader(); + await loader.load({ tomlPath, envPath: '/nonexistent.env' }); + const s = loader.getServer('prod'); + assert.strictEqual(s.host, 'prod.example.com'); + assert.strictEqual(s.user, 'deploy'); + assert.strictEqual(s.keyPath, '~/.ssh/prod_rsa', + 'key_path must map to keyPath (camelCase is canonical)'); + assert.strictEqual(s.defaultDir, '/srv/app'); + assert.strictEqual(s.proxyJump, 'bastion'); + assert.strictEqual(s.port, 2222); + assert.strictEqual(s.source, 'toml'); +}); + +await test('TOML: keypath and ssh_key are accepted as aliases for key_path', async () => { + clearSshEnv(); + const dir = mkTmp(); + const tomlPath = path.join(dir, 'cfg.toml'); + fs.writeFileSync(tomlPath, ` +[ssh_servers.a] +host = "a.example.com" +user = "u" +keypath = "/k1" + +[ssh_servers.b] +host = "b.example.com" +user = "u" +ssh_key = "/k2" 
+`); + const loader = new ConfigLoader(); + await loader.load({ tomlPath, envPath: '/nonexistent.env' }); + assert.strictEqual(loader.getServer('a').keyPath, '/k1'); + assert.strictEqual(loader.getServer('b').keyPath, '/k2'); +}); + +await test('TOML: server names are lowercased in the map', async () => { + clearSshEnv(); + const dir = mkTmp(); + const tomlPath = path.join(dir, 'cfg.toml'); + fs.writeFileSync(tomlPath, ` +[ssh_servers.PROD] +host = "p.example.com" +user = "u" +`); + const loader = new ConfigLoader(); + await loader.load({ tomlPath, envPath: '/nonexistent.env' }); + assert(loader.getServer('prod'), 'PROD should be resolvable as prod'); + assert(loader.getServer('PROD'), 'PROD should be resolvable by upper too (getServer lowercases)'); +}); + +// --- .env loading -------------------------------------------------------- +await test('.env: loads SSH_SERVER_NAME_* pattern', async () => { + clearSshEnv(); + const dir = mkTmp(); + const envPath = path.join(dir, '.env'); + fs.writeFileSync(envPath, [ + 'SSH_SERVER_STAGING_HOST=staging.example.com', + 'SSH_SERVER_STAGING_USER=deploy', + 'SSH_SERVER_STAGING_KEYPATH=/srv/keys/staging_rsa', + 'SSH_SERVER_STAGING_PORT=2200', + 'SSH_SERVER_STAGING_DEFAULT_DIR=/srv/app', + 'SSH_SERVER_STAGING_PROXYJUMP=bastion', + ].join('\n')); + const loader = new ConfigLoader(); + await loader.load({ tomlPath: '/nonexistent.toml', envPath }); + const s = loader.getServer('staging'); + assert.strictEqual(s.host, 'staging.example.com'); + assert.strictEqual(s.user, 'deploy'); + assert.strictEqual(s.keyPath, '/srv/keys/staging_rsa'); + assert.strictEqual(s.port, 2200); + assert.strictEqual(s.defaultDir, '/srv/app'); + assert.strictEqual(s.proxyJump, 'bastion'); +}); + +// --- precedence ---------------------------------------------------------- +await test('precedence: env (process.env) overrides .env overrides TOML', async () => { + clearSshEnv(); + const dir = mkTmp(); + + const tomlPath = path.join(dir, 'cfg.toml'); + 
fs.writeFileSync(tomlPath, ` +[ssh_servers.prod] +host = "toml-host" +user = "u" +`); + + const envPath = path.join(dir, '.env'); + fs.writeFileSync(envPath, 'SSH_SERVER_PROD_HOST=dotenv-host\nSSH_SERVER_PROD_USER=dotenvuser\n'); + + // `.env` via dotenv only sets variables that aren't already set in + // process.env, so set a process-level override to assert top priority. + process.env.SSH_SERVER_PROD_HOST = 'process-env-host'; + + const loader = new ConfigLoader(); + await loader.load({ tomlPath, envPath }); + const s = loader.getServer('prod'); + assert.strictEqual(s.host, 'process-env-host', + 'process.env must win over .env and TOML'); + + clearSshEnv(); +}); + +await test('precedence: .env beats TOML when process.env unset', async () => { + clearSshEnv(); + const dir = mkTmp(); + + const tomlPath = path.join(dir, 'cfg.toml'); + fs.writeFileSync(tomlPath, ` +[ssh_servers.app] +host = "toml-host" +user = "tomluser" +`); + + const envPath = path.join(dir, '.env'); + fs.writeFileSync(envPath, 'SSH_SERVER_APP_HOST=dotenv-host\nSSH_SERVER_APP_USER=dotenvuser\n'); + + const loader = new ConfigLoader(); + await loader.load({ tomlPath, envPath }); + const s = loader.getServer('app'); + assert.strictEqual(s.host, 'dotenv-host', '.env must win over TOML'); + clearSshEnv(); +}); + +// --- corpus / export ----------------------------------------------------- +await test('getServer, hasServer, getAllServers are case-insensitive + complete', async () => { + clearSshEnv(); + const dir = mkTmp(); + const tomlPath = path.join(dir, 'cfg.toml'); + fs.writeFileSync(tomlPath, ` +[ssh_servers.one] +host = "h1" +user = "u" +[ssh_servers.two] +host = "h2" +user = "u" +`); + const loader = new ConfigLoader(); + await loader.load({ tomlPath, envPath: '/nonexistent.env' }); + assert(loader.hasServer('ONE')); + assert(loader.hasServer('two')); + assert.strictEqual(loader.getAllServers().length, 2); + assert(!loader.hasServer('three')); +}); + +await test('configSource reflects actual 
load origin', async () => { + clearSshEnv(); + const dir = mkTmp(); + const tomlPath = path.join(dir, 'cfg.toml'); + fs.writeFileSync(tomlPath, '[ssh_servers.x]\nhost = "h"\nuser = "u"\n'); + const envPath = path.join(dir, '.env'); + fs.writeFileSync(envPath, 'SSH_SERVER_X_HOST=envh\nSSH_SERVER_X_USER=envu\n'); + + const l1 = new ConfigLoader(); + await l1.load({ tomlPath, envPath: '/nonexistent.env' }); + assert.strictEqual(l1.configSource, 'toml'); + + clearSshEnv(); + const l2 = new ConfigLoader(); + await l2.load({ tomlPath: '/nonexistent.toml', envPath }); + assert.strictEqual(l2.configSource, 'env'); + + clearSshEnv(); +}); + +await test('corrupt TOML does not crash load() and falls through to env', async () => { + clearSshEnv(); + const dir = mkTmp(); + const tomlPath = path.join(dir, 'cfg.toml'); + fs.writeFileSync(tomlPath, 'this is = not [valid TOML'); + const envPath = path.join(dir, '.env'); + fs.writeFileSync(envPath, 'SSH_SERVER_FALLBACK_HOST=fallback.example\nSSH_SERVER_FALLBACK_USER=u\n'); + const loader = new ConfigLoader(); + await loader.load({ tomlPath, envPath }); + assert(loader.getServer('fallback')); + assert.strictEqual(loader.getServer('fallback').host, 'fallback.example'); + clearSshEnv(); +}); + +// Clean up env state left from any previous runs before exiting. +clearSshEnv(); + +console.log(`\n${passed} passed, ${failed} failed`); +if (failed > 0) { for (const f of fails) console.error(` [err] ${f.name}\n ${f.err.stack}`); process.exit(1); } diff --git a/tests/test-config.js b/tests/test-config.js new file mode 100644 index 0000000..f9527a7 --- /dev/null +++ b/tests/test-config.js @@ -0,0 +1,63 @@ +#!/usr/bin/env node +/** + * Tests for src/config.js -- env-driven output limits. + * + * The values are read at import-time so these tests focus on the helpers + * (intFromEnv / boolFromEnv via exported symbols) and on the runtime + * truncateOutput() shape. Wiring into output-formatter is verified in + * test-output-formatter. 
+ */ + +import assert from 'node:assert'; +import { OUTPUT_LIMITS, RESPONSE_FORMAT, truncateOutput } from '../src/config.js'; + +let passed = 0; +let failed = 0; +const fails = []; + +function test(name, fn) { + try { fn(); passed++; console.log(`[ok] ${name}`); } + catch (e) { failed++; fails.push({ name, err: e }); console.error(`[err] ${name}: ${e.message}`); } +} + +test('OUTPUT_LIMITS has sane defaults and is frozen', () => { + assert(typeof OUTPUT_LIMITS.MAX_OUTPUT_LENGTH === 'number'); + assert(OUTPUT_LIMITS.MAX_OUTPUT_LENGTH >= 100); + assert(typeof OUTPUT_LIMITS.MAX_TAIL_LINES === 'number'); + assert(typeof OUTPUT_LIMITS.MAX_RSYNC_OUTPUT === 'number'); + assert(Object.isFrozen(OUTPUT_LIMITS)); +}); + +test('RESPONSE_FORMAT exposes boolean flags and is frozen', () => { + assert(typeof RESPONSE_FORMAT.COMPACT_JSON === 'boolean'); + assert(typeof RESPONSE_FORMAT.DEBUG === 'boolean'); + assert(Object.isFrozen(RESPONSE_FORMAT)); +}); + +test('truncateOutput returns short input unchanged', () => { + assert.strictEqual(truncateOutput('short', 10_000), 'short'); +}); + +test('truncateOutput keeps head + tail and elides middle for long input', () => { + const input = 'A'.repeat(5_000) + 'MIDDLE' + 'B'.repeat(5_000); + const out = truncateOutput(input, 1_000); + assert(out.includes('A')); + assert(out.includes('B')); + assert(!out.includes('MIDDLE'), 'middle must be elided'); + assert(out.includes('elided'), 'should announce the elision'); + assert(out.length < input.length); +}); + +test('truncateOutput handles nullish input safely', () => { + assert.strictEqual(truncateOutput(null), ''); + assert.strictEqual(truncateOutput(undefined), ''); + assert.strictEqual(truncateOutput(''), ''); +}); + +test('truncateOutput coerces non-string input to string', () => { + const out = truncateOutput(12345, 10_000); + assert.strictEqual(out, '12345'); +}); + +console.log(`\n${passed} passed, ${failed} failed`); +if (failed > 0) { for (const f of fails) console.error(` [err] 
${f.name}\n ${f.err.stack}`); process.exit(1); } diff --git a/tests/test-db-tools.js b/tests/test-db-tools.js index 1855d5b..723a456 100644 --- a/tests/test-db-tools.js +++ b/tests/test-db-tools.js @@ -64,8 +64,8 @@ await test('buildMySqlQueryCommand: uses MYSQL_PWD env, NOT -p argv', () => { assert(cmd.startsWith('MYSQL_PWD='), `expected MYSQL_PWD prefix, got: ${cmd}`); assert(cmd.includes('mysql')); assert(!cmd.includes('-p'), `password flag must NOT appear, got: ${cmd}`); - assert(cmd.includes("-D 'app'")); - assert(cmd.includes("'SELECT 1'")); + assert(cmd.includes('-D \'app\'')); + assert(cmd.includes('\'SELECT 1\'')); }); await test('buildPostgresQueryCommand: uses PGPASSWORD env, NOT password in argv', () => { @@ -75,19 +75,35 @@ await test('buildPostgresQueryCommand: uses PGPASSWORD env, NOT password in argv // psql password arg `-W` would trigger a prompt; `-w` means no-password -- neither should appear with a value. // But we should not have any flag that carries a literal password. 
assert(!/--password\s*=?\s*\S/.test(cmd), 'psql --password flag must NOT carry a value'); - assert(cmd.includes("-U 'alice'")); - assert(cmd.includes("-d 'app'")); + assert(cmd.includes('-U \'alice\'')); + assert(cmd.includes('-d \'app\'')); }); -await test('buildMongoQueryCommand: escapes eval snippet', () => { +await test('buildMongoQueryCommand: uses --nodb + env-URI, escapes eval, names db via getSiblingDB', () => { const cmd = buildMongoQueryCommand({ database: 'app', - query: "db.users.find({name: \"O'Brien\"}).toArray()", + query: 'db.users.find({name: "O\'Brien"}).toArray()', }); assert(cmd.startsWith('mongosh')); - assert(cmd.includes("'app'")); + assert(cmd.includes('--nodb'), + '--nodb is required so mongosh does NOT auto-connect from argv/URI'); + assert(cmd.includes('getDB("app")'), + 'target db must be selected via Mongo().getDB(), not as a positional URI arg'); + assert(cmd.includes('process.env.SSH_MGR_DB_URI'), + 'connection URI must be read from env, never argv'); // Single quotes inside the query get POSIX-escaped: '\'' - assert(cmd.includes("'\\''"), 'single-quote inside eval must be POSIX-escaped'); + assert(cmd.includes('\'\\\'\''), 'single-quote inside eval must be POSIX-escaped'); +}); + +await test('buildMongoQueryCommand: no user/credentials appear in argv (regression: H8)', () => { + const cmd = buildMongoQueryCommand({ + database: 'app', + query: 'db.x.find({}).toArray()', + user: 'alice', + }); + // Neither `-u` nor `-p` should appear -- those leak via `ps aux`. 
+ assert(!/\s-u\s/.test(cmd), 'mongo query must not put `-u` user in argv'); + assert(!/\s-p\s/.test(cmd), 'mongo query must not put `-p` password in argv'); }); // -------------------------------------------------------------------------- @@ -110,6 +126,35 @@ await test('ssh_db_query: isSafeSelect rejection -> structured fail, NO remote c assert.strictEqual(r.isError, true); }); +await test('ssh_db_query: rejects database/user names with SQL metacharacters (injection guard)', async () => { + for (const bad of ['app\'; DROP DATABASE x; --', 'app; DROP', 'a b', 'a`b', 'a\\b']) { + const r = await handleSshDbQuery({ + getConnection: async () => { throw new Error('must not connect'); }, + args: { server: 's', db_type: 'mysql', database: bad, query: 'SELECT 1', format: 'json' }, + }); + assert.strictEqual(r.isError, true, `expected fail for database=${JSON.stringify(bad)}`); + const parsed = JSON.parse(r.content[0].text); + assert(parsed.error.includes('unsafe characters'), + `expected 'unsafe characters' in error, got: ${parsed.error}`); + } + // user field should be guarded too + const r2 = await handleSshDbQuery({ + getConnection: async () => { throw new Error('must not connect'); }, + args: { server: 's', db_type: 'mysql', query: 'SELECT 1', user: 'u\'; DROP', format: 'json' }, + }); + assert.strictEqual(r2.isError, true); +}); + +await test('ssh_db_dump: rejects database names with metacharacters', async () => { + const r = await handleSshDbDump({ + getConnection: async () => { throw new Error('must not connect'); }, + args: { server: 's', db_type: 'mysql', database: 'x\'; rm -rf /', format: 'json', preview: true }, + }); + assert.strictEqual(r.isError, true); + const parsed = JSON.parse(r.content[0].text); + assert(parsed.error.includes('unsafe characters')); +}); + await test('ssh_db_query: `SELECT deleted_at FROM t` is accepted (old impl would falsely reject)', async () => { const client = new FakeClient({ script: () => ({ stdout: 'deleted_at\n2024-01-01\n', 
code: 0 }) }); const r = await handleSshDbQuery({ @@ -131,7 +176,7 @@ await test('ssh_db_query: `SELECT deleted_at FROM t` is accepted (old impl would // handleSshDbQuery -- credential handling // -------------------------------------------------------------------------- await test('ssh_db_query: MySQL password goes via MYSQL_PWD env, never argv', async () => { - const secret = "pw-with-'quotes-and-$chars"; + const secret = 'pw-with-\'quotes-and-$chars'; const client = new FakeClient({ script: () => ({ stdout: 'id\n1\n', code: 0 }) }); await handleSshDbQuery({ getConnection: async () => client, @@ -177,14 +222,14 @@ await test('ssh_db_query: MongoDB eval properly escaped for POSIX shell', async getConnection: async () => client, args: { server: 's', db_type: 'mongodb', database: 'app', - query: "db.users.find({name: 'alice'}).toArray()", + query: 'db.users.find({name: \'alice\'}).toArray()', format: 'json', }, }); const cmd = client.lastCommand; assert(cmd.includes('mongosh')); // Single-quotes embedded in the query must be POSIX-escaped so the shell doesn't break. - assert(cmd.includes("'\\''"), `expected POSIX escape, got: ${cmd}`); + assert(cmd.includes('\'\\\'\''), `expected POSIX escape, got: ${cmd}`); }); await test('ssh_db_query: Mongo eval rejects obvious mutations', async () => { diff --git a/tests/test-deploy-tools.js b/tests/test-deploy-tools.js index 1b9b394..ddb1d3e 100644 --- a/tests/test-deploy-tools.js +++ b/tests/test-deploy-tools.js @@ -39,7 +39,7 @@ function makeClient(script) { sftp(cb) { // Pass a mock sftp -- fastPut always succeeds. 
setImmediate(() => cb(null, { - fastPut(local, remote, done) { setImmediate(() => done(null)); }, + fastPut(_local, _remote, done) { setImmediate(() => done(null)); }, })); }, }; @@ -266,7 +266,7 @@ await test('new-file rollback: deletes uploaded artifact, not mv', async () => { }); // Clean up artifact -try { fs.rmSync(artifactDir, { recursive: true, force: true }); } catch (_) {} +try { fs.rmSync(artifactDir, { recursive: true, force: true }); } catch { /* best-effort cleanup */ } console.log(`\n${passed} passed, ${failed} failed`); if (failed > 0) { for (const f of fails) console.error(` [err] ${f.name}\n ${f.err.stack}`); process.exit(1); } diff --git a/tests/test-docker-tools.js b/tests/test-docker-tools.js index e564026..79eba83 100644 --- a/tests/test-docker-tools.js +++ b/tests/test-docker-tools.js @@ -3,8 +3,7 @@ import assert from 'assert'; import { EventEmitter } from 'events'; import { - ALLOWED_ACTIONS, MUTATING_ACTIONS, REVERSIBILITY, RISK_MAP, - CONTAINER_NAME_RE, CONTAINER_ID_RE, IMAGE_REF_RE, + REVERSIBILITY, RISK_MAP, isValidContainer, isValidImage, parseDockerPs, parseDockerInspect, handleSshDocker, @@ -171,12 +170,12 @@ await test('handleSshDocker: exec is command-shQuoted', async () => { const client = new FakeClient({ script: () => ({ stdout: 'hi', code: 0 }) }); await handleSshDocker({ getConnection: async () => client, - args: { server: 's', action: 'exec', container: 'myapp', command: "echo 'hi'; rm -rf /" }, + args: { server: 's', action: 'exec', container: 'myapp', command: 'echo \'hi\'; rm -rf /' }, }); const lastCmd = client.commands[client.commands.length - 1]; assert(lastCmd.includes('docker exec')); // The injection attempt should be inside quotes - assert(lastCmd.includes("'echo '\\''hi'\\''; rm -rf /'") || lastCmd.includes("'echo 'hi'; rm -rf /'") || /docker exec.*myapp.*sh -c/.test(lastCmd)); + assert(lastCmd.includes('\'echo \'\\\'\'hi\'\\\'\'; rm -rf /\'') || lastCmd.includes('\'echo \'hi\'; rm -rf /\'') || /docker 
exec.*myapp.*sh -c/.test(lastCmd)); }); console.log(`\n${passed} passed, ${failed} failed`); diff --git a/tests/test-exec-tools.js b/tests/test-exec-tools.js index 194ba50..7e5b18b 100644 --- a/tests/test-exec-tools.js +++ b/tests/test-exec-tools.js @@ -16,8 +16,6 @@ async function test(name, fn) { try { await fn(); passed++; console.log(`[ok] ${name}`); } catch (e) { failed++; fails.push({ name, err: e }); console.error(`[err] ${name}: ${e.message}`); } } -const sleep = ms => new Promise(r => setTimeout(r, ms)); - // --- Fake ssh2 client ---------------------------------------------------- class FakeStream extends EventEmitter { constructor() { @@ -78,7 +76,7 @@ await test('ssh_execute: cwd shell-safely quoted in remote command', async () => getConnection: async () => client, args: { server: 's', command: 'ls', cwd: '/tmp; rm -rf /' }, }); - assert.strictEqual(client.lastCommand, "cd '/tmp; rm -rf /' && ls"); + assert.strictEqual(client.lastCommand, 'cd \'/tmp; rm -rf /\' && ls'); }); await test('ssh_execute: non-zero exit renders [err] marker (not isError)', async () => { @@ -259,7 +257,7 @@ await test('ssh_execute_group: connection failure on one server reported as per- await test('ssh_execute_group: concurrency caps parallelism', async () => { let inFlight = 0, peak = 0; const mkClient = () => ({ - exec(cmd, cb) { + exec(_cmd, cb) { inFlight++; peak = Math.max(peak, inFlight); const s = new FakeStream(); setImmediate(() => { diff --git a/tests/test-hooks.js b/tests/test-hooks.js index f119250..bbd3638 100755 --- a/tests/test-hooks.js +++ b/tests/test-hooks.js @@ -36,7 +36,7 @@ if (fs.existsSync(HOOKS_CONFIG_FILE)) { console.log('Test 1: Initialize hooks system'); try { await initializeHooks(); - assert(fs.existsSync(path.join(__dirname, '..', 'hooks')), + assert(fs.existsSync(path.join(__dirname, '..', 'hooks')), 'Hooks directory should be created'); console.log('[ok] Hooks system initialized\n'); } catch (error) { @@ -49,10 +49,10 @@ console.log('Test 2: Load 
hooks configuration'); try { const hooks = loadHooksConfig(); assert(typeof hooks === 'object', 'loadHooksConfig should return an object'); - + // Should have at least on-error hook from default assert(hooks['on-error'], 'Should have on-error hook'); - + console.log(`[ok] Loaded ${Object.keys(hooks).length} hooks`); console.log(` Hooks: ${Object.keys(hooks).join(', ')}\n`); } catch (error) { @@ -65,7 +65,7 @@ console.log('Test 3: List hooks'); try { const hooksList = listHooks(); assert(Array.isArray(hooksList), 'listHooks should return an array'); - + if (hooksList.length > 0) { const firstHook = hooksList[0]; assert(firstHook.name, 'Each hook should have a name'); @@ -73,7 +73,7 @@ try { assert(firstHook.description, 'Each hook should have a description'); assert(typeof firstHook.actionCount === 'number', 'Each hook should have actionCount'); } - + console.log(`[ok] Listed ${hooksList.length} hooks`); hooksList.forEach(h => { console.log(` ${h.enabled ? '[ok]' : '[err]'} ${h.name}: ${h.actionCount} actions`); @@ -98,16 +98,16 @@ try { } ] }; - + addHook('test-hook', testHook); - + const hooks = loadHooksConfig(); assert(hooks['test-hook'], 'Test hook should be added'); - assert(hooks['test-hook'].description === testHook.description, + assert(hooks['test-hook'].description === testHook.description, 'Test hook should have correct description'); - + console.log('[ok] Successfully added custom hook\n'); - + // Cleanup removeHook('test-hook'); } catch (error) { @@ -124,19 +124,19 @@ try { description: 'Hook for toggle testing', actions: [] }); - + // Disable it toggleHook('toggle-test', false); let hooks = loadHooksConfig(); assert(hooks['toggle-test'].enabled === false, 'Hook should be disabled'); - + // Enable it toggleHook('toggle-test', true); hooks = loadHooksConfig(); assert(hooks['toggle-test'].enabled === true, 'Hook should be enabled'); - + console.log('[ok] Successfully toggled hook state\n'); - + // Cleanup removeHook('toggle-test'); } catch (error) { @@ 
-149,7 +149,7 @@ console.log('Test 6: Execute hook'); try { // Create a test file to verify hook execution const testFile = path.join(__dirname, '..', 'test-hook-output.txt'); - + // Add a test hook that creates a file addHook('execution-test', { enabled: true, @@ -162,16 +162,16 @@ try { } ] }); - + // Execute the hook const result = await executeHook('execution-test', { server: 'test-server' }); assert(result.success === true, 'Hook execution should succeed'); - + // Verify file was created assert(fs.existsSync(testFile), 'Hook should have created test file'); - + console.log('[ok] Hook executed successfully'); - + // Cleanup if (fs.existsSync(testFile)) { fs.unlinkSync(testFile); @@ -202,14 +202,14 @@ try { } ] }); - + // Try to execute it const result = await executeHook('disabled-test', {}); assert(result.success === true, 'Should return success'); assert(result.skipped === true, 'Should indicate hook was skipped'); - + console.log('[ok] Disabled hook was correctly skipped\n'); - + // Cleanup removeHook('disabled-test'); } catch (error) { @@ -221,7 +221,7 @@ try { console.log('Test 8: Hook with context replacement'); try { const testFile = path.join(__dirname, '..', 'context-test.txt'); - + addHook('context-test', { enabled: true, description: 'Context replacement test', @@ -233,12 +233,12 @@ try { } ] }); - - await executeHook('context-test', { - server: 'production', - error: 'test-error' + + await executeHook('context-test', { + server: 'production', + error: 'test-error' }); - + if (fs.existsSync(testFile)) { const content = fs.readFileSync(testFile, 'utf8'); assert(content.includes('production'), 'Should replace {server} with context value'); @@ -246,7 +246,7 @@ try { console.log('[ok] Context replacement works correctly'); fs.unlinkSync(testFile); } - + // Cleanup removeHook('context-test'); console.log(); @@ -269,4 +269,4 @@ if (fs.existsSync(backupFile)) { console.log('[pkg] Kept current hooks configuration\n'); } -console.log('[*] All hooks tests 
passed!'); \ No newline at end of file +console.log('[*] All hooks tests passed!'); diff --git a/tests/test-host-key-verification.js b/tests/test-host-key-verification.js index 3fd7672..0087b3f 100644 --- a/tests/test-host-key-verification.js +++ b/tests/test-host-key-verification.js @@ -28,7 +28,7 @@ import('../src/ssh-key-manager.js').then(mod => { // a fresh SSHManager and synthesize the same hostVerifier inline here. The // PRODUCTION logic lives in src/ssh-manager.js connect() -- keep this test // in lockstep with that code. -function makeVerifier({ host, port, knownList, strictEnv = false }) { +function makeVerifier({ host: _host, port: _port, knownList, strictEnv = false }) { // mirrors the logic in SSHManager.connect() hostVerifier return (key) => { const presented = 'SHA256:' + crypto.createHash('sha256').update(key).digest('base64').replace(/=+$/, ''); @@ -88,7 +88,7 @@ const hashA = 'SHA256:' + crypto.createHash('sha256').update(keyA).digest('base6 const v = makeVerifier({ host: 'h', port: 22, knownList: [ { fingerprint: otherHash, type: 'ssh-ed25519' }, { fingerprint: hashA, type: 'ssh-rsa' }, - ]}); + ] }); const r = v(keyA); assert(r.action === 'accept-match', 'multi-algo known host: any matching fingerprint accepts'); } diff --git a/tests/test-index-registration.js b/tests/test-index-registration.js new file mode 100644 index 0000000..d4c179b --- /dev/null +++ b/tests/test-index-registration.js @@ -0,0 +1,95 @@ +#!/usr/bin/env node +/** + * Registration invariants for src/index.js. + * + * Without this test, adding a tool to TOOL_GROUPS but forgetting to wire + * a registerToolConditional(...) call in index.js is a silent drift: the + * tool appears in the registry, ship-readiness tests pass, but users get + * "unknown tool" at runtime. This test reads index.js as text and pins: + * + * 1. Every TOOL_GROUPS entry has a registerToolConditional('', ...) call. + * 2. Every registerToolConditional('', ...) 
in index.js corresponds + * to a TOOL_GROUPS entry (no orphans). + * 3. Every registered tool has a TOOL_ANNOTATIONS entry (mirrors + * test-tool-annotations, kept here for independent coverage). + */ + +import assert from 'node:assert'; +import fs from 'node:fs'; +import path from 'node:path'; +import url from 'node:url'; +import { TOOL_GROUPS, getAllTools } from '../src/tool-registry.js'; +import { TOOL_ANNOTATIONS } from '../src/tool-annotations.js'; + +const __dirname = path.dirname(url.fileURLToPath(import.meta.url)); +const indexSrc = fs.readFileSync(path.join(__dirname, '..', 'src', 'index.js'), 'utf8'); + +let passed = 0; +let failed = 0; +const fails = []; + +async function test(name, fn) { + try { + await fn(); + passed++; + console.log(`[ok] ${name}`); + } catch (e) { + failed++; + fails.push({ name, err: e }); + console.error(`[err] ${name}: ${e.message}`); + } +} + +function registeredNames(src) { + // Matches registerToolConditional( NEWLINE 'tool_name', + const re = /registerToolConditional\(\s*['"]([A-Za-z_][\w-]*)['"]/g; + const out = new Set(); + let m; + while ((m = re.exec(src))) out.add(m[1]); + return out; +} + +await test('every TOOL_GROUPS entry is registered in index.js', () => { + const registered = registeredNames(indexSrc); + const missing = getAllTools().filter(name => !registered.has(name)); + assert.strictEqual(missing.length, 0, + `tools listed in TOOL_GROUPS but never registered: ${missing.join(', ')}`); +}); + +await test('every registerToolConditional() in index.js corresponds to a TOOL_GROUPS entry', () => { + const registered = registeredNames(indexSrc); + const known = new Set(getAllTools()); + const orphans = [...registered].filter(name => !known.has(name)); + assert.strictEqual(orphans.length, 0, + `tools registered in index.js but missing from TOOL_GROUPS: ${orphans.join(', ')}`); +}); + +await test('count of registered tools matches registry exactly', () => { + const registered = registeredNames(indexSrc); + 
assert.strictEqual(registered.size, getAllTools().length, + `registered=${registered.size} vs registry=${getAllTools().length}`); +}); + +await test('every registered tool has an annotations entry (drift check)', () => { + const registered = registeredNames(indexSrc); + const missing = [...registered].filter(name => !TOOL_ANNOTATIONS[name]); + assert.strictEqual(missing.length, 0, + `tools registered without annotations: ${missing.join(', ')}`); +}); + +await test('TOOL_GROUPS has no duplicate names across groups', () => { + const all = getAllTools(); + const uniq = new Set(all); + assert.strictEqual(all.length, uniq.size, + `duplicates detected in TOOL_GROUPS: ${all.length} entries, ${uniq.size} unique`); +}); + +await test('every group declared in TOOL_GROUPS is non-empty', () => { + for (const [name, tools] of Object.entries(TOOL_GROUPS)) { + assert(Array.isArray(tools) && tools.length > 0, + `group ${name} is empty or not an array`); + } +}); + +console.log(`\n${passed} passed, ${failed} failed`); +if (failed > 0) { for (const f of fails) console.error(` [err] ${f.name}\n ${f.err.stack}`); process.exit(1); } diff --git a/tests/test-journalctl-tools.js b/tests/test-journalctl-tools.js index b91b636..176f232 100644 --- a/tests/test-journalctl-tools.js +++ b/tests/test-journalctl-tools.js @@ -4,7 +4,7 @@ import assert from 'assert'; import { EventEmitter } from 'events'; import { normalizePriority, safeLines, buildJournalctlCommand, parseJournalJsonl, - handleSshJournalctl, ALLOWED_PRIORITIES, PRIORITY_NAMES, + handleSshJournalctl, } from '../src/tools/journalctl-tools.js'; let passed = 0, failed = 0; const fails = []; @@ -61,10 +61,10 @@ await test('buildJournalctlCommand: defaults', () => { }); await test('buildJournalctlCommand: unit and since/until are shell-quoted', () => { - const cmd = buildJournalctlCommand({ unit: 'nginx; rm -rf /', since: "2024-01-01'; DROP", until: '1h' }); - assert(cmd.includes("-u 'nginx; rm -rf /'")); - assert(cmd.includes("--since 
'2024-01-01'\\''; DROP'")); - assert(cmd.includes("--until '1h'")); + const cmd = buildJournalctlCommand({ unit: 'nginx; rm -rf /', since: '2024-01-01\'; DROP', until: '1h' }); + assert(cmd.includes('-u \'nginx; rm -rf /\'')); + assert(cmd.includes('--since \'2024-01-01\'\\\'\'; DROP\'')); + assert(cmd.includes('--until \'1h\'')); }); await test('buildJournalctlCommand: json:false omits --output=json', () => { @@ -72,8 +72,8 @@ await test('buildJournalctlCommand: json:false omits --output=json', () => { }); await test('buildJournalctlCommand: grep pattern appended safely', () => { - const cmd = buildJournalctlCommand({ grep: "ERROR'; rm" }); - assert(cmd.includes("| grep -E 'ERROR'\\''; rm'")); + const cmd = buildJournalctlCommand({ grep: 'ERROR\'; rm' }); + assert(cmd.includes('| grep -E \'ERROR\'\\\'\'; rm\'')); }); // --- parseJournalJsonl -------------------------------------------------- diff --git a/tests/test-monitoring-tools.js b/tests/test-monitoring-tools.js index 4a4ae80..5dd9ab0 100644 --- a/tests/test-monitoring-tools.js +++ b/tests/test-monitoring-tools.js @@ -20,7 +20,6 @@ import { parsePsInfo, parseSystemctlShow, sdNum, - shapeServiceRecord, splitHealthSections, computeStatus, extractJournalLines, @@ -540,7 +539,6 @@ await test('ssh_service_status: missing service arg -> structured failure', asyn args: { server: 's' }, }); assert.strictEqual(r.isError, true); - const parsed = JSON.parse(JSON.stringify({ ok: true })); // force assertion style assert(r.content[0].text.toLowerCase().includes('service') || r.content[0].text.includes('failed')); }); @@ -550,10 +548,10 @@ await test('ssh_service_status: service name is shell-quoted in remote command', const client = new FakeClient({ script: () => ({ stdout, code: 0 }) }); await handleSshServiceStatus({ getConnection: async () => client, - args: { server: 's', service: "foo; rm -rf /", format: 'json' }, + args: { server: 's', service: 'foo; rm -rf /', format: 'json' }, }); 
assert(!client.lastCommand.includes('rm -rf /; '), 'injection must not escape quoting'); - assert(client.lastCommand.includes("'foo; rm -rf /'")); + assert(client.lastCommand.includes('\'foo; rm -rf /\'')); }); // --- handleSshProcessManager -------------------------------------------- diff --git a/tests/test-plan-tools.js b/tests/test-plan-tools.js index 33c4772..49bf024 100644 --- a/tests/test-plan-tools.js +++ b/tests/test-plan-tools.js @@ -431,7 +431,7 @@ await test('run: rollback inherits original step server when not specified', asy if (args.command === 'undo1') { rollbackServer = args.server; return okResp('ok'); } return okResp('ok'); }); - const r = await handleSshPlan({ + await handleSshPlan({ dispatch: { exec: execSpy }, args: { mode: 'run', server: 'default_server', format: 'json', diff --git a/tests/test-port-test-tools.js b/tests/test-port-test-tools.js index c123b9a..17807b6 100644 --- a/tests/test-port-test-tools.js +++ b/tests/test-port-test-tools.js @@ -4,7 +4,7 @@ import assert from 'assert'; import { EventEmitter } from 'events'; import { parseDnsOutput, parseTcpOutput, parseTlsOutput, parseHttpOutput, - buildDnsCommand, buildTcpCommand, buildTlsCommand, buildHttpCommand, + buildDnsCommand, buildTlsCommand, buildHttpCommand, handleSshPortTest, } from '../src/tools/port-test-tools.js'; @@ -103,7 +103,7 @@ await test('parseHttpOutput: malformed returns null', () => { // --- buildXxxCommand: shQuote / injection safety ------------------------- await test('buildDnsCommand: host is shell-quoted', () => { const cmd = buildDnsCommand('evil.com; rm -rf /'); - assert(cmd.includes("'evil.com; rm -rf /'")); + assert(cmd.includes('\'evil.com; rm -rf /\'')); assert(!cmd.match(/^[^']*evil\.com; rm -rf \//), 'no unquoted fragment'); }); @@ -135,7 +135,7 @@ await test('handleSshPortTest: full chain tcp+dns with scripted results', async if (cmd.startsWith('getent hosts')) return { stdout: '1.2.3.4 host.example.com\n', code: 0 }; if (cmd.includes('nc -z') || 
cmd.includes('/dev/tcp/')) return { stdout: 'TCP_LATENCY_MS=5\n', code: 0 }; return { stdout: '', code: 0 }; - }}); + } }); const r = await handleSshPortTest({ getConnection: async () => client, args: { diff --git a/tests/test-profiles.js b/tests/test-profiles.js index 0c93323..9fb0e5d 100755 --- a/tests/test-profiles.js +++ b/tests/test-profiles.js @@ -4,11 +4,11 @@ * Test suite for Profile Loader */ -import { - loadProfile, - listProfiles, - setActiveProfile, - getActiveProfileName +import { + loadProfile, + listProfiles, + setActiveProfile, + getActiveProfileName } from '../src/profile-loader.js'; import assert from 'assert'; import fs from 'fs'; @@ -39,10 +39,10 @@ try { const profiles = listProfiles(); assert(Array.isArray(profiles), 'listProfiles should return an array'); assert(profiles.length > 0, 'Should have at least one profile'); - + const defaultProfile = profiles.find(p => p.name === 'default'); assert(defaultProfile, 'Default profile should be in the list'); - + console.log(`[ok] Found ${profiles.length} profiles:`); profiles.forEach(p => { console.log(` - ${p.name}: ${p.aliasCount} aliases, ${p.hookCount} hooks`); @@ -82,24 +82,30 @@ try { // Test 5: Switch profiles console.log('Test 5: Switch profiles'); const testProfileFile = path.join(__dirname, '..', '.ssh-manager-profile'); -const originalProfile = fs.existsSync(testProfileFile) ? 
- fs.readFileSync(testProfileFile, 'utf8').trim() : null; +let originalProfile = null; +try { + originalProfile = fs.readFileSync(testProfileFile, 'utf8').trim(); +} catch (e) { + if (e.code !== 'ENOENT') throw e; +} try { // Switch to docker profile const switchResult = setActiveProfile('docker'); assert(switchResult === true, 'Should successfully switch to docker profile'); - + const newProfile = getActiveProfileName(); assert(newProfile === 'docker', 'Active profile should be docker after switch'); - + console.log('[ok] Successfully switched to docker profile'); - + // Restore original profile if (originalProfile) { fs.writeFileSync(testProfileFile, originalProfile); - } else if (fs.existsSync(testProfileFile)) { - fs.unlinkSync(testProfileFile); + } else { + try { fs.unlinkSync(testProfileFile); } catch (e) { + if (e.code !== 'ENOENT') throw e; + } } console.log('[ok] Restored original profile setting\n'); } catch (error) { @@ -107,8 +113,10 @@ try { // Cleanup if (originalProfile) { fs.writeFileSync(testProfileFile, originalProfile); - } else if (fs.existsSync(testProfileFile)) { - fs.unlinkSync(testProfileFile); + } else { + try { fs.unlinkSync(testProfileFile); } catch (e) { + if (e.code !== 'ENOENT') throw e; + } } process.exit(1); } @@ -118,7 +126,7 @@ console.log('Test 6: Load non-existent profile'); try { const profile = loadProfile('non-existent-profile'); assert(profile, 'Should return a profile even for non-existent name'); - assert(profile.name === 'default' || profile.name === 'minimal', + assert(profile.name === 'default' || profile.name === 'minimal', 'Should fallback to default or minimal profile'); console.log(`[ok] Correctly fell back to ${profile.name} profile\n`); } catch (error) { @@ -131,19 +139,19 @@ console.log('Test 7: Validate all profile JSON files'); try { const profilesDir = path.join(__dirname, '..', 'profiles'); const files = fs.readdirSync(profilesDir).filter(f => f.endsWith('.json')); - + for (const file of files) { const 
filePath = path.join(profilesDir, file); const content = fs.readFileSync(filePath, 'utf8'); const profile = JSON.parse(content); - + assert(profile.name, `Profile ${file} should have a name`); assert(profile.description, `Profile ${file} should have a description`); - assert(typeof profile.commandAliases === 'object', + assert(typeof profile.commandAliases === 'object', `Profile ${file} should have commandAliases object`); - assert(typeof profile.hooks === 'object', + assert(typeof profile.hooks === 'object', `Profile ${file} should have hooks object`); - + console.log(` [ok] ${file} is valid`); } console.log(`[ok] All ${files.length} profile files are valid\n`); @@ -152,4 +160,4 @@ try { process.exit(1); } -console.log('[*] All profile tests passed!'); \ No newline at end of file +console.log('[*] All profile tests passed!'); diff --git a/tests/test-session-tools.js b/tests/test-session-tools.js index 4b5520b..5c0b462 100644 --- a/tests/test-session-tools.js +++ b/tests/test-session-tools.js @@ -36,8 +36,6 @@ async function test(name, fn) { try { await fn(); passed++; console.log(`[ok] ${name}`); } catch (e) { failed++; fails.push({ name, err: e }); console.error(`[err] ${name}: ${e.message}`); } } -const sleep = ms => new Promise(r => setTimeout(r, ms)); - // -------------------------------------------------------------------------- // FakeShellStream -- scriptable bidirectional shell // -------------------------------------------------------------------------- @@ -50,7 +48,7 @@ class FakeShellStream extends EventEmitter { this.closed = false; this._marker = null; // scriptFor(userCmd, marker) -> { stdout, exit, stderr?, skipMarker?, customEcho? 
} - this.scriptFor = scriptFor || ((cmd) => ({ stdout: '', exit: 0 })); + this.scriptFor = scriptFor || (() => ({ stdout: '', exit: 0 })); this.delayMs = delayMs; } @@ -91,22 +89,11 @@ class FakeShellStream extends EventEmitter { close() { this.closed = true; setImmediate(() => this.emit('close', 0)); } } -/** Build a synthesizer that returns scripted output per-command. */ -function scripted(table) { - return (cmd) => { - for (const { match, response } of table) { - if (typeof match === 'string' ? cmd === match : match.test(cmd)) { - return typeof response === 'function' ? response(cmd) : response; - } - } - return { stdout: '', exit: 0 }; - }; -} - // Build a fake "client" that yields a FakeShellStream when .shell() is called. function makeFakeClient(stream) { return { shell(opts, cb) { + void opts; setImmediate(() => cb(null, stream)); }, }; @@ -281,17 +268,7 @@ await test('runCommand: multi-line output (500 lines) preserved intact', async ( }); await test('runCommand: ANSI color codes around marker still detected', async () => { - const stream = new FakeShellStream({ - scriptFor: (_cmd, marker) => ({ - stdout: '\x1b[31mred text\x1b[0m\n', - exit: 0, - // Override echo so the marker emission includes a leading color reset. - // The script table engine emits stdout then marker line automatically, - // but for this test we need to inject ANSI *around* the marker. We - // use skipMarker + customEcho to craft the wire precisely. - }), - }); - // Pivot: bypass the normal scriptFor for ANSI-around-marker by using a + // Bypass the normal scriptFor for ANSI-around-marker by using a // direct scriptFor that emits the marker itself. 
const streamAnsi = new FakeShellStream({ scriptFor: () => ({ @@ -502,6 +479,36 @@ await test('session_close: idempotent -- second call is success with already_clo assert.strictEqual(secondP.data.already_closed, true); }); +await test('session_close: session_id="all" closes every tracked session (C4)', async () => { + await cleanupAllSessions(); + const streams = [makeSeedingStream(), makeSeedingStream(), makeSeedingStream()]; + const ids = []; + for (const s of streams) { + const started = await handleSshSessionStart({ + getConnection: async () => makeFakeClient(s), + args: { server: 's', format: 'json' }, + }); + ids.push(JSON.parse(started.content[0].text).data.session_id); + } + + const r = await handleSshSessionClose({ args: { session_id: 'all', format: 'json' } }); + const parsed = JSON.parse(r.content[0].text); + assert.strictEqual(parsed.success, true); + assert.strictEqual(parsed.data.closed_count, 3, 'all three sessions should be reported closed'); + const closedIds = parsed.data.sessions.map(s => s.session_id).sort(); + assert.deepStrictEqual(closedIds, [...ids].sort(), 'every started session must appear in the result'); + + // Registry must be empty afterwards. + const list = await handleSshSessionList({ args: { format: 'json' } }); + assert.strictEqual(JSON.parse(list.content[0].text).data.total, 0); + + // Each stream must have received exit\n + end(). + for (const s of streams) { + assert(s.writes.join('').includes('exit\n')); + assert.strictEqual(s.endCalls, 1); + } +}); + await test('session_close: gracefully writes `exit` and ends the stream', async () => { const stream = makeSeedingStream(); const started = await handleSshSessionStart({ @@ -606,7 +613,7 @@ await test('output containing coincidental "__MCP_EOC_" prefix does NOT trigger // string "__MCP_EOC_deadbeef" (short) and it won't match because the live // session's marker has a different, unpredictable suffix. 
const stream = new FakeShellStream({ - scriptFor: (cmd, marker) => { + scriptFor: (cmd) => { if (cmd === 'pwd' || cmd === 'whoami' || cmd === 'echo $HOME') { return { stdout: 'x\n', exit: 0 }; } diff --git a/tests/test-socks5.js b/tests/test-socks5.js new file mode 100644 index 0000000..154a3dc --- /dev/null +++ b/tests/test-socks5.js @@ -0,0 +1,190 @@ +#!/usr/bin/env node +/** + * Tests for the SOCKS5 handler in src/tools/tunnel-tools.js. + * + * Covers: + * - parseSocksConnectRequest: IPv4, domain, IPv6, malformed, unsupported CMD. + * - handleSocks5Connection: full handshake happy path (greeting -> CONNECT -> + * reply -> streaming). + * - handleSocks5Connection: method negotiation fails when client offers + * only authenticated methods (no 0x00 method). + * - handleSocks5Connection: forwardOut error surfaces a SOCKS error reply. + */ + +import assert from 'node:assert'; +import { EventEmitter } from 'node:events'; +import { + parseSocksConnectRequest, + handleSocks5Connection, +} from '../src/tools/tunnel-tools.js'; + +let passed = 0; +let failed = 0; +const fails = []; + +async function test(name, fn) { + try { await fn(); passed++; console.log(`[ok] ${name}`); } + catch (e) { failed++; fails.push({ name, err: e }); console.error(`[err] ${name}: ${e.message}`); } +} + +// --- parseSocksConnectRequest -------------------------------------------- +test('parseSocksConnectRequest: IPv4 target', () => { + // VER=5 CMD=1 RSV=0 ATYP=1 ADDR=1.2.3.4 PORT=80 (0x0050) + const buf = Buffer.from([0x05, 0x01, 0x00, 0x01, 1, 2, 3, 4, 0x00, 0x50]); + const r = parseSocksConnectRequest(buf); + assert.strictEqual(r.host, '1.2.3.4'); + assert.strictEqual(r.port, 80); + assert.strictEqual(r.atyp, 0x01); + assert.strictEqual(r.consumed, 10); +}); + +test('parseSocksConnectRequest: domain name target', () => { + const host = 'example.com'; + const hostBytes = Buffer.from(host, 'ascii'); + // VER=5 CMD=1 RSV=0 ATYP=3 LEN=11 ADDR=example.com PORT=443 (0x01bb) + const buf = 
Buffer.concat([ + Buffer.from([0x05, 0x01, 0x00, 0x03, hostBytes.length]), + hostBytes, + Buffer.from([0x01, 0xbb]), + ]); + const r = parseSocksConnectRequest(buf); + assert.strictEqual(r.host, 'example.com'); + assert.strictEqual(r.port, 443); + assert.strictEqual(r.atyp, 0x03); +}); + +test('parseSocksConnectRequest: IPv6 target', () => { + // VER=5 CMD=1 RSV=0 ATYP=4 ADDR=::1 PORT=22 + const addr = Buffer.alloc(16); + addr[15] = 1; // ::1 + const buf = Buffer.concat([ + Buffer.from([0x05, 0x01, 0x00, 0x04]), + addr, + Buffer.from([0x00, 0x16]), + ]); + const r = parseSocksConnectRequest(buf); + assert.strictEqual(r.host, '0:0:0:0:0:0:0:1'); + assert.strictEqual(r.port, 22); + assert.strictEqual(r.atyp, 0x04); +}); + +test('parseSocksConnectRequest: rejects non-CONNECT CMD', () => { + const buf = Buffer.from([0x05, 0x02, 0x00, 0x01, 1, 2, 3, 4, 0, 80]); + assert.throws(() => parseSocksConnectRequest(buf), /CONNECT/); +}); + +test('parseSocksConnectRequest: rejects bad VER', () => { + const buf = Buffer.from([0x04, 0x01, 0x00, 0x01, 1, 2, 3, 4, 0, 80]); + assert.throws(() => parseSocksConnectRequest(buf), /VER/); +}); + +test('parseSocksConnectRequest: rejects unknown ATYP', () => { + const buf = Buffer.from([0x05, 0x01, 0x00, 0x09, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + assert.throws(() => parseSocksConnectRequest(buf), /ATYP/); +}); + +// --- handleSocks5Connection ---------------------------------------------- + +class FakeSocket extends EventEmitter { + constructor() { + super(); + this.writes = []; + this.destroyed = false; + this.ended = false; + this.remoteAddress = '127.0.0.1'; + this.remotePort = 51234; + } + write(buf) { this.writes.push(Buffer.isBuffer(buf) ? 
buf : Buffer.from(buf)); return true; } + end() { this.ended = true; } + destroy() { this.destroyed = true; } + pipe(dst) { return dst; } +} + +function makeFakeSsh({ forwardError = null } = {}) { + const client = { + calls: [], + streams: [], + forwardOut(sa, sp, da, dp, cb) { + this.calls.push({ sa, sp, da, dp }); + if (forwardError) { return setImmediate(() => cb(forwardError)); } + const stream = new EventEmitter(); + stream.destroy = () => {}; + stream.write = () => true; + stream.pipe = (dst) => dst; + this.streams.push(stream); + setImmediate(() => cb(null, stream)); + }, + }; + return client; +} + +test('handleSocks5Connection: greeting -> CONNECT IPv4 -> success reply', async () => { + const sock = new FakeSocket(); + const ssh = makeFakeSsh(); + handleSocks5Connection(sock, ssh); + // Greeting: VER=5 NMETHODS=1 METHODS=[0x00] + sock.emit('data', Buffer.from([0x05, 0x01, 0x00])); + await new Promise(r => setImmediate(r)); + // Server replies VER=5 METHOD=0x00 + assert.deepStrictEqual(Array.from(sock.writes[0]), [0x05, 0x00]); + // CONNECT to 1.2.3.4:80 + sock.emit('data', Buffer.from([0x05, 0x01, 0x00, 0x01, 1, 2, 3, 4, 0x00, 0x50])); + await new Promise(r => setImmediate(r)); + // ssh.forwardOut invoked with right host:port + assert.strictEqual(ssh.calls.length, 1); + assert.strictEqual(ssh.calls[0].da, '1.2.3.4'); + assert.strictEqual(ssh.calls[0].dp, 80); + // Reply contains SUCCEEDED (0x00 in second byte) + const last = sock.writes[sock.writes.length - 1]; + assert.strictEqual(last[0], 0x05); + assert.strictEqual(last[1], 0x00, `expected SUCCEEDED reply, got ${last[1]}`); +}); + +test('handleSocks5Connection: client offering only auth methods gets 0xFF', async () => { + const sock = new FakeSocket(); + const ssh = makeFakeSsh(); + handleSocks5Connection(sock, ssh); + // Greeting: VER=5 NMETHODS=1 METHODS=[0x02] (GSSAPI/user-pass only, no 0x00) + sock.emit('data', Buffer.from([0x05, 0x01, 0x02])); + await new Promise(r => setImmediate(r)); + 
assert.deepStrictEqual(Array.from(sock.writes[0]), [0x05, 0xff]); + assert.strictEqual(sock.ended, true); + assert.strictEqual(ssh.calls.length, 0, 'no forwardOut should have been made'); +}); + +test('handleSocks5Connection: forwardOut refused surfaces CONNECTION_REFUSED reply', async () => { + const sock = new FakeSocket(); + const ssh = makeFakeSsh({ forwardError: new Error('connection refused by remote') }); + handleSocks5Connection(sock, ssh); + sock.emit('data', Buffer.from([0x05, 0x01, 0x00])); + await new Promise(r => setImmediate(r)); + sock.emit('data', Buffer.from([0x05, 0x01, 0x00, 0x01, 1, 2, 3, 4, 0x00, 0x50])); + await new Promise(r => setImmediate(r)); + const last = sock.writes[sock.writes.length - 1]; + assert.strictEqual(last[0], 0x05); + assert.strictEqual(last[1], 0x05, 'expected REP=CONNECTION_REFUSED (0x05)'); +}); + +test('handleSocks5Connection: handles domain-ATYP CONNECT', async () => { + const sock = new FakeSocket(); + const ssh = makeFakeSsh(); + handleSocks5Connection(sock, ssh); + sock.emit('data', Buffer.from([0x05, 0x01, 0x00])); + await new Promise(r => setImmediate(r)); + const host = 'example.com'; + const req = Buffer.concat([ + Buffer.from([0x05, 0x01, 0x00, 0x03, host.length]), + Buffer.from(host, 'ascii'), + Buffer.from([0x01, 0xbb]), + ]); + sock.emit('data', req); + await new Promise(r => setImmediate(r)); + assert.strictEqual(ssh.calls[0].da, 'example.com'); + assert.strictEqual(ssh.calls[0].dp, 443); +}); + +await new Promise(r => setTimeout(r, 50)); // flush pending setImmediate callbacks + +console.log(`\n${passed} passed, ${failed} failed`); +if (failed > 0) { for (const f of fails) console.error(` [err] ${f.name}\n ${f.err.stack}`); process.exit(1); } diff --git a/tests/test-sql-safety.js b/tests/test-sql-safety.js index f186440..d7b1c4a 100644 --- a/tests/test-sql-safety.js +++ b/tests/test-sql-safety.js @@ -1,7 +1,8 @@ #!/usr/bin/env node /** - * Tests for src/tools/sql-safety.js -- the replacement for the buggy - 
* `isSafeQuery` in database-manager.js. + * Tests for src/tools/sql-safety.js -- guards the `ssh_db_query` path + * against multi-statement smuggling, comment-hidden mutations, and + * INTO OUTFILE write attempts. * * Goals (per the task spec): * - No false positives on common column names: deleted_at, update_count, drop_box. @@ -134,7 +135,7 @@ test('accepts SELECT with block comment inside', () => { test('accepts string literal that contains dangerous keywords', () => { // The string `'DROP TABLE users'` is data, not code -- must not false-positive. - const r = isSafeSelect("SELECT 'DROP TABLE users' AS msg"); + const r = isSafeSelect('SELECT \'DROP TABLE users\' AS msg'); assert.strictEqual(r.ok, true); }); @@ -199,7 +200,7 @@ test('rejects comment-hidden DROP -- the comment is stripped, DROP is first toke }); test('rejects SELECT ... INTO OUTFILE (MySQL file write)', () => { - const r = isSafeSelect("SELECT * INTO OUTFILE '/tmp/pwn' FROM users"); + const r = isSafeSelect('SELECT * INTO OUTFILE \'/tmp/pwn\' FROM users'); assert.strictEqual(r.ok, false); assert(r.reason.toUpperCase().includes('INTO')); }); @@ -235,12 +236,12 @@ test('rejects CALL stored_procedure()', () => { }); test('rejects LOAD DATA INFILE', () => { - const r = isSafeSelect("LOAD DATA INFILE '/etc/passwd' INTO TABLE t"); + const r = isSafeSelect('LOAD DATA INFILE \'/etc/passwd\' INTO TABLE t'); assert.strictEqual(r.ok, false); }); test('rejects COPY (Postgres server-side file access)', () => { - const r = isSafeSelect("COPY t FROM '/etc/passwd'"); + const r = isSafeSelect('COPY t FROM \'/etc/passwd\''); assert.strictEqual(r.ok, false); }); @@ -325,22 +326,22 @@ test('internal: stripComments handles nested block comments', () => { }); test('internal: stripStrings removes single-quoted and preserves everything else', () => { - const out = __internals.stripStrings("SELECT 'DROP' FROM t"); + const out = __internals.stripStrings('SELECT \'DROP\' FROM t'); assert(!out.includes('DROP'), 'DROP was 
inside a string, should be gone'); assert(out.includes('SELECT')); assert(out.includes('FROM t')); }); test('internal: stripStrings handles `` inside backticks', () => { - const out = __internals.stripStrings("SELECT `a``b` FROM t"); + const out = __internals.stripStrings('SELECT `a``b` FROM t'); assert(!out.includes('a')); assert(out.includes('SELECT')); assert(out.includes('FROM t')); }); test('internal: stripStrings handles `` inside single quotes', () => { - const out = __internals.stripStrings("SELECT 'it''s' FROM t"); - assert(!out.includes("it")); + const out = __internals.stripStrings('SELECT \'it\'\'s\' FROM t'); + assert(!out.includes('it')); assert(out.includes('SELECT')); }); diff --git a/tests/test-ssh-manager-exec-passthrough.js b/tests/test-ssh-manager-exec-passthrough.js index 9678bd4..a3f6c64 100644 --- a/tests/test-ssh-manager-exec-passthrough.js +++ b/tests/test-ssh-manager-exec-passthrough.js @@ -42,7 +42,7 @@ assert(typeof captured?.maybeCb === 'function', 'exec forwards callback as 3rd a // --- sftp passthrough --- assert(typeof mgr.sftp === 'function', 'SSHManager.sftp is a function'); captured = null; -const sftpCb = (err, sftp) => {}; +const sftpCb = (_err, _sftp) => {}; mgr.sftp(sftpCb); assert(captured?.sftpCb === sftpCb, 'sftp forwards callback to underlying client'); @@ -67,7 +67,7 @@ assert(captured?.srcA === '127.0.0.1' && captured?.dstP === 22, 'forwardOut call // --- forwardOut Promise-style (used by index.js for proxy jumps) --- let resolvedStream = null; -mgr.client.forwardOut = (srcA, srcP, dstA, dstP, cb) => cb(null, { tag: 'mockStream' }); +mgr.client.forwardOut = (_srcA, _srcP, _dstA, _dstP, cb) => cb(null, { tag: 'mockStream' }); const promise = mgr.forwardOut('127.0.0.1', 0, 'jump.host', 22); assert(promise && typeof promise.then === 'function', 'forwardOut Promise-style returns a Promise'); await promise.then(s => { resolvedStream = s; }); diff --git a/tests/test-stream-exec.js b/tests/test-stream-exec.js index 
82f8233..7e3405d 100644 --- a/tests/test-stream-exec.js +++ b/tests/test-stream-exec.js @@ -81,15 +81,15 @@ console.log('[test] Testing stream-exec\n'); // --- shQuote / buildRemoteCommand ---------------------------------------- await test('shQuote: simple path', () => { - assert.strictEqual(shQuote('/var/app'), "'/var/app'"); + assert.strictEqual(shQuote('/var/app'), '\'/var/app\''); }); await test('shQuote: path with space', () => { - assert.strictEqual(shQuote('/home/my user'), "'/home/my user'"); + assert.strictEqual(shQuote('/home/my user'), '\'/home/my user\''); }); await test('shQuote: path with single-quote escapes correctly', () => { - assert.strictEqual(shQuote("it's"), "'it'\\''s'"); + assert.strictEqual(shQuote('it\'s'), '\'it\'\\\'\'s\''); }); await test('shQuote: path with injection attempt stays literal', () => { @@ -97,15 +97,15 @@ await test('shQuote: path with injection attempt stays literal', () => { const dangerous = '/tmp; rm -rf /'; const quoted = shQuote(dangerous); // The quoted form wraps in single quotes -- bash treats `;` as literal inside. - assert.strictEqual(quoted, "'/tmp; rm -rf /'"); + assert.strictEqual(quoted, '\'/tmp; rm -rf /\''); }); await test('shQuote: close-quote injection attempt is neutralized', () => { // Attempt to break out of single quotes: evil'; rm -rf /; echo ' - const dangerous = "evil'; rm -rf /; echo '"; + const dangerous = 'evil\'; rm -rf /; echo \''; const quoted = shQuote(dangerous); // Must escape the internal quote to close-escape-reopen. 
- assert.strictEqual(quoted, "'evil'\\''; rm -rf /; echo '\\'''"); + assert.strictEqual(quoted, '\'evil\'\\\'\'; rm -rf /; echo \'\\\'\'\''); }); await test('buildRemoteCommand: no cwd returns command unchanged', () => { @@ -115,13 +115,13 @@ await test('buildRemoteCommand: no cwd returns command unchanged', () => { }); await test('buildRemoteCommand: cwd is quoted', () => { - assert.strictEqual(buildRemoteCommand('ls', '/var/app'), "cd '/var/app' && ls"); + assert.strictEqual(buildRemoteCommand('ls', '/var/app'), 'cd \'/var/app\' && ls'); }); await test('buildRemoteCommand: shell-injection in cwd neutralized', () => { const cmd = buildRemoteCommand('ls', '/tmp; rm -rf /'); // After quoting, `;` is inside single quotes -> bash treats as literal dir name. - assert.strictEqual(cmd, "cd '/tmp; rm -rf /' && ls"); + assert.strictEqual(cmd, 'cd \'/tmp; rm -rf /\' && ls'); }); // --- Happy path ---------------------------------------------------------- @@ -356,7 +356,7 @@ await test('cwd propagates into the remote command with shell-safe quoting', asy await sleep(5); assert.strictEqual( client.lastCommand, - "cd '/srv/my app; rm -rf /' && ls", + 'cd \'/srv/my app; rm -rf /\' && ls', ); client.streams[0].finish(0); await p; diff --git a/tests/test-structured-result.js b/tests/test-structured-result.js index ad03728..af4d376 100644 --- a/tests/test-structured-result.js +++ b/tests/test-structured-result.js @@ -38,6 +38,54 @@ test('fail: error from string preserved verbatim', () => { assert.strictEqual(r.error, 'unsafe query'); }); +// --- fail() hardening (Wave 2 H3) --------------------------------------- +test('fail: null error normalizes to "unknown error" (no [object Object])', () => { + const r = fail('t', null); + assert.strictEqual(r.error, 'unknown error'); +}); + +test('fail: undefined error normalizes to "unknown error"', () => { + const r = fail('t', undefined); + assert.strictEqual(r.error, 'unknown error'); +}); + +test('fail: plain object without .message is 
JSON-stringified, not [object Object]', () => { + const r = fail('t', { code: -1, signal: 'TIMEOUT' }); + assert.notStrictEqual(r.error, '[object Object]'); + assert(r.error.includes('-1') && r.error.includes('TIMEOUT')); +}); + +test('fail: plain object with .message uses the message', () => { + const r = fail('t', { message: 'connection lost', retryable: true }); + assert.strictEqual(r.error, 'connection lost'); +}); + +test('fail: Error stack is hidden unless MCP_SSH_INCLUDE_STACK=1', () => { + const r = fail('t', new Error('boom')); + assert.strictEqual(r.error_stack, undefined, + 'stack must not leak by default -- opt-in only'); +}); + +test('fail: Error stack is exposed when MCP_SSH_INCLUDE_STACK=1', () => { + const prev = process.env.MCP_SSH_INCLUDE_STACK; + process.env.MCP_SSH_INCLUDE_STACK = '1'; + try { + const r = fail('t', new Error('boom')); + assert(typeof r.error_stack === 'string' && r.error_stack.includes('Error: boom')); + } finally { + if (prev === undefined) delete process.env.MCP_SSH_INCLUDE_STACK; + else process.env.MCP_SSH_INCLUDE_STACK = prev; + } +}); + +test('fail: circular object falls back to String() without throwing', () => { + const obj = { a: 1 }; + obj.self = obj; + const r = fail('t', obj); + assert(typeof r.error === 'string'); + assert.notStrictEqual(r.error, undefined); +}); + test('preview: data carries preview:true + plan', () => { const r = preview('ssh_upload', { action: 'upload', target: 'prod01:/x' }, { server: 'prod01' }); assert.strictEqual(r.success, true); diff --git a/tests/test-systemctl-tools.js b/tests/test-systemctl-tools.js index 0390627..b8b99c2 100644 --- a/tests/test-systemctl-tools.js +++ b/tests/test-systemctl-tools.js @@ -3,8 +3,8 @@ import assert from 'assert'; import { EventEmitter } from 'events'; import { - ALLOWED_ACTIONS, MUTATING_ACTIONS, NO_UNIT_ACTIONS, REVERSIBILITY, RISK_MAP, - UNIT_NAME_RE, isValidUnit, + ALLOWED_ACTIONS, + isValidUnit, parseListUnits, parseListUnitFiles, shapeUnitStatus, 
parseJournalLines, handleSshSystemctl, } from '../src/tools/systemctl-tools.js'; @@ -191,7 +191,7 @@ await test('handleSshSystemctl: list-units parses typed array', async () => { const client = new FakeClient({ script: () => ({ stdout: 'nginx.service loaded active running nginx HTTP', code: 0, - })}); + }) }); const r = await handleSshSystemctl({ getConnection: async () => client, args: { server: 's', action: 'list-units', format: 'json' }, diff --git a/tests/test-tail-tools.js b/tests/test-tail-tools.js index d206ca6..fd39a5f 100644 --- a/tests/test-tail-tools.js +++ b/tests/test-tail-tools.js @@ -89,28 +89,28 @@ console.log('[test] Testing tail-tools\n'); await test('buildTailCommand: default (no follow, no grep) quotes path', () => { assert.strictEqual( buildTailCommand({ file: '/var/log/app.log', lines: 20 }), - "tail -n 20 '/var/log/app.log'" + 'tail -n 20 \'/var/log/app.log\'' ); }); await test('buildTailCommand: follow mode includes -f', () => { assert.strictEqual( buildTailCommand({ file: '/f', lines: 5, follow: true }), - "tail -n 5 -f '/f'" + 'tail -n 5 -f \'/f\'' ); }); await test('buildTailCommand: grep is shell-quoted and piped', () => { assert.strictEqual( - buildTailCommand({ file: '/f', lines: 10, grep: "it's; rm -rf /" }), - "tail -n 10 '/f' | grep -E 'it'\\''s; rm -rf /'" + buildTailCommand({ file: '/f', lines: 10, grep: 'it\'s; rm -rf /' }), + 'tail -n 10 \'/f\' | grep -E \'it\'\\\'\'s; rm -rf /\'' ); }); await test('buildTailCommand: injection in lines neutralized by Number()', () => { assert.strictEqual( buildTailCommand({ file: '/f', lines: '10; rm -rf /' }), - "tail -n 10 '/f'" + 'tail -n 10 \'/f\'' ); }); @@ -124,7 +124,7 @@ await test('handleSshTail: happy path with quoted path and default lines', async args: { server: 'prod01', file: '/var/log/app.log' }, }); assert.strictEqual(r.isError, undefined); - assert.strictEqual(client.lastCommand, "tail -n 10 '/var/log/app.log'"); + assert.strictEqual(client.lastCommand, 'tail -n 10 
\'/var/log/app.log\''); const md = r.content[0].text; assert(md.startsWith('[ok] **ssh_execute**'), 'uses exec markdown renderer'); assert(md.includes('a')); @@ -137,7 +137,7 @@ await test('handleSshTail: grep filter appended to command', async () => { getConnection: async () => client, args: { server: 's', file: '/var/log/app.log', lines: 50, grep: 'ERROR' }, }); - assert.strictEqual(client.lastCommand, "tail -n 50 '/var/log/app.log' | grep -E 'ERROR'"); + assert.strictEqual(client.lastCommand, 'tail -n 50 \'/var/log/app.log\' | grep -E \'ERROR\''); }); await test('handleSshTail: injection attempt in file path is neutralized by shQuote', async () => { @@ -146,7 +146,7 @@ await test('handleSshTail: injection attempt in file path is neutralized by shQu getConnection: async () => client, args: { server: 's', file: '/tmp/log; rm -rf /', lines: 5 }, }); - assert.strictEqual(client.lastCommand, "tail -n 5 '/tmp/log; rm -rf /'"); + assert.strictEqual(client.lastCommand, 'tail -n 5 \'/tmp/log; rm -rf /\''); }); await test('handleSshTail: connection failure -> isError with stderr diagnostic', async () => { @@ -192,7 +192,7 @@ await test('handleSshTailStart: returns session_id + tail -n N -f command', asyn assert.strictEqual(parsed.success, true); assert.strictEqual(parsed.tool, 'ssh_tail_start'); assert(/^tail_[0-9a-f]{16}$/.test(parsed.data.session_id), `id shape: ${parsed.data.session_id}`); - assert.strictEqual(client.lastCommand, "tail -n 3 -f '/var/log/app.log'"); + assert.strictEqual(client.lastCommand, 'tail -n 3 -f \'/var/log/app.log\''); // Session is tracked in the registry assert(_sessionsForTest().has(parsed.data.session_id)); // Cleanup @@ -205,7 +205,7 @@ await test('handleSshTailStart: file path shell-quoted (injection neutralized)', getConnection: async () => client, args: { server: 's', file: '/var/log/bad; rm -rf /', lines: 5, format: 'json' }, }); - assert.strictEqual(client.lastCommand, "tail -n 5 -f '/var/log/bad; rm -rf /'"); + 
assert.strictEqual(client.lastCommand, 'tail -n 5 -f \'/var/log/bad; rm -rf /\''); const parsed = JSON.parse(r.content[0].text); await handleSshTailStop({ args: { session_id: parsed.data.session_id } }); }); @@ -216,7 +216,7 @@ await test('handleSshTailStart: grep filter quoted and appended after tail', asy getConnection: async () => client, args: { server: 's', file: '/f', lines: 10, grep: 'ERR', format: 'json' }, }); - assert.strictEqual(client.lastCommand, "tail -n 10 -f '/f' | grep -E 'ERR'"); + assert.strictEqual(client.lastCommand, 'tail -n 10 -f \'/f\' | grep -E \'ERR\''); const parsed = JSON.parse(r.content[0].text); await handleSshTailStop({ args: { session_id: parsed.data.session_id } }); }); diff --git a/tests/test-tool-annotations.js b/tests/test-tool-annotations.js new file mode 100644 index 0000000..6f6a232 --- /dev/null +++ b/tests/test-tool-annotations.js @@ -0,0 +1,116 @@ +#!/usr/bin/env node +/** + * Tests for src/tool-annotations.js: every registered tool must have a + * title + at least one of readOnlyHint/destructiveHint/idempotentHint, and + * the MCP-spec annotation invariants (readOnly != destructive) must hold. 
+ */ + +import assert from 'node:assert'; +import { TOOL_ANNOTATIONS, withAnnotations } from '../src/tool-annotations.js'; +import { TOOL_GROUPS } from '../src/tool-registry.js'; + +let passed = 0; +let failed = 0; +const fails = []; + +async function test(name, fn) { + try { + await fn(); + passed++; + console.log(`[ok] ${name}`); + } catch (e) { + failed++; + fails.push({ name, err: e }); + console.error(`[err] ${name}: ${e.message}`); + } +} + +const allRegistered = Object.values(TOOL_GROUPS).flat(); + +await test('every registered tool has an annotations entry', () => { + const missing = allRegistered.filter(name => !TOOL_ANNOTATIONS[name]); + assert.strictEqual(missing.length, 0, + `tools registered but missing annotations: ${missing.join(', ')}`); +}); + +await test('every annotated tool is actually registered (no dangling entries)', () => { + const registered = new Set(allRegistered); + const dangling = Object.keys(TOOL_ANNOTATIONS).filter(name => !registered.has(name)); + assert.strictEqual(dangling.length, 0, + `annotations defined for unknown tools: ${dangling.join(', ')}`); +}); + +await test('every annotated tool has a human title', () => { + const missing = Object.entries(TOOL_ANNOTATIONS) + .filter(([, v]) => !v.title || typeof v.title !== 'string') + .map(([k]) => k); + assert.strictEqual(missing.length, 0, + `tools missing title: ${missing.join(', ')}`); +}); + +await test('readOnlyHint and destructiveHint are never both true (spec invariant)', () => { + const conflicts = Object.entries(TOOL_ANNOTATIONS) + .filter(([, v]) => v.annotations?.readOnlyHint && v.annotations?.destructiveHint) + .map(([k]) => k); + assert.strictEqual(conflicts.length, 0, + `readOnly + destructive both set on: ${conflicts.join(', ')}`); +}); + +await test('obviously-destructive tools are marked destructiveHint', () => { + const expected = ['ssh_backup_restore', 'ssh_db_import', 'ssh_deploy', 'ssh_deploy_artifact', + 'ssh_execute_sudo', 'ssh_backup_schedule', 'ssh_edit', 
'ssh_plan']; + for (const name of expected) { + assert.strictEqual(TOOL_ANNOTATIONS[name]?.annotations?.destructiveHint, true, + `${name} should be destructiveHint:true`); + } +}); + +await test('obviously read-only tools are marked readOnlyHint', () => { + const expected = ['ssh_list_servers', 'ssh_health_check', 'ssh_cat', 'ssh_db_list', + 'ssh_db_query', 'ssh_tail', 'ssh_tail_read', 'ssh_backup_list', + 'ssh_connection_status', 'ssh_history', 'ssh_session_list']; + for (const name of expected) { + assert.strictEqual(TOOL_ANNOTATIONS[name]?.annotations?.readOnlyHint, true, + `${name} should be readOnlyHint:true`); + } +}); + +await test('withAnnotations() merges title + annotations into schema', () => { + const base = { description: 'x', inputSchema: {} }; + const out = withAnnotations('ssh_list_servers', base); + assert.strictEqual(out.title, 'List Configured Servers'); + assert.strictEqual(out.annotations.readOnlyHint, true); + assert.strictEqual(out.annotations.idempotentHint, true); + // Caller-provided fields preserved + assert.strictEqual(out.description, 'x'); +}); + +await test('withAnnotations() leaves unknown tools untouched', () => { + const base = { description: 'x', inputSchema: {} }; + const out = withAnnotations('ssh_nonexistent_tool', base); + assert.deepStrictEqual(out, base); +}); + +await test('withAnnotations() does not clobber a caller-provided title', () => { + const base = { title: 'Custom', description: 'x', inputSchema: {} }; + const out = withAnnotations('ssh_execute', base); + assert.strictEqual(out.title, 'Custom'); +}); + +await test('withAnnotations() caller-provided annotations override map defaults', () => { + // ssh_list_servers is annotated readOnlyHint:true, idempotentHint:true. + // If a future caller explicitly flips readOnlyHint off, that must win. 
+ const base = { + description: 'x', + inputSchema: {}, + annotations: { readOnlyHint: false }, + }; + const out = withAnnotations('ssh_list_servers', base); + assert.strictEqual(out.annotations.readOnlyHint, false, + 'caller override must beat map default'); + assert.strictEqual(out.annotations.idempotentHint, true, + 'non-overridden map defaults still apply'); +}); + +console.log(`\n${passed} passed, ${failed} failed`); +if (failed > 0) { for (const f of fails) console.error(` [err] ${f.name}\n ${f.err.stack}`); process.exit(1); } diff --git a/tests/test-tool-config-manager.js b/tests/test-tool-config-manager.js new file mode 100644 index 0000000..8e7a5da --- /dev/null +++ b/tests/test-tool-config-manager.js @@ -0,0 +1,224 @@ +#!/usr/bin/env node +/** + * Tests for src/tool-config-manager.js -- the gate that decides whether + * each registered tool is served to the MCP client. Zero coverage prior + * to this file; this is the gatekeeper for every one of the 50 tools. + * + * Covers: + * - default config when no file exists (all enabled) + * - corrupt JSON falls back to defaults, doesn't crash + * - invalid structure (missing fields / bad mode) falls back to defaults + * - mode=all, mode=minimal, mode=custom semantics + * - individual tool overrides + * - disableGroup('core') is refused (core is load-bearing) + * - unknown tool / unknown group rejected by enable/disable helpers + * - getEnabledTools / getDisabledTools arithmetic matches registry + * - exportClaudeCodeConfig produces sensible auto-approval patterns + */ + +import assert from 'node:assert'; +import fs from 'node:fs'; +import os from 'node:os'; +import path from 'node:path'; +import { ToolConfigManager } from '../src/tool-config-manager.js'; +import { TOOL_GROUPS, getAllTools } from '../src/tool-registry.js'; + +let passed = 0; +let failed = 0; +const fails = []; + +async function test(name, fn) { + try { + await fn(); + passed++; + console.log(`[ok] ${name}`); + } catch (e) { + failed++; + 
fails.push({ name, err: e }); + console.error(`[err] ${name}: ${e.message}`); + } +} + +function makeTmpConfigPath() { + const dir = fs.mkdtempSync(path.join(os.tmpdir(), 'toolcfg-')); + return path.join(dir, 'tools-config.json'); +} + +function makeManagerWithPath(cfgPath) { + const m = new ToolConfigManager(); + m.configPath = cfgPath; + return m; +} + +// --- load() -------------------------------------------------------------- +await test('load(): no file -> default mode=all, all tools enabled', async () => { + const cfgPath = makeTmpConfigPath(); + // Ensure absent + if (fs.existsSync(cfgPath)) fs.unlinkSync(cfgPath); + const m = makeManagerWithPath(cfgPath); + await m.load(); + assert.strictEqual(m.config.mode, 'all'); + assert.strictEqual(m.getEnabledTools().length, getAllTools().length); + assert.strictEqual(m.getDisabledTools().length, 0); +}); + +await test('load(): corrupt JSON falls back to defaults (no crash)', async () => { + const cfgPath = makeTmpConfigPath(); + fs.writeFileSync(cfgPath, '{ this is not: valid JSON', 'utf8'); + const m = makeManagerWithPath(cfgPath); + await m.load(); // must not throw + assert.strictEqual(m.config.mode, 'all'); + assert.strictEqual(m.getEnabledTools().length, getAllTools().length); +}); + +await test('load(): invalid structure (missing version) falls back to defaults', async () => { + const cfgPath = makeTmpConfigPath(); + fs.writeFileSync(cfgPath, JSON.stringify({ mode: 'custom' }), 'utf8'); + const m = makeManagerWithPath(cfgPath); + await m.load(); + assert.strictEqual(m.config.mode, 'all'); +}); + +await test('load(): invalid mode falls back to defaults', async () => { + const cfgPath = makeTmpConfigPath(); + fs.writeFileSync(cfgPath, JSON.stringify({ version: '1.0', mode: 'bogus' }), 'utf8'); + const m = makeManagerWithPath(cfgPath); + await m.load(); + assert.strictEqual(m.config.mode, 'all'); +}); + +await test('load(): valid minimal config is accepted', async () => { + const cfgPath = makeTmpConfigPath(); + 
fs.writeFileSync(cfgPath, JSON.stringify({ version: '1.0', mode: 'minimal' }), 'utf8'); + const m = makeManagerWithPath(cfgPath); + await m.load(); + assert.strictEqual(m.config.mode, 'minimal'); +}); + +// --- isToolEnabled -------------------------------------------------------- +await test('mode=all enables every tool', () => { + const m = new ToolConfigManager(); + m.config = m.getDefaultConfig(); // mode=all + for (const name of getAllTools()) { + assert.strictEqual(m.isToolEnabled(name), true, `${name} should be enabled`); + } +}); + +await test('mode=minimal enables ONLY core group', () => { + const m = new ToolConfigManager(); + m.config = { version: '1.0', mode: 'minimal' }; + for (const name of getAllTools()) { + const expected = TOOL_GROUPS.core.includes(name); + assert.strictEqual(m.isToolEnabled(name), expected, + `${name} expected ${expected} in minimal mode`); + } +}); + +await test('mode=custom respects per-group enable flags', () => { + const m = new ToolConfigManager(); + m.config = { + version: '1.0', mode: 'custom', + groups: { + core: { enabled: true }, sessions: { enabled: false }, + monitoring: { enabled: false }, backup: { enabled: false }, + database: { enabled: false }, advanced: { enabled: false }, + gamechanger: { enabled: false }, + }, + }; + for (const name of TOOL_GROUPS.core) assert.strictEqual(m.isToolEnabled(name), true); + for (const name of TOOL_GROUPS.sessions) assert.strictEqual(m.isToolEnabled(name), false); + for (const name of TOOL_GROUPS.database) assert.strictEqual(m.isToolEnabled(name), false); +}); + +await test('individual tool override wins over group disable (in custom mode)', () => { + const m = new ToolConfigManager(); + m.config = { + version: '1.0', mode: 'custom', + groups: { database: { enabled: false } }, + tools: { ssh_db_query: true }, + }; + assert.strictEqual(m.isToolEnabled('ssh_db_query'), true, + 'explicit tool=true must override group=false'); + assert.strictEqual(m.isToolEnabled('ssh_db_dump'), false, 
+ 'sibling in disabled group without override stays off'); +}); + +await test('individual tool override can disable a tool inside an enabled group', () => { + const m = new ToolConfigManager(); + m.config = { + version: '1.0', mode: 'custom', + groups: { core: { enabled: true } }, + tools: { ssh_execute: false }, + }; + assert.strictEqual(m.isToolEnabled('ssh_execute'), false); + assert.strictEqual(m.isToolEnabled('ssh_list_servers'), true); +}); + +await test('isToolEnabled defaults to true before load (first-run safety)', () => { + const m = new ToolConfigManager(); + // Nothing loaded; this.config is null. + assert.strictEqual(m.isToolEnabled('ssh_execute'), true); +}); + +// --- group / tool mutators ----------------------------------------------- +await test('disableGroup("core") is refused -- core is load-bearing', async () => { + const cfgPath = makeTmpConfigPath(); + const m = makeManagerWithPath(cfgPath); + await m.load(); + const r = await m.disableGroup('core'); + assert.strictEqual(r, false); + // core stays enabled (disableGroup should not have mutated anything) + assert.strictEqual(m.isToolEnabled('ssh_execute'), true); +}); + +await test('enableGroup / disableGroup on unknown group returns false', async () => { + const cfgPath = makeTmpConfigPath(); + const m = makeManagerWithPath(cfgPath); + await m.load(); + assert.strictEqual(await m.enableGroup('nonsense'), false); + assert.strictEqual(await m.disableGroup('nonsense'), false); +}); + +await test('enableTool / disableTool reject unknown tools', async () => { + const cfgPath = makeTmpConfigPath(); + const m = makeManagerWithPath(cfgPath); + await m.load(); + assert.strictEqual(await m.enableTool('ssh_not_a_tool'), false); + assert.strictEqual(await m.disableTool('ssh_not_a_tool'), false); +}); + +await test('setMode rejects invalid modes', async () => { + const cfgPath = makeTmpConfigPath(); + const m = makeManagerWithPath(cfgPath); + await m.load(); + assert.strictEqual(await m.setMode('garbage'), 
false); + assert.strictEqual(m.config.mode, 'all', 'mode unchanged after bad setMode'); +}); + +// --- summary + export ---------------------------------------------------- +await test('getSummary returns counts matching registry', async () => { + const cfgPath = makeTmpConfigPath(); + const m = makeManagerWithPath(cfgPath); + await m.load(); + const s = m.getSummary(); + assert.strictEqual(s.totalTools, getAllTools().length); + assert.strictEqual(s.enabledCount + s.disabledCount, s.totalTools); + assert.strictEqual(s.groups.length, Object.keys(TOOL_GROUPS).length); +}); + +await test('exportClaudeCodeConfig emits mcp__ssh-manager__ prefixed patterns for every enabled tool', async () => { + const cfgPath = makeTmpConfigPath(); + const m = makeManagerWithPath(cfgPath); + await m.load(); + const exp = m.exportClaudeCodeConfig(); + const enabled = m.getEnabledTools(); + assert.strictEqual(exp.patterns.length, enabled.length); + for (const p of exp.patterns) { + assert(/^mcp__ssh-manager__ssh_/.test(p), + `unexpected pattern shape: ${p}`); + } + assert(Array.isArray(exp.exampleConfig.autoApprove.tools)); +}); + +console.log(`\n${passed} passed, ${failed} failed`); +if (failed > 0) { for (const f of fails) console.error(` [err] ${f.name}\n ${f.err.stack}`); process.exit(1); } diff --git a/tests/test-tool-exec-stream.js b/tests/test-tool-exec-stream.js deleted file mode 100644 index 15ac3a9..0000000 --- a/tests/test-tool-exec-stream.js +++ /dev/null @@ -1,202 +0,0 @@ -#!/usr/bin/env node - -/** - * Test suite for src/tool-exec-stream.js -- full pipeline with a fake client. 
- * Run: node tests/test-tool-exec-stream.js - */ - -import assert from 'assert'; -import { EventEmitter } from 'events'; -import { runStreamedExec } from '../src/tool-exec-stream.js'; - -let passed = 0; -let failed = 0; -const fails = []; - -async function test(name, fn) { - try { - await fn(); - passed++; - console.log(`[ok] ${name}`); - } catch (e) { - failed++; - fails.push({ name, err: e }); - console.error(`[err] ${name}: ${e.message}`); - } -} - -class FakeStream extends EventEmitter { - constructor() { - super(); - this.stderr = new EventEmitter(); - this.signals = []; - } - signal(n) { this.signals.push(n); } - close() { setImmediate(() => this.emit('close', null, 'TERM')); } -} -class FakeClient { - constructor({ execError } = {}) { - this.execError = execError; - this.lastCommand = null; - this.streams = []; - } - exec(cmd, cb) { - this.lastCommand = cmd; - setImmediate(() => { - if (this.execError) return cb(this.execError); - const s = new FakeStream(); - this.streams.push(s); - cb(null, s); - }); - } -} -const sleep = (ms) => new Promise(r => setTimeout(r, ms)); - -console.log('[test] Testing tool-exec-stream\n'); - -// --- Happy path ---------------------------------------------------------- -await test('success returns markdown content with [ok] marker and exit 0 badge', async () => { - const client = new FakeClient(); - const p = runStreamedExec({ - client, server: 'prod01', command: 'echo hi', cwd: '/var/app', - debounceMs: 5, - }); - await sleep(5); - const s = client.streams[0]; - s.emit('data', Buffer.from('hi\n')); - s.emit('close', 0); - const r = await p; - assert.strictEqual(r.isError, undefined, 'not a tool-level error'); - assert.strictEqual(r.content.length, 1); - const md = r.content[0].text; - assert(md.startsWith('[ok] **ssh_execute**'), 'success marker'); - assert(md.includes('`prod01`')); - assert(md.includes('exit 0')); - assert(md.includes('*(in `/var/app`)*')); - assert(md.includes('hi')); -}); - -await test('non-zero exit is NOT 
isError (command ran, just failed)', async () => { - const client = new FakeClient(); - const p = runStreamedExec({ client, server: 's', command: 'false', debounceMs: 5 }); - await sleep(5); - const s = client.streams[0]; - s.emit('close', 1); - const r = await p; - assert.strictEqual(r.isError, undefined); - assert(r.content[0].text.includes('exit 1')); - assert(r.content[0].text.startsWith('[err] **ssh_execute**')); -}); - -await test('exec error -> isError:true with stderr populated', async () => { - const client = new FakeClient({ execError: new Error('connection refused') }); - const r = await runStreamedExec({ client, server: 's', command: 'anything' }); - assert.strictEqual(r.isError, true); - assert(r.content[0].text.includes('connection refused')); - assert(r.content[0].text.includes('exit -1')); -}); - -await test('timeout bubbles up as isError:true with timeout message', async () => { - const client = new FakeClient(); - const p = runStreamedExec({ client, server: 's', command: 'sleep 999', timeoutMs: 30, debounceMs: 5 }); - await sleep(5); - client.streams[0].emit('data', Buffer.from('working')); - const r = await p; - assert.strictEqual(r.isError, true); - assert(r.content[0].text.includes('timeout after 30ms')); - // The fake stream received INT signal - assert(client.streams[0].signals.includes('INT')); -}); - -// --- cwd shell safety at the tool layer --------------------------------- -await test('cwd with injection attempt is neutralized before reaching remote', async () => { - const client = new FakeClient(); - const p = runStreamedExec({ - client, server: 's', command: 'ls', - cwd: '/tmp; rm -rf /', - debounceMs: 5, - }); - await sleep(5); - assert.strictEqual(client.lastCommand, "cd '/tmp; rm -rf /' && ls"); - client.streams[0].emit('close', 0); - await p; -}); - -// --- onChunk forwarding -------------------------------------------------- -await test('onChunk receives debounced stdout chunks', async () => { - const chunks = []; - const client = 
new FakeClient(); - const p = runStreamedExec({ - client, server: 's', command: 'cmd', - debounceMs: 20, - onChunk: c => chunks.push(c), - }); - await sleep(5); - const s = client.streams[0]; - for (let i = 0; i < 5; i++) s.emit('data', Buffer.from('x')); - await sleep(30); - s.emit('close', 0); - await p; - assert(chunks.length >= 1); - assert.strictEqual(chunks.map(c => c.text).join(''), 'xxxxx'); - assert(chunks.every(c => c.kind === 'stdout')); -}); - -// --- format variants ----------------------------------------------------- -await test('format:json returns single JSON block parseable into wire schema', async () => { - const client = new FakeClient(); - const p = runStreamedExec({ - client, server: 'prod01', command: 'uname -a', cwd: null, - format: 'json', debounceMs: 5, - }); - await sleep(5); - const s = client.streams[0]; - s.emit('data', Buffer.from('Linux rocky\n')); - s.emit('close', 0); - const r = await p; - assert.strictEqual(r.content.length, 1); - const parsed = JSON.parse(r.content[0].text); - assert.strictEqual(parsed.server, 'prod01'); - assert.strictEqual(parsed.exit_code, 0); - assert.strictEqual(parsed.success, true); - assert.strictEqual(parsed.stdout, 'Linux rocky\n'); - assert(parsed.duration_ms >= 0); -}); - -await test('format:both returns markdown + json in content array', async () => { - const client = new FakeClient(); - const p = runStreamedExec({ - client, server: 's', command: 'c', format: 'both', debounceMs: 5, - }); - await sleep(5); - client.streams[0].emit('close', 0); - const r = await p; - assert.strictEqual(r.content.length, 2); - assert(r.content[0].text.includes('ssh_execute'), 'md first'); - assert.doesNotThrow(() => JSON.parse(r.content[1].text)); -}); - -// --- Large output truncation through the pipeline ----------------------- -await test('oversized stdout is truncated and rendered with elided blockquote', async () => { - const client = new FakeClient(); - const p = runStreamedExec({ - client, server: 's', command: 
'spam', - maxLen: 500, - debounceMs: 5, - }); - await sleep(5); - const s = client.streams[0]; - s.emit('data', Buffer.from('A'.repeat(5000) + 'TAIL_MARKER')); - s.emit('close', 0); - const r = await p; - const md = r.content[0].text; - assert(md.includes('TAIL_MARKER'), 'tail preserved in render'); - assert(md.includes('> elided:'), 'truncation blockquote'); -}); - -// --- Summary ------------------------------------------------------------- -console.log(`\n${passed} passed, ${failed} failed`); -if (failed > 0) { - for (const f of fails) console.error(` [err] ${f.name}\n ${f.err.stack}`); - process.exit(1); -} diff --git a/tests/test-transfer-tools.js b/tests/test-transfer-tools.js index 36eba4b..ad29870 100644 --- a/tests/test-transfer-tools.js +++ b/tests/test-transfer-tools.js @@ -150,13 +150,13 @@ await test('ssh_upload: verify=true calls sha256sum with shell-quoted path', asy }); await handleSshUpload({ getConnection: async () => client, - args: { server: 's', local_path: t.path, remote_path: "/etc/odd'name", format: 'json' }, + args: { server: 's', local_path: t.path, remote_path: '/etc/odd\'name', format: 'json' }, }); // The hash command should contain the quoted remote path (single-quoted with escape) const hashCmd = client.commands.find(c => c.startsWith('sha256sum ')); assert(hashCmd, 'expected sha256sum command'); - assert(hashCmd.includes("'/etc/odd'\\''name'"), `quote not applied: ${hashCmd}`); - assert(hashCmd.endsWith("awk '{print $1}'")); + assert(hashCmd.includes('\'/etc/odd\'\\\'\'name\''), `quote not applied: ${hashCmd}`); + assert(hashCmd.endsWith('awk \'{print $1}\'')); } finally { cleanupPath(t.dir); } }); @@ -215,7 +215,7 @@ await test('ssh_upload: preview shows remote stat + never calls sftp/fastPut', a // stat uses `stat -c '%s %Y' REMOTE 2>/dev/null || echo "new file"` const statCmd = client.commands.find(c => c.startsWith('stat ')); assert(statCmd, 'expected stat command'); - assert(statCmd.includes("'/target'")); + 
assert(statCmd.includes('\'/target\'')); } finally { cleanupPath(t.dir); } }); @@ -254,9 +254,8 @@ await test('ssh_upload: preview never calls getConnection for sftp (no fastPut)' }); await test('ssh_upload: missing local_path returns structured failure (no connection)', async () => { - let called = false; const r = await handleSshUpload({ - getConnection: async () => { called = true; throw new Error('should not connect'); }, + getConnection: async () => { throw new Error('should not connect'); }, args: { server: 's', local_path: '/does/not/exist/here.bin', remote_path: '/x', format: 'json' }, }); assert.strictEqual(r.isError, true); @@ -285,7 +284,7 @@ await test('ssh_download: happy path calls fastGet(remote,local) in that order', const localDest = path.join(dir, 'out'); try { const sftp = new FakeSftp(); - sftp.fastGetImpl = (remote, local, cb) => { + sftp.fastGetImpl = (_remote, local, cb) => { fs.writeFileSync(local, content); cb(null); }; @@ -312,7 +311,7 @@ await test('ssh_download: sha256 mismatch -> isError with diagnostic', async () const localDest = path.join(dir, 'out'); try { const sftp = new FakeSftp(); - sftp.fastGetImpl = (remote, local, cb) => { + sftp.fastGetImpl = (_remote, local, cb) => { fs.writeFileSync(local, 'actual'); cb(null); }; @@ -351,7 +350,7 @@ await test('ssh_download: verify=false skips sha256 entirely', async () => { const localDest = path.join(dir, 'f'); try { const sftp = new FakeSftp(); - sftp.fastGetImpl = (remote, local, cb) => { + sftp.fastGetImpl = (_remote, local, cb) => { fs.writeFileSync(local, 'data'); cb(null); }; @@ -368,7 +367,7 @@ await test('ssh_download: verify=false skips sha256 entirely', async () => { await test('ssh_download: fastGet error -> isError', async () => { const sftp = new FakeSftp(); - sftp.fastGetImpl = (remote, local, cb) => cb(new Error('remote missing')); + sftp.fastGetImpl = (_remote, _local, cb) => cb(new Error('remote missing')); const client = new FakeClient({ sftp, script: () => ({ stdout: 
'hash', code: 0 }) }); const r = await handleSshDownload({ getConnection: async () => client, @@ -382,9 +381,9 @@ await test('ssh_download: fastGet error -> isError', async () => { // -------------------------------------------------------------------------- // ssh_sync // -------------------------------------------------------------------------- -await test('buildRsyncArgv: keypath auth yields direct rsync with -i key', () => { +await test('buildRsyncArgv: keyPath auth yields direct rsync with -i key (canonical field)', () => { const argv = buildRsyncArgv({ - serverConfig: { user: 'u', host: 'h', keypath: '/k' }, + serverConfig: { user: 'u', host: 'h', keyPath: '/k' }, direction: 'push', localPath: '/src', remotePath: '/dst', exclude: [], dry_run: false, delete: false, compress: true, }); @@ -398,14 +397,28 @@ await test('buildRsyncArgv: keypath auth yields direct rsync with -i key', () => assert(argv[argv.length - 1] === 'u@h:/dst'); }); -await test('buildRsyncArgv: password auth prepends -p + rsync', () => { +await test('buildRsyncArgv: keypath (legacy alias) still accepted for backward compat', () => { + const argv = buildRsyncArgv({ + serverConfig: { user: 'u', host: 'h', keypath: '/legacy' }, + direction: 'push', localPath: '/s', remotePath: '/d', + }); + const eIdx = argv.indexOf('-e'); + assert(argv[eIdx + 1].includes('-i /legacy'), + 'legacy lowercase keypath field should resolve the same as keyPath'); +}); + +await test('buildRsyncArgv: password auth is NOT embedded in argv (regression: no secret leak via ps aux)', () => { const argv = buildRsyncArgv({ serverConfig: { user: 'u', host: 'h', password: 'sekret' }, direction: 'push', localPath: '/s', remotePath: '/d', }); - assert.strictEqual(argv[0], '-p'); - assert.strictEqual(argv[1], 'sekret'); - assert.strictEqual(argv[2], 'rsync'); + // Password must not appear anywhere in the rsync argv. The handler uses + // `sshpass -e` + SSHPASS env var -- proven by the ssh_sync test below. 
+ for (const v of argv) { + assert(!String(v).includes('sekret'), + `password leaked into rsync argv: ${JSON.stringify(argv)}`); + } + assert.strictEqual(argv[0], '-avz'); // pure rsync flags, no sshpass prefix }); await test('buildRsyncArgv: exclude + dry_run + delete flags honored', () => { @@ -523,10 +536,10 @@ await test('ssh_sync: same-prefix source/dest rejected with helpful error', asyn assert(parsed.error.includes('one local + one remote')); }); -await test('ssh_sync: password config drives sshpass command', async () => { +await test('ssh_sync: password config drives `sshpass -e rsync ...` with SSHPASS env (no secret in argv)', async () => { const spawnCalls = []; - const fakeSpawn = (cmd, args) => { - spawnCalls.push({ cmd, args }); + const fakeSpawn = (cmd, args, opts) => { + spawnCalls.push({ cmd, args, opts }); const proc = new EventEmitter(); proc.stdout = new EventEmitter(); proc.stderr = new EventEmitter(); @@ -542,10 +555,15 @@ await test('ssh_sync: password config drives sshpass command', async () => { format: 'json', spawnFn: fakeSpawn, }, }); - assert.strictEqual(spawnCalls[0].cmd, 'sshpass'); - assert.strictEqual(spawnCalls[0].args[0], '-p'); - assert.strictEqual(spawnCalls[0].args[1], 'pw'); - assert.strictEqual(spawnCalls[0].args[2], 'rsync'); + const call = spawnCalls[0]; + assert.strictEqual(call.cmd, 'sshpass'); + assert.strictEqual(call.args[0], '-e', 'sshpass must read password from SSHPASS env, not argv'); + assert.strictEqual(call.args[1], 'rsync'); + // Password must not be in argv at all + for (const a of call.args) { + assert(!String(a).includes('pw'), `password leaked into argv: ${JSON.stringify(call.args)}`); + } + assert.strictEqual(call.opts.env.SSHPASS, 'pw', 'SSHPASS must be set in spawn env'); }); // -------------------------------------------------------------------------- @@ -563,7 +581,7 @@ await test('ssh_diff: same-server builds `diff -u A B` with quoted paths', async assert.strictEqual(parsed.success, true); 
assert.strictEqual(parsed.data.mode, 'same-server'); assert.strictEqual(parsed.data.identical, false); - assert(client.commands[0].startsWith("diff -u '/etc/a.conf' '/etc/b.conf'")); + assert(client.commands[0].startsWith('diff -u \'/etc/a.conf\' \'/etc/b.conf\'')); }); await test('ssh_diff: identical files -> identical:true on exit 0', async () => { @@ -581,9 +599,9 @@ await test('ssh_diff: cross-server downloads both paths via sftp', async () => { const dirB = fs.mkdtempSync(path.join(os.tmpdir(), 'diffB-')); try { const sftpA = new FakeSftp(); - sftpA.fastGetImpl = (remote, local, cb) => { fs.writeFileSync(local, 'A-content\n'); cb(null); }; + sftpA.fastGetImpl = (_remote, local, cb) => { fs.writeFileSync(local, 'A-content\n'); cb(null); }; const sftpB = new FakeSftp(); - sftpB.fastGetImpl = (remote, local, cb) => { fs.writeFileSync(local, 'B-content\n'); cb(null); }; + sftpB.fastGetImpl = (_remote, local, cb) => { fs.writeFileSync(local, 'B-content\n'); cb(null); }; const clients = { srv1: new FakeClient({ sftp: sftpA, script: () => ({ stdout: '', code: 0 }) }), srv2: new FakeClient({ sftp: sftpB, script: () => ({ stdout: '', code: 0 }) }), @@ -659,7 +677,7 @@ await test('ssh_edit: preview shows plan + stat + bytes, never mutates', async ( await test('ssh_edit: full-replace flow writes tmp via base64, cp backup, then mv', async () => { const client = new FakeClient({ script: (cmd) => { - if (cmd.startsWith(`cat `)) return { stdout: '{"old":true}\n', code: 0 }; + if (cmd.startsWith('cat ')) return { stdout: '{"old":true}\n', code: 0 }; return { stdout: '', code: 0 }; }, }); @@ -696,14 +714,10 @@ await test('ssh_edit: full-replace flow writes tmp via base64, cp backup, then m }); await test('ssh_edit: json path triggers python3 json syntax check; failure aborts + cleans tmp', async () => { - let tmpPath = null; const client = new FakeClient({ script: (cmd) => { if (cmd.startsWith('cat ')) return { stdout: '{}', code: 0 }; if (cmd.includes('base64 -d >')) { - // 
capture the tmp path from the command - const m = cmd.match(/base64 -d > '([^']+)'/); - if (m) tmpPath = m[1]; return { stdout: '', code: 0 }; } if (cmd.includes('python3 -c') && cmd.includes('json')) { @@ -793,7 +807,7 @@ await test('ssh_edit: missing new_content AND patch -> structured failure', asyn }); await test('ssh_edit: path with shell metachars is single-quoted in every remote command', async () => { - const evil = "/etc/my'cfg; rm -rf /.conf"; + const evil = '/etc/my\'cfg; rm -rf /.conf'; const client = new FakeClient({ script: (cmd) => cmd.startsWith('cat ') ? { stdout: 'x', code: 0 } @@ -815,8 +829,8 @@ await test('ssh_edit: path with shell metachars is single-quoted in every remote // before the closing quote). The NAKED unescaped form must never appear -- // if it did, the embedded single quote would terminate the shell literal // and the trailing `; rm -rf /` would parse as a new command. - const quotedPrefix = "'/etc/my'\\''cfg; rm -rf /.conf"; - const naked = "/etc/my'cfg; rm -rf /.conf"; + const quotedPrefix = '\'/etc/my\'\\\'\'cfg; rm -rf /.conf'; + const naked = '/etc/my\'cfg; rm -rf /.conf'; const remoteCommands = client.commands.filter(c => c.includes('/etc/my')); assert(remoteCommands.length > 0, 'at least one command should reference the path'); for (const cmd of remoteCommands) { diff --git a/wiki/Home.md b/wiki/Home.md index d3a7409..c613e01 100644 --- a/wiki/Home.md +++ b/wiki/Home.md @@ -48,12 +48,12 @@ claude mcp add ssh-manager node "$(pwd)/src/index.js" ```mermaid pie title 51 tools across 7 groups "core" : 5 - "sessions" : 4 + "sessions" : 6 "monitoring" : 6 "backup" : 4 "database" : 4 "advanced" : 14 - "gamechanger" : 14 + "gamechanger" : 12 ``` Opt in per group via `ssh-manager tools configure`. Minimal mode (5 tools) is ~3.5k tokens; full mode is ~43k. 
diff --git a/wiki/Recipes.md b/wiki/Recipes.md index 1ef8349..a1e6ea5 100644 --- a/wiki/Recipes.md +++ b/wiki/Recipes.md @@ -13,8 +13,9 @@ Claude: [ssh_journalctl unit=nginx since=15min] upstream connect timeout to 10.0 [ssh_execute server=prod01 command="curl -s -o /dev/null -w '%{http_code}' http://10.0.0.31:8080/health"] 200 root cause: payments-api died ~8 min ago, nginx still timing out on cached upstream fix: restarted payments-api, nginx now healthy -you: add an alert so i hear about it next time -Claude: [ssh_alert_setup server=app01 metric=service_up threshold=1 unit=payments-api interval=60] configured +you: add a hook so nagios pages me next time this fires +Claude: [ssh_hooks add server=app01 event=on_service_down + action="curl -X POST https://nagios.internal/incidents -d 'unit=payments-api'"] registered ``` ## Rolling config deploy across a fleet diff --git a/wiki/Tool-reference.md b/wiki/Tool-reference.md index 4ae1e4e..d6ad22a 100644 --- a/wiki/Tool-reference.md +++ b/wiki/Tool-reference.md @@ -51,7 +51,7 @@ Persistent shells. The session keeps its cwd, env, and shell state across multip | `ssh_process_manager` | list / filter / kill processes | | `ssh_tail` | real-time log tail with regex filter | | `ssh_monitor` | CPU / RAM / disk / net snapshot | -| `ssh_alert_setup` | configure threshold alerts | +| `ssh_alert_setup` | set / get / check threshold alerts (CPU, memory, per-mount disk) | | `ssh_history` | per-server command history | ## backup (4)