From 1e0a675de3a5323ce5e7a2376339f0643ce0b7dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tr=E1=BA=A7n=20Quang=20=C4=90=C3=A3ng?= Date: Mon, 11 May 2026 08:38:48 +0000 Subject: [PATCH] =?UTF-8?q?feat(cli):=20M2=20=E2=80=94=20server=20lifecycl?= =?UTF-8?q?e,=20auth,=20robot=20envelope=20refactor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit New subcommands: - server start [--detach] [--host H] [--port P] — daemon with PID + endpoint sidecar - server stop — SIGTERM via PID file with cleanup - server status — robot envelope: PID liveness + health probe + DB summary - server init [--force] — bootstraps empty db.json, prints fresh admin key - auth login (saves profile to config.toml, optional connectivity check) - auth logout, auth whoami, auth list Auth credentials persisted to ~/.config/openproxy/config.toml. Refactor (no behaviour change for existing flags): - provider list/add, key list/add, pool list/status/create/delete, tunnel start/stop/status now emit openproxy.v1 envelopes when --robot is set. Legacy --json keeps existing pretty-print output. - route honors --robot by switching to JSON streaming mode (per-event envelope deferred to M3). Config helpers: - New save_config_file in cli/config.rs for auth login/logout. - config_file_path and load_config_file made public for reuse. Tests: - 6 unit tests in server.rs/auth.rs (pid round trip, key shape, profile slug derivation, URL normalization, login/logout round trip). - tests/server_lifecycle.rs spawns real binary, asserts PID + endpoint sidecar appear, /api/health answers, stop cleans up. --- src/cli/auth.rs | 422 +++++++++++++++++++++++++++++++ src/cli/config.rs | 25 +- src/cli/mod.rs | 374 +++++++++++++++++++++++----- src/cli/server.rs | 512 ++++++++++++++++++++++++++++++++++++++ src/main.rs | 96 ++++++- tests/server_lifecycle.rs | 148 +++++++++++ 6 files changed, 1502 insertions(+), 75 deletions(-) create mode 100644 src/cli/auth.rs create mode 100644 src/cli/server.rs create mode 100644 tests/server_lifecycle.rs diff --git a/src/cli/auth.rs b/src/cli/auth.rs new file mode 100644 index 00000000..71cb6dcd --- /dev/null +++ b/src/cli/auth.rs @@ -0,0 +1,422 @@ +//! `openproxy auth` — manage credentials for remote server management. +//! +//! By default the CLI works against the local DB and needs no auth. The auth +//! subcommands only matter when you want to point this CLI at a *different* +//! `openproxy` server (e.g. on a teammate's box or a VPS): +//! +//! - `auth login --url <url> --api-key <key> [--profile <name>]`: saves the +//! credentials as a profile in `~/.config/openproxy/config.toml`. The key +//! is stored as plaintext TOML — see SECURITY note below. The CLI then +//! activates the profile by setting `default_profile`. +//! - `auth logout [--profile <name>]`: deletes the named profile (or the +//! current default). +//! - `auth whoami`: shows the active profile, its URL, and a masked key. In +//! robot mode, also returns the resolved data dir and remote flag. +//! - `auth list`: shows every saved profile and which one is the default. +//! +//! SECURITY: storing API keys in plaintext is acceptable for personal dev +//! boxes — it's the same trust model as `~/.netrc` or `~/.aws/credentials`. +//! For shared machines, prefer setting `OPENPROXY_API_KEY` per-shell or +//! using a per-profile `api_key_env = "MY_VAR"` indirection.
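+//!
+//! Illustrative session (host, key, and profile values below are examples,
+//! not real credentials; the profile slug follows `default_profile_name`):
+//!
+//! ```text
+//! openproxy auth login --url https://op.example.com:4623 --api-key op-0123abcd
+//! openproxy auth whoami --verify
+//! openproxy auth list
+//! openproxy auth logout --profile op-example-com
+//! ```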
+ +use std::time::Duration; + +use anyhow::Context; +use serde_json::json; + +use crate::cli::config::{load_config_file, save_config_file, ResolvedConfig}; +use crate::cli::output::{emit_error, emit_robot, humanln, mask_secret, OutputCtx}; + +pub struct LoginOptions { + pub url: String, + pub api_key: String, + pub profile: Option<String>, + /// If true, skip the live health probe and trust the user's input. + pub no_verify: bool, + /// If true, do not promote this profile to `default_profile`. + pub no_activate: bool, +} + +pub struct LogoutOptions { + pub profile: Option<String>, + /// If true, also clear `default_profile` when removing the current default. + pub keep_default: bool, +} + +/// `openproxy auth login` — persist credentials and (optionally) verify them. +pub async fn run_login(ctx: OutputCtx, opts: LoginOptions) -> anyhow::Result<i32> { + let url = normalize_url(&opts.url); + if !is_http_url(&url) { + return Ok(emit_error( + ctx, + "validation", + &format!("--url must start with http:// or https:// (got '{url}')"), + )?); + } + if opts.api_key.trim().is_empty() { + return Ok(emit_error(ctx, "validation", "--api-key cannot be empty")?); + } + + // Optional connectivity probe so the user finds out about typos here + // rather than on every subsequent command. + let verified = if opts.no_verify { + None + } else { + Some(probe_with_key(&url, &opts.api_key).await) + }; + if let Some(false) = verified { + return Ok(emit_error( + ctx, + "auth", + &format!("could not authenticate against {url} (use --no-verify to skip this check)"), + )?); + } + + let profile_name = opts + .profile + .clone() + .unwrap_or_else(|| default_profile_name(&url)); + + let mut file = load_config_file().unwrap_or_default(); + let entry = file.profiles.entry(profile_name.clone()).or_default(); + entry.url = Some(url.clone()); + entry.api_key = Some(opts.api_key.clone()); + // Clear any indirection so the saved profile is self-contained. + entry.api_key_env = None; + + if !opts.no_activate { + file.default_profile = Some(profile_name.clone()); + } + + let path = save_config_file(&file).context("save updated config file")?; + + if ctx.is_robot() { + emit_robot( + "openproxy.v1.auth.login", + json!({ + "profile": profile_name, + "url": url, + "config_file": path.display().to_string(), + "verified": verified, + "activated": !opts.no_activate, + "masked_key": mask_secret(&opts.api_key), + }), + )?; + } else { + humanln( + ctx, + format!( + "Saved profile '{profile_name}' -> {url} (key {})", + mask_secret(&opts.api_key) + ), + ); + if !opts.no_activate { + humanln(ctx, "Activated as default profile."); + } + humanln(ctx, format!(" config: {}", path.display())); + match verified { + Some(true) => humanln(ctx, " verified: ok"), + Some(false) => humanln(ctx, " verified: FAIL"), + None => humanln(ctx, " verified: skipped"), + } + } + Ok(0) +} + +/// `openproxy auth logout` — remove a saved profile.
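+///
+/// A minimal sketch of the expected invocation (profile name is an example):
+///
+/// ```text
+/// openproxy auth logout --profile op-example-com
+/// ```
+///
+/// Without `--profile` the current `default_profile` is removed, and the
+/// default marker is cleared unless `--keep-default` is passed.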
+pub fn run_logout(ctx: OutputCtx, opts: LogoutOptions) -> anyhow::Result<i32> { + let mut file = load_config_file().unwrap_or_default(); + let target = opts + .profile + .clone() + .or_else(|| file.default_profile.clone()); + let Some(name) = target else { + return Ok(emit_error( + ctx, + "not_found", + "no profile to log out of (no --profile and no default_profile set)", + )?); + }; + + if file.profiles.remove(&name).is_none() { + return Ok(emit_error( + ctx, + "not_found", + &format!("profile '{name}' not found in config"), + )?); + } + + if file.default_profile.as_deref() == Some(&name) && !opts.keep_default { + file.default_profile = None; + } + + let path = save_config_file(&file).context("save updated config file")?; + + if ctx.is_robot() { + emit_robot( + "openproxy.v1.auth.logout", + json!({ + "profile": name, + "config_file": path.display().to_string(), + }), + )?; + } else { + humanln(ctx, format!("Removed profile '{name}'")); + } + Ok(0) +} + +/// `openproxy auth whoami` — describe the currently-resolved identity. +pub async fn run_whoami(ctx: OutputCtx, cfg: &ResolvedConfig, verify: bool) -> anyhow::Result<i32> { + let file = load_config_file().unwrap_or_default(); + let profile = cfg.profile.clone().or(file.default_profile.clone()); + + // Pick the live probe target: the resolved remote URL, if any. A purely + // local setup has nothing to probe here. + let probe_target = cfg.remote_url.clone(); + + let verified = if verify { + match (probe_target.as_deref(), cfg.api_key.as_deref()) { + (Some(url), Some(key)) => Some(probe_with_key(url, key).await), + (Some(url), None) => Some(probe_health(url).await), + _ => None, + } + } else { + None + }; + + if ctx.is_robot() { + emit_robot( + "openproxy.v1.auth.whoami", + json!({ + "profile": profile, + "url": probe_target, + "is_remote": cfg.is_remote(), + "data_dir": cfg.data_dir.display().to_string(), + "has_api_key": cfg.api_key.is_some(), + "masked_key": cfg.api_key.as_deref().map(mask_secret), + "verified": verified, + }), + )?; + } else { + humanln(ctx, "openproxy auth whoami:"); + humanln( + ctx, + format!(" profile: {}", profile.as_deref().unwrap_or("")), + ); + humanln( + ctx, + format!(" url: {}", probe_target.as_deref().unwrap_or("")), + ); + humanln( + ctx, + format!( + " api_key: {}", + cfg.api_key + .as_deref() + .map(mask_secret) + .unwrap_or_else(|| "".to_string()) + ), + ); + humanln(ctx, format!(" data_dir: {}", cfg.data_dir.display())); + match verified { + Some(true) => humanln(ctx, " verified: ok"), + Some(false) => humanln(ctx, " verified: FAIL"), + None => {} + } + } + Ok(if matches!(verified, Some(false)) { + 1 + } else { + 0 + }) +} + +/// `openproxy auth list` — show all configured profiles. Useful for agents +/// to discover what's available before picking one with `--profile`.
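+///
+/// Example invocation (assumes the global `--robot` switch is accepted after
+/// the subcommand, as the integration test does for `server init`):
+///
+/// ```text
+/// openproxy auth list --robot
+/// ```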
+pub fn run_list(ctx: OutputCtx) -> anyhow::Result<i32> { + let file = load_config_file().unwrap_or_default(); + let entries: Vec<_> = file + .profiles + .iter() + .map(|(name, p)| { + json!({ + "name": name, + "url": p.url, + "has_api_key": p.api_key.is_some() || p.api_key_env.is_some(), + "data_dir": p.data_dir, + }) + }) + .collect(); + + if ctx.is_robot() { + emit_robot( + "openproxy.v1.auth.list", + json!({ + "default_profile": file.default_profile, + "profiles": entries, + }), + )?; + } else { + humanln(ctx, "Configured profiles:"); + if entries.is_empty() { + humanln(ctx, " (none)"); + } else { + for entry in &entries { + let name = entry["name"].as_str().unwrap_or("?"); + let url = entry["url"].as_str().unwrap_or(""); + let marker = if Some(name) == file.default_profile.as_deref() { + " (default)" + } else { + "" + }; + humanln(ctx, format!(" {name}{marker} -> {url}")); + } + } + } + Ok(0) +} + +fn normalize_url(url: &str) -> String { + url.trim().trim_end_matches('/').to_string() +} + +fn is_http_url(url: &str) -> bool { + url.starts_with("http://") || url.starts_with("https://") +} + +fn default_profile_name(url: &str) -> String { + // Derive a reasonable default profile name from the URL host. + // Examples: + // http://localhost:4623 -> "localhost" + // https://op.example.com:4623 -> "op-example-com" + // https://op.example.com -> "op-example-com" + let without_scheme = url + .strip_prefix("https://") + .or_else(|| url.strip_prefix("http://")) + .unwrap_or(url); + let host_part = without_scheme + .split(['/', '?', '#']) + .next() + .unwrap_or(without_scheme); + let host = host_part.split(':').next().unwrap_or(host_part); + let host = host.trim(); + if host.is_empty() { + return "remote".to_string(); + } + host.replace('.', "-").to_lowercase() +} + +async fn probe_health(url: &str) -> bool { + let endpoint = format!("{}/api/health", url.trim_end_matches('/')); + let client = match reqwest::Client::builder() + .timeout(Duration::from_millis(1500)) + .build() + { + Ok(c) => c, + Err(_) => return false, + }; + matches!(client.get(&endpoint).send().await, Ok(r) if r.status().is_success()) +} + +async fn probe_with_key(url: &str, key: &str) -> bool { + // First make sure the server is up at all. + if !probe_health(url).await { + return false; + } + // Then try a private endpoint with the key. /api/providers requires auth + // and is universally available so it's a fine smoke test. + let endpoint = format!("{}/api/providers", url.trim_end_matches('/')); + let client = match reqwest::Client::builder() + .timeout(Duration::from_millis(2500)) + .build() + { + Ok(c) => c, + Err(_) => return false, + }; + match client + .get(&endpoint) + .header("Authorization", format!("Bearer {key}")) + .send() + .await + { + // 200 = good; 401/403 = bad key; everything else = treat as bad to + // surface the issue at login time.
+ Ok(r) => r.status().is_success(), + Err(_) => false, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn default_profile_name_strips_scheme_port_and_dots() { + assert_eq!(default_profile_name("http://localhost:4623"), "localhost"); + assert_eq!( + default_profile_name("https://op.example.com:4623"), + "op-example-com" + ); + assert_eq!( + default_profile_name("https://OP.EXAMPLE.COM"), + "op-example-com" + ); + assert_eq!(default_profile_name("http://1.2.3.4:80/x"), "1-2-3-4"); + } + + #[test] + fn normalize_url_strips_trailing_slash() { + assert_eq!(normalize_url(" https://x/y/ "), "https://x/y"); + assert_eq!(normalize_url("http://x/"), "http://x"); + } + + #[test] + fn is_http_url_checks_scheme() { + assert!(is_http_url("http://x")); + assert!(is_http_url("https://x")); + assert!(!is_http_url("file:///x")); + assert!(!is_http_url("x")); + } + + /// Round-trip: login then logout against a temporary config file. + #[tokio::test] + async fn login_then_logout_round_trip() { + let tmp = tempfile::tempdir().unwrap(); + let cfg_path = tmp.path().join("config.toml"); + std::env::set_var("OPENPROXY_CONFIG", &cfg_path); + + run_login( + OutputCtx::robot(), + LoginOptions { + url: "http://localhost:65535".into(), + api_key: "test-key-1".into(), + profile: Some("p1".into()), + no_verify: true, + no_activate: false, + }, + ) + .await + .unwrap(); + + let file = load_config_file().unwrap(); + assert_eq!(file.default_profile.as_deref(), Some("p1")); + let p = file.profiles.get("p1").unwrap(); + assert_eq!(p.url.as_deref(), Some("http://localhost:65535")); + assert_eq!(p.api_key.as_deref(), Some("test-key-1")); + + run_logout( + OutputCtx::robot(), + LogoutOptions { + profile: Some("p1".into()), + keep_default: false, + }, + ) + .unwrap(); + + let file = load_config_file().unwrap(); + assert!(file.profiles.get("p1").is_none()); + assert!(file.default_profile.is_none()); + + std::env::remove_var("OPENPROXY_CONFIG"); + } +} diff --git a/src/cli/config.rs b/src/cli/config.rs index efd270c0..8026ce93 100644 --- a/src/cli/config.rs +++ b/src/cli/config.rs @@ -123,7 +123,10 @@ fn default_data_dir() -> PathBuf { .unwrap_or_else(|| PathBuf::from(".openproxy")) } -fn config_file_path() -> Option<PathBuf> { +/// Resolve the path of the CLI config file. Honors `$OPENPROXY_CONFIG` so +/// `auth login` / tests can point at a temporary file without touching the +/// real `~/.config/openproxy/config.toml`. +pub fn config_file_path() -> Option<PathBuf> { if let Ok(custom) = std::env::var("OPENPROXY_CONFIG") { return Some(PathBuf::from(custom)); } @@ -131,7 +134,9 @@ fn config_file_path() -> Option<PathBuf> { Some(dirs.config_dir().join("config.toml")) } -fn load_config_file() -> anyhow::Result<ConfigFile> { +/// Read the config file from disk, returning a default (empty) file if it +/// does not exist. Errors only on read or parse failure. +pub fn load_config_file() -> anyhow::Result<ConfigFile> { let Some(path) = config_file_path() else { return Ok(ConfigFile::default()); }; @@ -145,6 +150,22 @@ fn load_config_file() -> anyhow::Result<ConfigFile> { Ok(parsed) } +/// Serialize `file` to disk at the resolved config path. Creates the parent +/// directory if needed. Used by `auth login` / `auth logout` so the user does +/// not have to hand-edit TOML.
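+///
+/// For reference, a file written by `auth login` looks roughly like this
+/// (exact field names follow `ConfigFile`'s serde layout; values are
+/// examples):
+///
+/// ```toml
+/// default_profile = "op-example-com"
+///
+/// [profiles.op-example-com]
+/// url = "https://op.example.com:4623"
+/// api_key = "op-0123abcd"
+/// ```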
+pub fn save_config_file(file: &ConfigFile) -> anyhow::Result<PathBuf> { + let path = + config_file_path().ok_or_else(|| anyhow::anyhow!("cannot determine config file path"))?; + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent) + .with_context(|| format!("create config parent directory {}", parent.display()))?; + } + let text = toml::to_string_pretty(file).context("serialize config file to TOML")?; + std::fs::write(&path, text) + .with_context(|| format!("write config file at {}", path.display()))?; + Ok(path) +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/cli/mod.rs b/src/cli/mod.rs index f2220951..d0d61389 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -19,10 +19,12 @@ use crate::types::{ApiKey, AppDb, ProviderConnection, ProxyPool}; use crate::core::tunnel::{TunnelManager, TunnelProvider}; +pub mod auth; pub mod config; pub mod doctor; pub mod output; pub mod schema; +pub mod server; #[derive(Debug, Clone, Parser)] #[command( @@ -162,6 +164,81 @@ pub enum Command { }, /// Run a self-test of the local install (data dir, db, server health). Doctor, + /// Manage the local server daemon (start/stop/status/init). + Server { + #[command(subcommand)] + cmd: ServerCmd, + }, + /// Manage credentials for remote-management mode. + Auth { + #[command(subcommand)] + cmd: AuthCmd, + }, +} + +#[derive(Debug, Clone, Subcommand)] +pub enum ServerCmd { + /// Start the API server. Without `--detach` runs in the foreground. + Start { + /// Run the server in the background and return immediately. + #[arg(long)] + detach: bool, + /// Override the host the server binds to. + #[arg(long)] + host: Option<String>, + /// Override the port the server binds to. + #[arg(long)] + port: Option<u16>, + }, + /// Send SIGTERM to the running server and wait for it to exit. + Stop, + /// Report whether a server is running for this data dir. + Status, + /// Initialize an empty db.json and emit the first admin API key. + Init { + /// Overwrite an existing db.json if present. + #[arg(long)] + force: bool, + }, +} + +#[derive(Debug, Clone, Subcommand)] +pub enum AuthCmd { + /// Save credentials for a remote openproxy server as a profile. + Login { + /// Base URL of the remote server, e.g. https://op.example.com:4623 + #[arg(long)] + url: String, + /// API key generated by `openproxy server init` on the remote server. + #[arg(long)] + api_key: String, + /// Profile name. Defaults to a slug derived from the URL host. + #[arg(long)] + profile: Option<String>, + /// Skip the live health probe (use if the server is offline). + #[arg(long)] + no_verify: bool, + /// Do not promote this profile to `default_profile`. + #[arg(long)] + no_activate: bool, + }, + /// Remove a saved profile. + Logout { + /// Profile name to remove. Defaults to the active default profile. + #[arg(long)] + profile: Option<String>, + /// Keep `default_profile` set even if we just removed it. + #[arg(long)] + keep_default: bool, + }, + /// Show the active identity and optionally verify connectivity. + Whoami { + /// Probe the server to confirm the saved key still works. + #[arg(long)] + verify: bool, + }, + /// List all configured profiles.
+ List, } #[derive(Debug, Clone, Subcommand)] @@ -255,25 +332,25 @@ impl Cli { let db = rt.block_on(Db::load())?; let db = std::sync::Arc::new(db); let rt = tokio::runtime::Runtime::new()?; - rt.block_on(run_provider(cmd, &db)) + rt.block_on(run_provider(cmd, &db, ctx)) } Command::Key { cmd } => { let db = rt.block_on(Db::load())?; let db = std::sync::Arc::new(db); let rt = tokio::runtime::Runtime::new()?; - rt.block_on(run_key(cmd, &db)) + rt.block_on(run_key(cmd, &db, ctx)) } Command::Pool { cmd } => { let db = rt.block_on(Db::load())?; let db = std::sync::Arc::new(db); let rt = tokio::runtime::Runtime::new()?; - rt.block_on(run_pool(cmd, &db)) + rt.block_on(run_pool(cmd, &db, ctx)) } Command::Tunnel { cmd } => { let db = rt.block_on(Db::load())?; let db = std::sync::Arc::new(db); let rt = tokio::runtime::Runtime::new()?; - rt.block_on(run_tunnel(cmd, db.clone())) + rt.block_on(run_tunnel(cmd, db.clone(), ctx)) } Command::Route { model, @@ -309,6 +386,67 @@ impl Cli { let resolved = config::ResolvedConfig::resolve(overrides)?; rt.block_on(doctor::run(ctx, &resolved)).map(|_| ()) } + Command::Server { cmd } => { + let resolved = config::ResolvedConfig::resolve(overrides)?; + match cmd { + ServerCmd::Start { detach, host, port } => { + let opts = server::StartOptions { + host: host.unwrap_or_else(|| "0.0.0.0".to_string()), + port: port.unwrap_or(4623), + detach, + }; + rt.block_on(server::run_start(ctx, &resolved, opts)) + .map(|_| ()) + } + ServerCmd::Stop => { + rt.block_on(server::run_stop(ctx, &resolved)).map(|_| ()) + } + ServerCmd::Status => rt + .block_on(server::run_status(ctx, &resolved, 4623)) + .map(|_| ()), + ServerCmd::Init { force } => rt + .block_on(server::run_init(ctx, &resolved, force)) + .map(|_| ()), + } + } + Command::Auth { cmd } => { + let resolved = config::ResolvedConfig::resolve(overrides)?; + match cmd { + AuthCmd::Login { + url, + api_key, + profile, + no_verify, + no_activate, + } => rt + .block_on(auth::run_login( + ctx, + auth::LoginOptions { + url, + api_key, + profile, + no_verify, + no_activate, + }, + )) + .map(|_| ()), + AuthCmd::Logout { + profile, + keep_default, + } => auth::run_logout( + ctx, + auth::LogoutOptions { + profile, + keep_default, + }, + ) + .map(|_| ()), + AuthCmd::Whoami { verify } => rt + .block_on(auth::run_whoami(ctx, &resolved, verify)) + .map(|_| ()), + AuthCmd::List => auth::run_list(ctx).map(|_| ()), + } + } } } else { Ok(()) @@ -316,37 +454,55 @@ impl Cli { } } -pub async fn run_provider(cmd: ProviderCmd, db: &Db) -> anyhow::Result<()> { +pub async fn run_provider(cmd: ProviderCmd, db: &Db, ctx: output::OutputCtx) -> anyhow::Result<()> { match cmd { ProviderCmd::List { json } => { let connections = db.provider_connections(crate::db::ProviderConnectionFilter::default()); let nodes = db.provider_nodes(None); - if json { + if ctx.is_robot() { + output::emit_robot( + "openproxy.v1.provider.list", + serde_json::json!({ + "provider_connections": connections, + "provider_nodes": nodes, + }), + )?; + } else if json { + // Legacy --json: kept for backward compat. Schema is the + // same shape as the robot envelope's `data`, just without + // the wrapper. 
#[derive(serde::Serialize)] struct ListOutput { provider_connections: Vec, provider_nodes: Vec, } - let output = ListOutput { + let out = ListOutput { provider_connections: connections, provider_nodes: nodes, }; - println!("{}", serde_json::to_string_pretty(&output)?); + println!("{}", serde_json::to_string_pretty(&out)?); } else { - println!("Provider Connections:"); + output::humanln(ctx, "Provider Connections:"); for conn in &connections { - println!( - " {} ({}) - {}", - conn.provider, - conn.auth_type, - conn.name.as_deref().unwrap_or("unnamed") + output::humanln( + ctx, + format!( + " {} ({}) - {}", + conn.provider, + conn.auth_type, + conn.name.as_deref().unwrap_or("unnamed") + ), ); } - println!("\nProvider Nodes:"); + output::humanln(ctx, ""); + output::humanln(ctx, "Provider Nodes:"); for node in &nodes { - println!(" {} - {} ({})", node.name, node.r#type, node.id); + output::humanln( + ctx, + format!(" {} - {} ({})", node.name, node.r#type, node.id), + ); } } } @@ -354,8 +510,12 @@ pub async fn run_provider(cmd: ProviderCmd, db: &Db) -> anyhow::Result<()> { let config: ProviderConnection = match serde_json::from_str(&config) { Ok(c) => c, Err(e) => { - eprintln!("Failed to parse config: {}", e); - std::process::exit(1); + let exit = output::emit_error( + ctx, + "validation", + &format!("failed to parse --config JSON: {e}"), + )?; + std::process::exit(exit); } }; @@ -370,16 +530,28 @@ pub async fn run_provider(cmd: ProviderCmd, db: &Db) -> anyhow::Result<()> { }) .await?; - if json { + if ctx.is_robot() { + output::emit_robot( + "openproxy.v1.provider.add", + serde_json::to_value(&new_conn)?, + )?; + } else if json { println!("{}", serde_json::to_string_pretty(&new_conn)?); } else { - println!("Provider '{}' added successfully", new_conn.provider); + output::humanln( + ctx, + format!("Provider '{}' added successfully", new_conn.provider), + ); } } } Ok(()) } -pub async fn run_tunnel(cmd: TunnelCmd, db: std::sync::Arc<Db>) -> anyhow::Result<()> { +pub async fn run_tunnel( + cmd: TunnelCmd, + db: std::sync::Arc<Db>, + ctx: output::OutputCtx, +) -> anyhow::Result<()> { let tunnel_manager = TunnelManager::new((db).clone()); match cmd { @@ -388,69 +560,110 @@ pub async fn run_tunnel(cmd: TunnelCmd, db: std::sync::Arc<Db>) -> anyhow::Resul .parse::<TunnelProvider>() .map_err(|e| anyhow::anyhow!("{}", e))?; - println!("Starting {} tunnel on port {}...", provider, port); + output::humanln( + ctx, + format!("Starting {} tunnel on port {}...", provider, port), + ); tunnel_manager.start(provider, port).await?; // Wait a bit for URL to appear tokio::time::sleep(tokio::time::Duration::from_secs(3)).await; let status = tunnel_manager.status().await; - if status.running { - println!("Tunnel started successfully"); + if ctx.is_robot() { + output::emit_robot( + "openproxy.v1.tunnel.start", + serde_json::json!({ + "running": status.running, + "provider": status.provider, + "url": status.url, + "pid": status.pid, + }), + )?; + if !status.running { + std::process::exit(1); + } + } else if status.running { + output::humanln(ctx, "Tunnel started successfully"); if let Some(url) = status.url { - println!(" URL: {}", url); + output::humanln(ctx, format!(" URL: {}", url)); } if let Some(pid) = status.pid { - println!(" PID: {}", pid); + output::humanln(ctx, format!(" PID: {}", pid)); } } else { - eprintln!("Tunnel failed to start"); - std::process::exit(1); + let exit = output::emit_error(ctx, "other", "tunnel failed to start")?; + std::process::exit(exit); } } TunnelCmd::Stop => { - println!("Stopping tunnel..."); +
output::humanln(ctx, "Stopping tunnel..."); tunnel_manager.stop().await?; - println!("Tunnel stopped"); + if ctx.is_robot() { + output::emit_robot( + "openproxy.v1.tunnel.stop", + serde_json::json!({"stopped": true}), + )?; + } else { + output::humanln(ctx, "Tunnel stopped"); + } } TunnelCmd::Status => { let status = tunnel_manager.status().await; - if status.running { - println!("Tunnel is running"); + if ctx.is_robot() { + output::emit_robot( + "openproxy.v1.tunnel.status", + serde_json::json!({ + "running": status.running, + "provider": status.provider, + "url": status.url, + "pid": status.pid, + }), + )?; + } else if status.running { + output::humanln(ctx, "Tunnel is running"); if let Some(p) = status.provider { - println!(" Provider: {}", p); + output::humanln(ctx, format!(" Provider: {}", p)); } if let Some(url) = status.url { - println!(" URL: {}", url); + output::humanln(ctx, format!(" URL: {}", url)); } if let Some(pid) = status.pid { - println!(" PID: {}", pid); + output::humanln(ctx, format!(" PID: {}", pid)); } } else { - println!("Tunnel is stopped"); + output::humanln(ctx, "Tunnel is stopped"); } } } Ok(()) } -pub async fn run_key(cmd: KeyCmd, db: &Db) -> anyhow::Result<()> { +pub async fn run_key(cmd: KeyCmd, db: &Db, ctx: output::OutputCtx) -> anyhow::Result<()> { match cmd { KeyCmd::List { json } => { let snapshot = db.snapshot(); let api_keys = &snapshot.api_keys; - if json { + if ctx.is_robot() { + output::emit_robot( + "openproxy.v1.key.list", + serde_json::json!({"keys": api_keys}), + )?; + } else if json { println!("{}", serde_json::to_string_pretty(api_keys)?); } else { - println!("API Keys:"); + output::humanln(ctx, "API Keys:"); for k in api_keys { let key_preview = k.key.chars().take(8).collect::(); - println!( - " {} [{}...] ({})", - k.name, - key_preview, - if k.is_active() { "active" } else { "inactive" } + output::humanln( + ctx, + format!( + " {} [{}...] 
({})", + k.name, + key_preview, + if k.is_active() { "active" } else { "inactive" } + ), ); } } @@ -471,29 +684,39 @@ pub async fn run_key(cmd: KeyCmd, db: &Db) -> anyhow::Result<()> { }) .await?; - if json { + if ctx.is_robot() { + output::emit_robot("openproxy.v1.key.add", serde_json::to_value(&new_key)?)?; + } else if json { println!("{}", serde_json::to_string_pretty(&new_key)?); } else { - println!("API key added successfully"); + output::humanln(ctx, "API key added successfully"); } } } Ok(()) } -pub async fn run_pool(cmd: PoolCmd, db: &Db) -> anyhow::Result<()> { +pub async fn run_pool(cmd: PoolCmd, db: &Db, ctx: output::OutputCtx) -> anyhow::Result<()> { match cmd { PoolCmd::List { json } => { let snapshot = db.snapshot(); let pools = &snapshot.proxy_pools; - if json { + if ctx.is_robot() { + output::emit_robot( + "openproxy.v1.pool.list", + serde_json::json!({"pools": pools}), + )?; + } else if json { println!("{}", serde_json::to_string_pretty(pools)?); } else { - println!("Connection Pools:"); + output::humanln(ctx, "Connection Pools:"); for pool in pools { let status = pool.test_status.as_deref().unwrap_or("unknown"); - println!(" {} - {} ({})", pool.name, pool.r#type, status); + output::humanln( + ctx, + format!(" {} - {} ({})", pool.name, pool.r#type, status), + ); } } } @@ -503,23 +726,32 @@ pub async fn run_pool(cmd: PoolCmd, db: &Db) -> anyhow::Result<()> { match pool { Some(pool) => { - if json { + if ctx.is_robot() { + output::emit_robot( + "openproxy.v1.pool.status", + serde_json::to_value(pool)?, + )?; + } else if json { println!("{}", serde_json::to_string_pretty(pool)?); } else { - println!("Pool: {}", pool.name); - println!(" Type: {}", pool.r#type); - println!(" URL: {}", pool.proxy_url); - println!( - " Status: {:?}", - pool.test_status.as_deref().unwrap_or("unknown") + output::humanln(ctx, format!("Pool: {}", pool.name)); + output::humanln(ctx, format!(" Type: {}", pool.r#type)); + output::humanln(ctx, format!(" URL: {}", pool.proxy_url)); + output::humanln( + ctx, + format!( + " Status: {:?}", + pool.test_status.as_deref().unwrap_or("unknown") + ), ); - println!(" Success Rate: {:?}", pool.success_rate); - println!(" RTT (ms): {:?}", pool.rtt_ms); + output::humanln(ctx, format!(" Success Rate: {:?}", pool.success_rate)); + output::humanln(ctx, format!(" RTT (ms): {:?}", pool.rtt_ms)); } } None => { - eprintln!("Pool '{}' not found", name); - std::process::exit(1); + let exit = + output::emit_error(ctx, "not_found", &format!("pool '{name}' not found"))?; + std::process::exit(exit); } } } @@ -553,10 +785,12 @@ pub async fn run_pool(cmd: PoolCmd, db: &Db) -> anyhow::Result<()> { }) .await?; - if json { + if ctx.is_robot() { + output::emit_robot("openproxy.v1.pool.create", serde_json::to_value(&new_pool)?)?; + } else if json { println!("{}", serde_json::to_string_pretty(&new_pool)?); } else { - println!("Pool '{}' created successfully", name); + output::humanln(ctx, format!("Pool '{}' created successfully", name)); } } PoolCmd::Delete { name, json } => { @@ -564,8 +798,9 @@ pub async fn run_pool(cmd: PoolCmd, db: &Db) -> anyhow::Result<()> { let pool_exists = snapshot.proxy_pools.iter().any(|p| p.name == name); if !pool_exists { - eprintln!("Pool '{}' not found", name); - std::process::exit(1); + let exit = + output::emit_error(ctx, "not_found", &format!("pool '{name}' not found"))?; + std::process::exit(exit); } db.update(|db| { @@ -573,7 +808,12 @@ pub async fn run_pool(cmd: PoolCmd, db: &Db) -> anyhow::Result<()> { }) .await?; - if json { + if ctx.is_robot() { + 
output::emit_robot( + "openproxy.v1.pool.delete", + serde_json::json!({"deleted": name}), + )?; + } else if json { #[derive(serde::Serialize)] struct DeleteOutput { deleted: String, @@ -583,7 +823,7 @@ pub async fn run_pool(cmd: PoolCmd, db: &Db) -> anyhow::Result<()> { serde_json::to_string_pretty(&DeleteOutput { deleted: name })? ); } else { - println!("Pool '{}' deleted successfully", name); + output::humanln(ctx, format!("Pool '{}' deleted successfully", name)); } } } diff --git a/src/cli/server.rs b/src/cli/server.rs new file mode 100644 index 00000000..5dc3f558 --- /dev/null +++ b/src/cli/server.rs @@ -0,0 +1,512 @@ +//! `openproxy server` — manage the local server daemon lifecycle. +//! +//! Subcommands: +//! - `server start [--detach] [--host H] [--port P]`: start the API server. +//! Without `--detach` we just run the server in the foreground (same as +//! invoking `openproxy` with no subcommand). With `--detach` we re-exec +//! ourselves as a fully detached child, write a PID file under `$DATA_DIR`, +//! and probe the health endpoint before returning. +//! - `server stop`: read the PID file, send SIGTERM to the process (Unix-only +//! for now), and wait up to 5s for a graceful exit. +//! - `server status`: report whether a server is running for this `$DATA_DIR` +//! and whether the local API is reachable. +//! - `server init [--force]`: create an empty `db.json` and emit the first +//! admin API key (shown exactly once). + +#[cfg(unix)] +use std::os::unix::fs::PermissionsExt; +use std::path::{Path, PathBuf}; +use std::time::{Duration, Instant}; + +use anyhow::Context; +use serde_json::json; + +use crate::cli::config::ResolvedConfig; +use crate::cli::output::{emit_error, emit_robot, humanln, OutputCtx}; +use crate::db::Db; +use crate::types::ApiKey; + +/// File name of the PID file written into `$DATA_DIR`. +const PID_FILE: &str = "openproxy.pid"; +/// Sidecar file recording `<host>:<port>` of the running server so that +/// `server status` and `server stop` can probe the right endpoint without +/// asking the user. +const PORT_FILE: &str = "openproxy.endpoint"; + +#[derive(Debug)] +pub struct StartOptions { + pub host: String, + pub port: u16, + pub detach: bool, +} + +pub fn pid_file_path(data_dir: &Path) -> PathBuf { + data_dir.join(PID_FILE) +} + +/// Read the PID file, returning `None` if absent or unparseable. +pub fn read_pid(data_dir: &Path) -> Option<u32> { + let p = pid_file_path(data_dir); + let text = std::fs::read_to_string(&p).ok()?; + text.trim().parse::<u32>().ok() +} + +/// True iff a process with this PID is currently alive. On Unix we use +/// `kill(pid, 0)` which returns ESRCH if the process is gone. +pub fn process_alive(pid: u32) -> bool { + #[cfg(unix)] + { + // SAFETY: `kill` with signal 0 only checks existence; never modifies + // the target process. + unsafe { libc::kill(pid as libc::pid_t, 0) == 0 } + } + #[cfg(not(unix))] + { + // Best-effort fallback: assume alive if PID file exists.
+ let _ = pid; + true + } +} + +fn write_pid(data_dir: &Path, pid: u32) -> anyhow::Result<()> { + std::fs::create_dir_all(data_dir) + .with_context(|| format!("create data dir {}", data_dir.display()))?; + std::fs::write(pid_file_path(data_dir), pid.to_string()) + .with_context(|| format!("write pid file in {}", data_dir.display()))?; + Ok(()) +} + +fn write_endpoint(data_dir: &Path, host: &str, port: u16) -> anyhow::Result<()> { + std::fs::write(data_dir.join(PORT_FILE), format!("{host}:{port}")) + .with_context(|| format!("write endpoint file in {}", data_dir.display()))?; + Ok(()) +} + +pub fn read_endpoint(data_dir: &Path) -> Option<(String, u16)> { + let text = std::fs::read_to_string(data_dir.join(PORT_FILE)).ok()?; + let trimmed = text.trim(); + let (host, port) = trimmed.rsplit_once(':')?; + let port: u16 = port.parse().ok()?; + Some((host.to_string(), port)) +} + +fn remove_pid(data_dir: &Path) { + let _ = std::fs::remove_file(pid_file_path(data_dir)); + let _ = std::fs::remove_file(data_dir.join(PORT_FILE)); +} + +/// `openproxy server start`. +/// +/// In foreground mode, returns `Ok(None)` to signal the caller (main.rs) to +/// continue with the in-process server boot. In detach mode, spawns a child +/// and returns `Ok(Some(exit_code))` so the caller exits. +pub async fn run_start( + ctx: OutputCtx, + cfg: &ResolvedConfig, + opts: StartOptions, +) -> anyhow::Result<Option<i32>> { + // Bail out early if another server appears to be running for this + // DATA_DIR. This protects against accidental double-start. + if let Some(existing) = read_pid(&cfg.data_dir) { + if process_alive(existing) { + let msg = format!( + "openproxy already running (pid {existing}) for data dir {}", + cfg.data_dir.display() + ); + let exit = emit_error(ctx, "conflict", &msg)?; + return Ok(Some(exit)); + } + // Stale PID file: clean it up and continue. + remove_pid(&cfg.data_dir); + } + + if !opts.detach { + // Foreground: let main.rs run the server loop. Write our own PID so + // `server status` can find us. + write_pid(&cfg.data_dir, std::process::id())?; + write_endpoint(&cfg.data_dir, &opts.host, opts.port)?; + if ctx.is_robot() { + emit_robot( + "openproxy.v1.server.start", + json!({ + "pid": std::process::id(), + "host": opts.host, + "port": opts.port, + "detached": false, + "data_dir": cfg.data_dir.display().to_string(), + }), + )?; + } else { + humanln( + ctx, + format!( + "Starting openproxy on {}:{} (pid {})", + opts.host, + opts.port, + std::process::id() + ), + ); + } + return Ok(None); + } + + // Detached: re-exec ourselves with the server defaults but no subcommand, + // detach stdio, and probe the health endpoint to confirm it came up. + let me = std::env::current_exe().context("locate current executable")?; + let log_path = cfg.data_dir.join("openproxy.log"); + std::fs::create_dir_all(&cfg.data_dir).ok(); + let stdout = std::fs::OpenOptions::new() + .create(true) + .append(true) + .open(&log_path) + .with_context(|| format!("open log file {}", log_path.display()))?; + let stderr = stdout.try_clone().context("clone log file handle")?; + + let mut cmd = std::process::Command::new(&me); + cmd.arg("--host") + .arg(&opts.host) + .arg("--port") + .arg(opts.port.to_string()) + .arg("--data-dir") + .arg(&cfg.data_dir); + cmd.stdin(std::process::Stdio::null()); + cmd.stdout(stdout); + cmd.stderr(stderr); + + #[cfg(unix)] + { + use std::os::unix::process::CommandExt; + // SAFETY: pre_exec is called after fork() in the child only.
We use + // `setsid` to disown the controlling terminal so the child survives + // its parent. No allocation, no signals, no locks held. + unsafe { + cmd.pre_exec(|| { + if libc::setsid() == -1 { + return Err(std::io::Error::last_os_error()); + } + Ok(()) + }); + } + } + + let child = cmd + .spawn() + .with_context(|| format!("spawn detached server from {}", me.display()))?; + let child_pid = child.id(); + write_pid(&cfg.data_dir, child_pid)?; + write_endpoint(&cfg.data_dir, &opts.host, opts.port)?; + // Intentionally leak the Child handle; we never wait() on (reap) the + // detached process. + std::mem::forget(child); + + // Probe the health endpoint for up to ~5s. If it doesn't come up we still + // succeed (the process is spawned) but warn. + let probe_url = format!("http://127.0.0.1:{}/api/health", opts.port); + let healthy = wait_for_health(&probe_url, Duration::from_secs(5)).await; + + if ctx.is_robot() { + emit_robot( + "openproxy.v1.server.start", + json!({ + "pid": child_pid, + "host": opts.host, + "port": opts.port, + "detached": true, + "healthy": healthy, + "data_dir": cfg.data_dir.display().to_string(), + "log_file": log_path.display().to_string(), + }), + )?; + } else { + humanln( + ctx, + format!( + "Started openproxy (pid {child_pid}) on {}:{} — logs: {}", + opts.host, + opts.port, + log_path.display() + ), + ); + if !healthy { + humanln( + ctx, + format!( + " warning: health probe at {probe_url} did not respond in 5s; check the log" + ), + ); + } + } + Ok(Some(0)) +} + +/// `openproxy server stop`. +pub async fn run_stop(ctx: OutputCtx, cfg: &ResolvedConfig) -> anyhow::Result<i32> { + let Some(pid) = read_pid(&cfg.data_dir) else { + let msg = format!( + "no openproxy.pid found in {} (server not started by this CLI?)", + cfg.data_dir.display() + ); + return Ok(emit_error(ctx, "not_found", &msg)?); + }; + + if !process_alive(pid) { + remove_pid(&cfg.data_dir); + if ctx.is_robot() { + emit_robot( + "openproxy.v1.server.stop", + json!({"pid": pid, "result": "already_dead"}), + )?; + } else { + humanln(ctx, format!("openproxy (pid {pid}) was not running")); + } + return Ok(0); + } + + #[cfg(unix)] + { + // SAFETY: standard `kill(pid, SIGTERM)`; we own the choice of signal. + let rc = unsafe { libc::kill(pid as libc::pid_t, libc::SIGTERM) }; + if rc != 0 { + let msg = format!( + "kill(pid={pid}, SIGTERM) failed: {}", + std::io::Error::last_os_error() + ); + return Ok(emit_error(ctx, "other", &msg)?); + } + } + + // Wait up to 5s for the process to exit. + let deadline = Instant::now() + Duration::from_secs(5); + while Instant::now() < deadline { + if !process_alive(pid) { + break; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + let stopped = !process_alive(pid); + if stopped { + remove_pid(&cfg.data_dir); + } + + if ctx.is_robot() { + emit_robot( + "openproxy.v1.server.stop", + json!({ + "pid": pid, + "result": if stopped { "stopped" } else { "timeout" }, + }), + )?; + } else if stopped { + humanln(ctx, format!("Stopped openproxy (pid {pid})")); + } else { + humanln( + ctx, + format!("Sent SIGTERM to pid {pid} but it is still alive after 5s"), + ); + } + Ok(if stopped { 0 } else { 1 }) +} + +/// `openproxy server status`. +pub async fn run_status( + ctx: OutputCtx, + cfg: &ResolvedConfig, + fallback_port: u16, +) -> anyhow::Result<i32> { + let pid = read_pid(&cfg.data_dir); + let alive = pid.map(process_alive).unwrap_or(false); + + // Probe order: explicit --url > recorded endpoint sidecar > default port.
+ let probe_url = if let Some(url) = cfg.remote_url.clone() { + url + } else if let Some((host, port)) = read_endpoint(&cfg.data_dir) { + // 0.0.0.0 / :: are bind addresses, not dial addresses. + let dial_host = if host == "0.0.0.0" || host == "::" || host.is_empty() { + "127.0.0.1".to_string() + } else { + host + }; + format!("http://{dial_host}:{port}") + } else { + format!("http://127.0.0.1:{fallback_port}") + }; + let health_url = format!("{}/api/health", probe_url.trim_end_matches('/')); + let reachable = probe_health(&health_url).await; + + let db_summary = match Db::load().await { + Ok(db) => { + let snap = db.snapshot(); + Some(json!({ + "providers": snap.provider_connections.len(), + "keys": snap.api_keys.len(), + "pools": snap.proxy_pools.len(), + "combos": snap.combos.len(), + })) + } + Err(_) => None, + }; + + if ctx.is_robot() { + emit_robot( + "openproxy.v1.server.status", + json!({ + "pid": pid, + "process_alive": alive, + "reachable": reachable, + "probe_url": health_url, + "data_dir": cfg.data_dir.display().to_string(), + "db": db_summary, + }), + )?; + } else { + humanln(ctx, "openproxy server:"); + match pid { + Some(p) if alive => humanln(ctx, format!(" pid: {p} (alive)")), + Some(p) => humanln(ctx, format!(" pid: {p} (stale)")), + None => humanln(ctx, " pid: none"), + } + humanln( + ctx, + format!( + " reachable: {} ({health_url})", + if reachable { "yes" } else { "no" } + ), + ); + if let Some(db) = &db_summary { + humanln(ctx, format!(" db: {db}")); + } + } + Ok(if alive || reachable { 0 } else { 1 }) +} + +/// `openproxy server init`. Initializes an empty `db.json` and prints one +/// fresh admin API key. Refuses to overwrite an existing DB unless `force`. +pub async fn run_init(ctx: OutputCtx, cfg: &ResolvedConfig, force: bool) -> anyhow::Result<i32> { + std::fs::create_dir_all(&cfg.data_dir) + .with_context(|| format!("create data dir {}", cfg.data_dir.display()))?; + + let db_path = cfg.data_dir.join("db.json"); + if db_path.exists() && !force { + let msg = format!( + "db.json already exists at {} (use --force to overwrite)", + db_path.display() + ); + return Ok(emit_error(ctx, "conflict", &msg)?); + } + + // Touch a clean db.json by writing an empty AppDb shape, then loading it. + let empty = serde_json::json!({ + "providerConnections": [], + "providerNodes": [], + "apiKeys": [], + "proxyPools": [], + "combos": [], + "modelAliases": {}, + "modelAvailability": {}, + "settings": {} + }); + let tmp = cfg.data_dir.join(".db.json.init"); + std::fs::write(&tmp, serde_json::to_vec_pretty(&empty)?) + .with_context(|| format!("write {}", tmp.display()))?; + // Lock down permissions so the DB isn't world-readable on shared boxes. + #[cfg(unix)] + { + let _ = std::fs::set_permissions(&tmp, std::fs::Permissions::from_mode(0o600)); + } + std::fs::rename(&tmp, &db_path).with_context(|| format!("install {}", db_path.display()))?; + + // Now load the db and append a fresh admin key.
+ let key_secret = generate_api_key(); + let key = ApiKey { + id: uuid::Uuid::new_v4().to_string(), + name: "admin".into(), + key: key_secret.clone(), + machine_id: None, + is_active: Some(true), + created_at: Some(chrono::Utc::now().to_rfc3339()), + extra: std::collections::BTreeMap::new(), + }; + + let db = Db::load().await?; + db.update(|d| d.api_keys.push(key.clone())).await?; + + if ctx.is_robot() { + emit_robot( + "openproxy.v1.server.init", + json!({ + "data_dir": cfg.data_dir.display().to_string(), + "db_path": db_path.display().to_string(), + "admin_key": { + "id": key.id, + "name": key.name, + "key": key_secret, + }, + }), + )?; + } else { + humanln( + ctx, + format!("Initialized openproxy at {}", cfg.data_dir.display()), + ); + humanln(ctx, ""); + humanln(ctx, "Admin API key (save it now — shown only once):"); + humanln(ctx, format!(" {key_secret}")); + humanln(ctx, ""); + humanln( + ctx, + "Start the server with: openproxy server start --detach", + ); + } + Ok(0) +} + +fn generate_api_key() -> String { + use rand::RngCore; + let mut bytes = [0u8; 24]; + rand::thread_rng().fill_bytes(&mut bytes); + let hex: String = bytes.iter().map(|b| format!("{b:02x}")).collect(); + format!("op-{hex}") +} + +async fn probe_health(url: &str) -> bool { + let client = match reqwest::Client::builder() + .timeout(Duration::from_millis(800)) + .build() + { + Ok(c) => c, + Err(_) => return false, + }; + matches!(client.get(url).send().await, Ok(r) if r.status().is_success()) +} + +async fn wait_for_health(url: &str, total: Duration) -> bool { + let deadline = Instant::now() + total; + while Instant::now() < deadline { + if probe_health(url).await { + return true; + } + tokio::time::sleep(Duration::from_millis(200)).await; + } + false +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn pid_file_round_trip() { + let dir = tempfile::tempdir().unwrap(); + assert!(read_pid(dir.path()).is_none()); + write_pid(dir.path(), 12345).unwrap(); + assert_eq!(read_pid(dir.path()), Some(12345)); + remove_pid(dir.path()); + assert!(read_pid(dir.path()).is_none()); + } + + #[test] + fn generated_key_is_op_prefixed_and_long() { + let k = generate_api_key(); + assert!(k.starts_with("op-")); + // 24 random bytes -> 48 hex chars + "op-" -> 51 total. 
+ assert_eq!(k.len(), 51); + } +} diff --git a/src/main.rs b/src/main.rs index d6deee8c..af55e1ca 100644 --- a/src/main.rs +++ b/src/main.rs @@ -6,7 +6,7 @@ use tracing::info; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; use openproxy::cli::config::ResolvedConfig; -use openproxy::cli::{Cli, Command, SchemaCmd}; +use openproxy::cli::{AuthCmd, Cli, Command, SchemaCmd, ServerCmd}; use openproxy::db::watcher::spawn_watcher; use openproxy::db::Db; use openproxy::server::console_logs::{shared_console_log_buffer, ConsoleLogMakeWriter}; @@ -28,25 +28,25 @@ async fn main() -> anyhow::Result<()> { Command::Provider { cmd } => { let db = Db::load().await?; let db = Arc::new(db); - openproxy::cli::run_provider(cmd.clone(), &db).await?; + openproxy::cli::run_provider(cmd.clone(), &db, ctx).await?; return Ok(()); } Command::Key { cmd } => { let db = Db::load().await?; let db = Arc::new(db); - openproxy::cli::run_key(cmd.clone(), &db).await?; + openproxy::cli::run_key(cmd.clone(), &db, ctx).await?; return Ok(()); } Command::Pool { cmd } => { let db = Db::load().await?; let db = Arc::new(db); - openproxy::cli::run_pool(cmd.clone(), &db).await?; + openproxy::cli::run_pool(cmd.clone(), &db, ctx).await?; return Ok(()); } Command::Tunnel { cmd } => { let db = Db::load().await?; let db = Arc::new(db); - openproxy::cli::run_tunnel(cmd.clone(), db).await?; + openproxy::cli::run_tunnel(cmd.clone(), db, ctx).await?; return Ok(()); } Command::Route { @@ -58,12 +58,15 @@ async fn main() -> anyhow::Result<()> { } => { let db = Db::load().await?; let db = Arc::new(db); + // `--robot` implies JSON mode for route, but the per-event + // shape is left to a future M3 refactor (streaming envelope). + let json_mode = *json || ctx.is_robot(); return run_route( model.clone(), combo.clone(), prompt.clone(), *stream, - *json, + json_mode, &db, ) .await; @@ -98,6 +101,87 @@ async fn main() -> anyhow::Result<()> { } return Ok(()); } + Command::Server { cmd } => match cmd { + ServerCmd::Start { detach, host, port } => { + let opts = openproxy::cli::server::StartOptions { + host: host.clone().unwrap_or_else(|| cli.host.clone()), + port: port.unwrap_or(cli.port), + detach: *detach, + }; + match openproxy::cli::server::run_start(ctx, &resolved, opts).await? { + Some(exit) => { + if exit != 0 { + std::process::exit(exit); + } + return Ok(()); + } + // Foreground: fall through to the server boot below. + None => {} + } + } + ServerCmd::Stop => { + let exit = openproxy::cli::server::run_stop(ctx, &resolved).await?; + if exit != 0 { + std::process::exit(exit); + } + return Ok(()); + } + ServerCmd::Status => { + let exit = openproxy::cli::server::run_status(ctx, &resolved, cli.port).await?; + if exit != 0 { + std::process::exit(exit); + } + return Ok(()); + } + ServerCmd::Init { force } => { + let exit = openproxy::cli::server::run_init(ctx, &resolved, *force).await?; + if exit != 0 { + std::process::exit(exit); + } + return Ok(()); + } + }, + Command::Auth { cmd } => { + let exit = match cmd { + AuthCmd::Login { + url, + api_key, + profile, + no_verify, + no_activate, + } => { + openproxy::cli::auth::run_login( + ctx, + openproxy::cli::auth::LoginOptions { + url: url.clone(), + api_key: api_key.clone(), + profile: profile.clone(), + no_verify: *no_verify, + no_activate: *no_activate, + }, + ) + .await? 
} + AuthCmd::Logout { + profile, + keep_default, + } => openproxy::cli::auth::run_logout( + ctx, + openproxy::cli::auth::LogoutOptions { + profile: profile.clone(), + keep_default: *keep_default, + }, + )?, + AuthCmd::Whoami { verify } => { + openproxy::cli::auth::run_whoami(ctx, &resolved, *verify).await? + } + AuthCmd::List => openproxy::cli::auth::run_list(ctx)?, + }; + if exit != 0 { + std::process::exit(exit); + } + return Ok(()); + } } } diff --git a/tests/server_lifecycle.rs b/tests/server_lifecycle.rs new file mode 100644 index 00000000..9f8214a1 --- /dev/null +++ b/tests/server_lifecycle.rs @@ -0,0 +1,148 @@ +//! End-to-end test for `openproxy server start --detach` / `server stop`. +//! +//! Spawns the real binary, asserts the PID and endpoint sidecar files are +//! written, that the server answers on /api/health, and that stop tears +//! everything down. Skipped automatically if the binary is missing. + +use std::path::{Path, PathBuf}; +use std::process::Command; +use std::time::{Duration, Instant}; + +fn locate_binary() -> Option<PathBuf> { + // Honor the convention `CARGO_BIN_EXE_<name>` set during integration test + // builds, with a fallback to `target/debug/openproxy` for `cargo test` + // invocations that don't have the env var. + if let Some(p) = option_env!("CARGO_BIN_EXE_openproxy") { + let path = PathBuf::from(p); + if path.exists() { + return Some(path); + } + } + let cwd = std::env::current_dir().ok()?; + for ancestor in cwd.ancestors() { + let candidate = ancestor.join("target/debug/openproxy"); + if candidate.exists() { + return Some(candidate); + } } + None +} + +fn next_free_port() -> u16 { + // Bind to port 0 and let the OS pick a free one. We immediately drop the + // listener — there is a tiny race against another process snatching the + // port, but it's good enough for an integration test. + let listener = std::net::TcpListener::bind("127.0.0.1:0").expect("bind ephemeral"); + let port = listener.local_addr().expect("local addr").port(); + drop(listener); + port +} + +fn wait_for_file(p: &Path, total: Duration) -> bool { + let deadline = Instant::now() + total; + while Instant::now() < deadline { + if p.exists() { + return true; + } + std::thread::sleep(Duration::from_millis(50)); + } + false +} + +#[test] +fn server_start_detach_writes_pid_and_endpoint_then_stop_cleans_up() { + let Some(bin) = locate_binary() else { + eprintln!("skipping: openproxy binary not found"); + return; + }; + + let dir = tempfile::tempdir().expect("tempdir"); + let port = next_free_port(); + + // First: init the data dir so the server has a db.json to load. + let init = Command::new(&bin) + .arg("--data-dir") + .arg(dir.path()) + .arg("server") + .arg("init") + .arg("--robot") + .output() + .expect("run init"); + assert!(init.status.success(), "init failed: {init:?}"); + + // Start detached.
+ let start = Command::new(&bin) + .arg("--data-dir") + .arg(dir.path()) + .arg("server") + .arg("start") + .arg("--detach") + .arg("--host") + .arg("127.0.0.1") + .arg("--port") + .arg(port.to_string()) + .arg("--robot") + .output() + .expect("run start"); + assert!( + start.status.success(), + "detached start failed: stdout={} stderr={}", + String::from_utf8_lossy(&start.stdout), + String::from_utf8_lossy(&start.stderr) + ); + + let pid_file = dir.path().join("openproxy.pid"); + let endpoint_file = dir.path().join("openproxy.endpoint"); + assert!( + wait_for_file(&pid_file, Duration::from_secs(3)), + "pid file not created" + ); + assert!( + wait_for_file(&endpoint_file, Duration::from_secs(3)), + "endpoint file not created" + ); + + let endpoint = std::fs::read_to_string(&endpoint_file).expect("read endpoint"); + assert_eq!(endpoint.trim(), format!("127.0.0.1:{port}")); + + // Status should report it alive and reachable. + let status = Command::new(&bin) + .arg("--data-dir") + .arg(dir.path()) + .arg("server") + .arg("status") + .arg("--robot") + .output() + .expect("run status"); + assert!(status.status.success(), "status failed: {status:?}"); + let status_stdout = String::from_utf8_lossy(&status.stdout); + assert!( + status_stdout.contains("\"process_alive\":true"), + "status did not report process_alive=true: {status_stdout}" + ); + assert!( + status_stdout.contains("\"reachable\":true"), + "status did not report reachable=true: {status_stdout}" + ); + + // Stop. + let stop = Command::new(&bin) + .arg("--data-dir") + .arg(dir.path()) + .arg("server") + .arg("stop") + .arg("--robot") + .output() + .expect("run stop"); + assert!(stop.status.success(), "stop failed: {stop:?}"); + let stop_stdout = String::from_utf8_lossy(&stop.stdout); + assert!( + stop_stdout.contains("\"result\":\"stopped\"") + || stop_stdout.contains("\"result\":\"already_dead\""), + "stop did not confirm shutdown: {stop_stdout}" + ); + + // PID and endpoint files should be gone. + assert!(!pid_file.exists(), "pid file not cleaned up"); + assert!(!endpoint_file.exists(), "endpoint file not cleaned up"); +}