diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index fab3f62a..a24fc160 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -5626,9 +5626,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" -version = "0.103.12" +version = "0.103.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8279bb85272c9f10811ae6a6c547ff594d6a7f3c6c6b02ee9726d1d0dcfcdd06" +checksum = "61c429a8649f110dddef65e2a5ad240f747e85f7758a6bccc7e5777bd33f756e" dependencies = [ "aws-lc-rs", "ring", diff --git a/src-tauri/src/enterprise/service_locations/windows.rs b/src-tauri/src/enterprise/service_locations/windows.rs index 2205eb9d..e6aa14d6 100644 --- a/src-tauri/src/enterprise/service_locations/windows.rs +++ b/src-tauri/src/enterprise/service_locations/windows.rs @@ -15,7 +15,6 @@ use defguard_wireguard_rs::{ }; use known_folders::get_known_folder_path; use log::{debug, error, warn}; -use tokio::time::sleep; use windows::{ core::PSTR, Win32::System::RemoteDesktop::{ @@ -55,7 +54,9 @@ const SERVICE_LOCATIONS_SUBDIR: &str = "service_locations"; /// /// Note: `NotifyAddrChange` also fires when WireGuard interfaces are created. This is /// harmless because `connect_to_service_locations` skips already-connected locations. -pub(crate) async fn watch_for_network_change( +/// +/// Runs on a dedicated OS thread because `NotifyAddrChange` is a blocking syscall. 
+pub(crate) fn watch_for_network_change( service_location_manager: Arc<RwLock<ServiceLocationManager>>, ) { loop { @@ -65,7 +66,7 @@ pub(crate) async fn watch_for_network_change( if result != 0 { error!("NotifyAddrChange failed with error code: {result}"); - sleep(NETWORK_CHANGE_MONITOR_RESTART_DELAY).await; + std::thread::sleep(NETWORK_CHANGE_MONITOR_RESTART_DELAY); continue; } @@ -73,14 +74,14 @@ "Network address change detected, waiting {NETWORK_STABILIZATION_DELAY:?}s for \ network to stabilize before attempting service location connections..." ); - sleep(NETWORK_STABILIZATION_DELAY).await; + std::thread::sleep(NETWORK_STABILIZATION_DELAY); debug!("Attempting to connect to service locations after network change"); - match service_location_manager + let connect_result = service_location_manager .write() .unwrap() - .connect_to_service_locations() - { + .connect_to_service_locations(); + match connect_result { Ok(_) => { debug!("Service location connect attempt after network change completed"); } @@ -91,11 +92,15 @@ } } -pub(crate) async fn watch_for_login_logoff( +/// Watches for user logon/logoff events and connects/disconnects pre-logon service locations +/// accordingly. +/// +/// Runs on a dedicated OS thread because `WTSWaitSystemEvent` is a blocking syscall. 
+pub(crate) fn watch_for_login_logoff( service_location_manager: Arc<RwLock<ServiceLocationManager>>, ) -> Result<(), ServiceLocationError> { loop { - let mut event_flags = 0; + let mut event_flags: u32 = 0; let success = unsafe { WTSWaitSystemEvent( Some(WTS_CURRENT_SERVER_HANDLE), @@ -110,7 +115,7 @@ } Err(err) => { error!("Failed waiting for login/logoff event: {err:?}"); - sleep(Duration::from_secs(LOGIN_LOGOFF_EVENT_RETRY_DELAY_SECS)).await; + std::thread::sleep(Duration::from_secs(LOGIN_LOGOFF_EVENT_RETRY_DELAY_SECS)); continue; } }; @@ -118,7 +123,6 @@ if event_flags & WTS_EVENT_LOGON != 0 { debug!("Detected user logon, attempting to auto-disconnect from service locations."); service_location_manager - .clone() .write() .unwrap() .disconnect_service_locations(Some(ServiceLocationMode::PreLogon))?; @@ -126,7 +130,6 @@ if event_flags & WTS_EVENT_LOGOFF != 0 { debug!("Detected user logoff, attempting to auto-connect to service locations."); service_location_manager - .clone() .write() .unwrap() .connect_to_service_locations()?; @@ -281,7 +284,11 @@ pub(crate) fn is_user_logged_in() -> bool { buffer.0 as *mut _, ); - // We found an active session with a username + // We found an active session with a username. + // Free the session list before returning to avoid a leak. 
+ windows::Win32::System::RemoteDesktop::WTSFreeMemory( + pp_sessions as _, + ); return true; } } diff --git a/src-tauri/src/service/windows.rs b/src-tauri/src/service/windows.rs index dec55f23..00c5c0a0 100644 --- a/src-tauri/src/service/windows.rs +++ b/src-tauri/src/service/windows.rs @@ -115,17 +115,19 @@ fn run_service() -> Result<(), DaemonError> { let service_location_manager = Arc::new(RwLock::new(service_location_manager)); - // Spawn network change monitoring task first so NotifyAddrChange is registered as early - // as possible, minimising the window in which a network event could be missed before - // the watcher is listening. The retry task below is the backstop for any event that - // still slips through that window. + // Spawn network change monitoring on a dedicated OS thread so the blocking + // NotifyAddrChange syscall does not stall Tokio's async worker threads. + // Register it first so no network event can be missed before the watcher is listening; + // the retry loop below is the backstop for any event that slips through the startup window. let service_location_manager_clone = service_location_manager.clone(); - runtime.spawn(async move { - let manager = service_location_manager_clone; - info!("Starting network change monitoring"); - watch_for_network_change(manager.clone()).await; - error!("Network change monitoring ended unexpectedly."); - }); + std::thread::Builder::new() + .name("network-change-monitor".to_string()) + .spawn(move || { + info!("Starting network change monitoring"); + watch_for_network_change(service_location_manager_clone); + error!("Network change monitoring ended unexpectedly."); + }) + .expect("Failed to spawn network change monitor thread"); // Spawn service location auto-connect task with retries. 
// Each attempt skips locations that are already connected, so it is safe to call @@ -174,32 +176,34 @@ fn run_service() -> Result<(), DaemonError> { info!("Service location auto-connect task finished"); }); - // Spawn login/logoff monitoring task, runs concurrently with the tasks above. + // Spawn login/logoff monitoring on a dedicated OS thread so the blocking + // WTSWaitSystemEvent syscall does not stall Tokio's async worker threads. let service_location_manager_clone = service_location_manager.clone(); - runtime.spawn(async move { - let manager = service_location_manager_clone; - - info!("Starting login/logoff event monitoring"); - loop { - match watch_for_login_logoff(manager.clone()).await { - Ok(()) => { - warn!( - "Login/logoff event monitoring ended unexpectedly. Restarting in \ - {LOGIN_LOGOFF_MONITORING_RESTART_DELAY_SECS:?}..." - ); - sleep(LOGIN_LOGOFF_MONITORING_RESTART_DELAY_SECS).await; - } - Err(e) => { - error!( - "Error in login/logoff event monitoring: {e}. Restarting in \ - {LOGIN_LOGOFF_MONITORING_RESTART_DELAY_SECS:?}...", - ); - sleep(LOGIN_LOGOFF_MONITORING_RESTART_DELAY_SECS).await; - info!("Restarting login/logoff event monitoring"); + std::thread::Builder::new() + .name("login-logoff-monitor".to_string()) + .spawn(move || { + info!("Starting login/logoff event monitoring"); + loop { + match watch_for_login_logoff(service_location_manager_clone.clone()) { + Ok(()) => { + warn!( + "Login/logoff event monitoring ended unexpectedly. Restarting in \ + {LOGIN_LOGOFF_MONITORING_RESTART_DELAY_SECS:?}..." + ); + std::thread::sleep(LOGIN_LOGOFF_MONITORING_RESTART_DELAY_SECS); + } + Err(e) => { + error!( + "Error in login/logoff event monitoring: {e}. 
Restarting in \ + {LOGIN_LOGOFF_MONITORING_RESTART_DELAY_SECS:?}...", + ); + std::thread::sleep(LOGIN_LOGOFF_MONITORING_RESTART_DELAY_SECS); + info!("Restarting login/logoff event monitoring"); + } } } - } - }); + }) + .expect("Failed to spawn login/logoff monitor thread"); // Spawn the main gRPC server task let service_location_manager_clone = service_location_manager.clone();