From 49f760035cf7e79b8c7141a90519ba3d0a10f88a Mon Sep 17 00:00:00 2001 From: zhanghe Date: Wed, 17 Dec 2025 18:38:00 +0800 Subject: [PATCH 1/2] support no realtime signal process --- adapter/cmsis/src/os2/thread.rs | 38 ++- header/src/lib.rs | 3 + kernel/src/scheduler/mod.rs | 24 +- kernel/src/signal/mod.rs | 38 ++- kernel/src/signal/syscall.rs | 488 +++++++++++++++++++++++++++++ kernel/src/syscall_handlers/mod.rs | 86 +++-- kernel/src/thread/mod.rs | 204 +++++++++++- test_harness/src/lib.rs | 41 ++- 8 files changed, 834 insertions(+), 88 deletions(-) create mode 100644 kernel/src/signal/syscall.rs diff --git a/adapter/cmsis/src/os2/thread.rs b/adapter/cmsis/src/os2/thread.rs index f8700977..123731aa 100644 --- a/adapter/cmsis/src/os2/thread.rs +++ b/adapter/cmsis/src/os2/thread.rs @@ -62,6 +62,20 @@ os_adapter! { } } +extern "C" fn cmsis_sigterm_handler(_signum: i32) { + let current = scheduler::current_thread(); + let Some(alien_ptr) = current.get_alien_ptr() else { + return; + }; + let t = unsafe { &mut *(alien_ptr.as_ptr() as *mut Os2Thread) }; + exit_os2_thread(t); + scheduler::retire_me(); +} + +extern "C" fn cmsis_suspend_handler(_signum: i32) { + scheduler::suspend_me_for(usize::MAX) +} + impl Os2Thread { delegate! { to self.inner() { @@ -249,15 +263,9 @@ pub extern "C" fn osThreadNew( .build(); { let mut l = t.lock(); - l.register_once_signal_handler(libc::SIGTERM, move || { - let current = scheduler::current_thread(); - let Some(alien_ptr) = current.get_alien_ptr() else { - return; - }; - let t = unsafe { &mut *(alien_ptr.as_ptr() as *mut Os2Thread) }; - exit_os2_thread(t); - scheduler::retire_me(); - }); + // CMSIS threads use SIGTERM as the termination request. + // Bind a persistent kernel-side handler to perform cleanup + retire. 
+ l.install_signal_handler(libc::SIGTERM, cmsis_sigterm_handler); if merge_attr.stack_mem.is_null() { let stack_base = t.stack_base(); l.set_cleanup(Entry::Closure(Box::new(move || { @@ -462,12 +470,14 @@ pub extern "C" fn osThreadSuspend(thread_id: osThreadId_t) -> osStatus_t { scheduler::suspend_me_for(usize::MAX); return osStatus_t_osOK; } - // FIXME: We should use SIGUSR1 here, however it's not defined yet. - if !t - .lock() - .kill_with_once_handler(libc::SIGHUP, move || scheduler::suspend_me_for(usize::MAX)) + // Use SIGUSR1 signal to ask the target thread to self-suspend. { - return osStatus_t_osErrorResource; + let mut guard = t.lock(); + guard.install_signal_handler(libc::SIGUSR1, cmsis_suspend_handler); + // If the signal is already pending, treat it as resource-busy. + if !guard.kill(libc::SIGUSR1) { + return osStatus_t_osErrorResource; + } } osStatus_t_osOK } diff --git a/header/src/lib.rs b/header/src/lib.rs index d1671437..71c85b49 100644 --- a/header/src/lib.rs +++ b/header/src/lib.rs @@ -60,6 +60,9 @@ pub mod syscalls { RtSigQueueInfo, RtSigSuspend, RtSigTimedWait, + Kill, + Tgkill, + Tkill, Socket, Bind, Connect, diff --git a/kernel/src/scheduler/mod.rs b/kernel/src/scheduler/mod.rs index 4e3ef2da..c6ba319b 100644 --- a/kernel/src/scheduler/mod.rs +++ b/kernel/src/scheduler/mod.rs @@ -143,11 +143,33 @@ fn prepare_signal_handling(t: &ThreadNode) { if !l.activate_signal_context() { return; }; + + // Pick one deliverable signal so we can choose the correct stack for this delivery + let pending = l.pending_signals(); + let mut deliverable: i32 = 0; + for signum in 1..32 { + if pending & (1 << signum) == 0 { + continue; + } + if l.is_signal_blocked(signum) { + continue; + } + deliverable = signum; + break; + } + let ctx = l.saved_sp() as *mut arch::Context; let ctx = unsafe { &mut *ctx }; // Update ctx so that signal context will be restored. 
ctx.set_return_address(arch::switch_stack as usize) - .set_arg(0, l.signal_handler_sp()) + .set_arg( + 0, + if deliverable != 0 { + l.signal_delivery_sp(deliverable) + } else { + l.signal_handler_sp() + }, + ) .set_arg(1, signal::handler_entry as usize); } diff --git a/kernel/src/signal/mod.rs b/kernel/src/signal/mod.rs index 5199f48e..9beeb457 100644 --- a/kernel/src/signal/mod.rs +++ b/kernel/src/signal/mod.rs @@ -14,6 +14,8 @@ use crate::{arch, scheduler, thread, thread::ThreadNode}; +pub mod syscall; + fn handle_signal_fallback(signum: i32) { if signum != libc::SIGTERM { return; @@ -22,30 +24,48 @@ fn handle_signal_fallback(signum: i32) { } fn handle_signal(t: &ThreadNode, signum: i32) { - let mut l = t.lock(); - let Some(handler) = l.take_signal_handler(signum) else { - drop(l); + // Don't use once handler now + // POSIX-ish: consult installed sigaction. + let sa = { + let l = t.lock(); + l.get_sigaction(signum) + }; + + // None => SIG_DFL in our kernel representation. + let Some(handler) = sa.sa_handler else { + // almost all non realtime signals' default action is terminate the process return handle_signal_fallback(signum); }; - drop(l); - handler(); + + handler(signum); } // This routine is supposed to be executed in THREAD mode. #[inline(never)] pub(crate) unsafe extern "C" fn handler_entry(_sp: usize, _old_sp: usize) { let current = scheduler::current_thread(); + // Deliver only unblocked signals. + // NOTE: pending_signals uses kernel numbering (bit = 1< SigMask { + // Only preserve signals 1..31; bit 31 (signal 32) is unused with NSIG=32. 
+ let mut v: SigMask = 0; + unsafe { + core::ptr::copy_nonoverlapping( + (&set as *const sigset_t).cast::(), + (&mut v as *mut SigMask).cast::(), + core::mem::size_of::().min(core::mem::size_of::()), + ); + } + v & 0x7FFF_FFFF +} + +#[inline] +fn mask_to_sigset(mask: SigMask) -> sigset_t { + let mut out: sigset_t = unsafe { core::mem::zeroed() }; + unsafe { + core::ptr::copy_nonoverlapping( + (&mask as *const SigMask).cast::(), + (&mut out as *mut sigset_t).cast::(), + core::mem::size_of::().min(core::mem::size_of::()), + ); + } + out +} + +// Types must match the userspace layout used by `libc` and the syscall handler. +#[allow(non_camel_case_types)] +#[repr(C)] +#[derive(Clone, Copy)] +pub struct sigaltstack { + pub ss_sp: *mut c_void, + pub ss_flags: c_int, + pub ss_size: usize, +} + +impl Default for sigaltstack { + fn default() -> Self { + Self { + ss_sp: core::ptr::null_mut(), + ss_flags: 0, + ss_size: 0, + } + } +} + +#[allow(non_camel_case_types)] +#[repr(align(8))] +#[derive(Clone, Copy, Default)] +pub struct siginfo_t { + pub si_signo: c_int, + pub si_errno: c_int, + pub si_code: c_int, + _pad: [c_int; 29], + _align: [usize; 0], +} + +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, Default)] +pub struct sigaction { + pub sa_sigaction: sighandler_t, + pub sa_mask: sigset_t, + pub sa_flags: c_int, + pub sa_restorer: Option, +} + +#[inline] +fn handler_to_sighandler_t(h: Option) -> sighandler_t { + match h { + core::option::Option::None => libc::SIG_DFL as sighandler_t, + Some(f) => f as usize as sighandler_t, + } +} + +#[inline] +fn sighandler_t_to_handler(v: sighandler_t) -> Option { + if v == libc::SIG_DFL as sighandler_t { + None + } else if v == libc::SIG_IGN as sighandler_t { + // Kernel currently doesn't model SIG_IGN distinctly; treat as default. 
+ None + } else if v == libc::SIG_ERR as sighandler_t { + None + } else { + Some(unsafe { core::mem::transmute::(v as usize) }) + } +} + +#[inline] +fn validate_signum(signum: c_int) -> Result { + if signum <= 0 || signum >= 32 { + return Err(-libc::EINVAL); + } + Ok(signum as usize) +} + +fn find_thread_by_tid(tid: pid_t) -> Option { + let target = tid as usize; + let mut it = GlobalQueueVisitor::new(); + while let Some(t) = it.next() { + if Thread::id(&t) == target { + return Some(t); + } + } + None +} + +#[inline] +fn sigset_bit_for(signum: usize) -> sigset_t { + // POSIX numbering: signal N is bit (N-1). + mask_to_sigset(1u32 << ((signum - 1) as u32)) +} + +#[inline] +fn has_pending_in_set_deliverable(t: &Thread, set: sigset_t) -> bool { + let pending = t.pending_signals_bitmap(); + let set_mask = sigset_to_mask(set); + for signum in 1..32 { + let bit = 1u32 << signum; + if pending & bit == 0 { + continue; + } + let mask = 1u32 << ((signum - 1) as u32); + if set_mask & mask == 0 { + continue; + } + if !t.is_signal_blocked(signum as i32) { + return true; + } + } + false +} + +#[inline] +fn take_pending_from_set(t: &mut Thread, set: sigset_t) -> Option<(c_int, SigInfo)> { + let pending = t.pending_signals(); + let set_mask = sigset_to_mask(set); + for signum in 1..32 { + let bit = 1u32 << signum; + if pending & bit == 0 { + continue; + } + let mask = 1u32 << ((signum - 1) as u32); + if set_mask & mask == 0 { + continue; + } + t.clear_signal(signum as i32); + let info = t.take_siginfo(signum as i32).unwrap_or(SigInfo { + si_signo: signum as i32, + si_errno: 0, + si_code: 0, + }); + return Some((signum as c_int, info)); + } + None +} + +fn deliver_to_tid(tid: pid_t, sig: c_int, info: *mut siginfo_t) -> c_int { + let signum = match validate_signum(sig) { + Ok(v) => v as i32, + Err(e) => return e, + }; + + let Some(target) = find_thread_by_tid(tid) else { + return -libc::ESRCH; + }; + + let sinfo = if info.is_null() { + SigInfo { + si_signo: signum, + si_errno: 0, + 
si_code: 0, + } + } else { + let u = unsafe { &*(info as *const siginfo_t) }; + SigInfo { + si_signo: u.si_signo, + si_errno: u.si_errno, + si_code: u.si_code, + } + }; + + target.lock().push_siginfo(signum, sinfo); + + let st = target.state(); + if st == thread::SUSPENDED { + let _ = scheduler::queue_ready_thread(thread::SUSPENDED, target); + } + 0 +} + +pub fn sigaction(signum: c_int, act: *const c_void, oact: *mut c_void) -> c_int { + let signum: c_int = match validate_signum(signum) { + Ok(v) => v as c_int, + Err(e) => return e, + }; + let current = scheduler::current_thread(); + + // Copy out old action if requested. + if !oact.is_null() { + let old = { + let l = current.lock(); + l.get_sigaction(signum) + }; + let out = sigaction { + sa_sigaction: handler_to_sighandler_t(old.sa_handler), + sa_mask: old.sa_mask, + sa_flags: old.sa_flags as c_int, + sa_restorer: None, + }; + unsafe { (oact as *mut c_void).cast::().write(out) }; + } + + if act.is_null() { + return 0; + } + + let new_act = unsafe { &*(act as *const c_void).cast::() }; + let sa = KernelSigAction { + sa_handler: sighandler_t_to_handler(new_act.sa_sigaction), + sa_flags: new_act.sa_flags as usize, + sa_mask: new_act.sa_mask, + }; + current.lock().set_sigaction(signum, sa); + 0 +} + +pub fn sigaltstack(ss: *const c_void, old_ss: *mut c_void) -> c_int { + let current = scheduler::current_thread(); + if !old_ss.is_null() { + let old = current.lock().get_sigaltstack(); + let out = sigaltstack { + ss_sp: old.ss_sp as *mut core::ffi::c_void as *mut c_void, + ss_flags: old.ss_flags, + ss_size: old.ss_size, + }; + unsafe { (old_ss as *mut c_void).cast::().write(out) }; + } + + if ss.is_null() { + return 0; + } + + let new_ss = unsafe { &*(ss as *const c_void).cast::() }; + let kss = SigAltStack { + ss_sp: new_ss.ss_sp as *mut c_void as *mut core::ffi::c_void, + ss_flags: new_ss.ss_flags, + ss_size: new_ss.ss_size, + }; + current.lock().set_sigaltstack(kss); + 0 +} + +pub fn sigpending(set: *mut sigset_t) -> 
c_int { + if set.is_null() { + return -libc::EINVAL; + } + let current = scheduler::current_thread(); + let pending = current.lock().pending_signals(); + let mut out: sigset_t = 0; + for signum in 1..32 { + if pending & (1 << signum) != 0 { + out |= sigset_bit_for(signum); + } + } + unsafe { set.write(out) }; + 0 +} + +pub fn sigprocmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c_int { + let current = scheduler::current_thread(); + let mut l = current.lock(); + + if !oldset.is_null() { + unsafe { oldset.write(mask_to_sigset(l.signal_mask())) }; + } + + if set.is_null() { + return 0; + } + let new_set = sigset_to_mask(unsafe { *set }); + + match how { + libc::SIG_BLOCK => { + let cur = l.signal_mask(); + l.set_signal_mask(cur | new_set); + } + libc::SIG_UNBLOCK => { + let cur = l.signal_mask(); + l.set_signal_mask(cur & !new_set); + } + libc::SIG_SETMASK => { + l.set_signal_mask(new_set); + } + _ => return -libc::EINVAL, + } + + 0 +} + +pub fn sigqueueinfo(pid: pid_t, sig: c_int, info: *mut siginfo_t) -> c_int { + deliver_to_tid(pid, sig, info) +} + +pub fn sigsuspend(set: *const sigset_t) -> c_int { + if set.is_null() { + return -libc::EINVAL; + } + // POSIX: SIGKILL and SIGSTOP are not blockable; ignore them in the supplied mask. + let mut new_mask = sigset_to_mask(unsafe { *set }); + new_mask &= !(1u32 << ((libc::SIGKILL - 1) as u32)); + new_mask &= !(1u32 << ((libc::SIGSTOP - 1) as u32)); + let t = scheduler::current_thread(); + { + let mut l = t.lock(); + l.save_signal_mask(); + l.set_signal_mask(new_mask); + } + + // Wait until *any* deliverable signal is pending. + loop { + // If something is already deliverable, stop sleeping. + if has_pending_in_set_deliverable(&t.lock(), mask_to_sigset(!new_mask)) { + break; + } + // Sleep until woken by a signal delivery (or some other event). A signal + // delivery will queue this thread back to READY via queue_ready_thread. 
+ scheduler::suspend_me_for(WAITING_FOREVER); + } + + // Restore mask and return EINTR (POSIX behavior). + { + let mut l = t.lock(); + l.restore_saved_signal_mask(); + } + -libc::EINTR +} + +pub fn sigtimedwait(set: *const sigset_t, info: *mut c_void, timeout: *const timespec) -> c_int { + if set.is_null() { + return -libc::EINVAL; + } + let mut set = sigset_to_mask(unsafe { *set }); + // POSIX: SIGKILL and SIGSTOP are not waitable; ignore them in the supplied mask. + set &= !(1u32 << ((libc::SIGKILL - 1) as u32)); + set &= !(1u32 << ((libc::SIGSTOP - 1) as u32)); + let timeout_ticks = if timeout.is_null() { + None + } else { + let ts = unsafe { &*timeout }; + let ms = (ts.tv_sec as i64) + .saturating_mul(1000) + .saturating_add((ts.tv_nsec as i64) / 1_000_000); + Some(time::tick_from_millisecond(ms.max(0) as usize)) + }; + + let t = scheduler::current_thread(); + let old_mask = { t.lock().signal_mask() }; + { + let mut l = t.lock(); + let cur = l.signal_mask(); + l.set_signal_mask(cur | set); + } + + loop { + // If we already have a pending matching signal, consume it immediately. + let maybe = { + let mut l = t.lock(); + take_pending_from_set(&mut l, mask_to_sigset(set)) + }; + if let Some((signo, sinfo)) = maybe { + if !info.is_null() { + let out = siginfo_t { + si_signo: sinfo.si_signo as c_int, + si_errno: sinfo.si_errno as c_int, + si_code: sinfo.si_code as c_int, + ..Default::default() + }; + unsafe { (info as *mut c_void).cast::().write(out) }; + } + // Restore mask before returning. + { + let mut l = t.lock(); + l.set_signal_mask(old_mask); + } + return signo; + } + + // Otherwise, sleep until a matching signal arrives or until the timeout. + match timeout_ticks { + Some(ticks) => { + scheduler::suspend_me_for(ticks.max(1)); + // After waking (either due to signal delivery or timer), loop + // again to check pending signals. If nothing is available, we + // treat it as a timeout. 
+ let maybe_after = { + let mut l = t.lock(); + take_pending_from_set(&mut l, mask_to_sigset(set)) + }; + if let Some((signo, sinfo)) = maybe_after { + if !info.is_null() { + let out = siginfo_t { + si_signo: sinfo.si_signo as c_int, + si_errno: sinfo.si_errno as c_int, + si_code: sinfo.si_code as c_int, + ..Default::default() + }; + unsafe { (info as *mut c_void).cast::().write(out) }; + } + { + let mut l = t.lock(); + l.set_signal_mask(old_mask); + } + return signo; + } + { + let mut l = t.lock(); + l.set_signal_mask(old_mask); + } + return -libc::EAGAIN; + } + None => { + scheduler::suspend_me_for(WAITING_FOREVER); + } + } + } +} + +pub fn kill(pid: pid_t, sig: c_int) -> c_int { + // - we treat `pid` as a TID (thread-directed), matching current kernel model. + // - pid==0 / pid<0 process-group semantics are not implemented. + if pid <= 0 { + return -libc::ESRCH; + } + deliver_to_tid(pid, sig, core::ptr::null_mut()) +} + +pub fn tgkill(_tgid: pid_t, pid: pid_t, sig: c_int) -> c_int { + // tgkill targets a specific thread in a specific thread group. + // we accept it as a thread-directed signal to `pid`. + if pid <= 0 { + return -libc::ESRCH; + } + deliver_to_tid(pid, sig, core::ptr::null_mut()) +} + +pub fn tkill(pid: pid_t, sig: c_int) -> c_int { + // Thread-directed signal. 
+ if pid <= 0 { + return -libc::ESRCH; + } + deliver_to_tid(pid, sig, core::ptr::null_mut()) +} diff --git a/kernel/src/syscall_handlers/mod.rs b/kernel/src/syscall_handlers/mod.rs index 0e97e675..8203b322 100644 --- a/kernel/src/syscall_handlers/mod.rs +++ b/kernel/src/syscall_handlers/mod.rs @@ -23,7 +23,8 @@ use crate::vfs::syscalls as vfs_syscalls; #[cfg(enable_vfs)] pub use crate::vfs::syscalls::{Stat, Statfs as StatFs}; use crate::{ - config, scheduler, + config, + scheduler, signal, sync::atomic_wait as futex, thread::{self, Builder, Entry, Stack, Thread}, time, @@ -39,6 +40,7 @@ use core::{ use libc::{ addrinfo, c_char, c_int, c_long, c_uint, c_ulong, c_void, clockid_t, mode_t, msghdr, off_t, sigset_t, size_t, sockaddr, socklen_t, timespec, EBUSY, EINVAL, ESRCH, + pid_t, EINVAL, }; #[cfg(not(enable_vfs))] @@ -217,37 +219,10 @@ pub struct Context { pub args: [usize; 6], } -/// this signal data structure will be used in signal handling -/// now add attributes to disable warnings -/// copy from librs/signal/mod.rs -#[allow(non_camel_case_types)] -#[repr(C)] -#[derive(Clone)] -pub struct sigaltstack { - pub ss_sp: *mut c_void, - pub ss_flags: c_int, - pub ss_size: size_t, -} - -/// copy from librs/signal/mod.rs -#[allow(non_camel_case_types)] -#[repr(align(8))] -pub struct siginfo_t { - pub si_signo: c_int, - pub si_errno: c_int, - pub si_code: c_int, - _pad: [c_int; 29], - _align: [usize; 0], -} - -/// copy from librs/signal/mod.rs -#[allow(non_camel_case_types)] -pub struct sigaction { - pub sa_handler: Option, - pub sa_flags: c_ulong, - pub sa_restorer: Option, - pub sa_mask: sigset_t, -} +// NOTE: signal-related user ABI structs are defined in `signal::syscall`. +// Keep `syscall_handlers` free of conflicting type-level names (like +// `sigaction`) since `define_syscall_handler!` generates a module with +// the same identifier. 
#[repr(C)] pub struct mq_attr { @@ -575,39 +550,53 @@ define_syscall_handler!( } ); define_syscall_handler!( - signalaction(_signum: c_int, _act: *const c_void, _oact: *mut c_void) -> c_int { - // TODO: implement signalaction - 0 + signalaction(signum: c_int, act: *const c_void, oact: *mut c_void) -> c_int { + crate::signal::syscall::sigaction(signum, act as *const libc::c_void, oact as *mut libc::c_void) } ); define_syscall_handler!( - signaltstack(_ss: *const c_void, _old_ss: *mut c_void) -> c_int { - 0 + signaltstack(ss: *const c_void, old_ss: *mut c_void) -> c_int { + crate::signal::syscall::sigaltstack(ss as *const libc::c_void, old_ss as *mut libc::c_void) } ); define_syscall_handler!( - sigpending(_set: *mut libc::sigset_t) -> c_int { - 0 + sigpending(set: *mut libc::sigset_t) -> c_int { + crate::signal::syscall::sigpending(set) } ); define_syscall_handler!( - sigprocmask(_how: c_int, _set: *const libc::sigset_t, _oldset: *mut libc::sigset_t) -> c_int { - 0 + sigprocmask(how: c_int, set: *const libc::sigset_t, oldset: *mut libc::sigset_t) -> c_int { + crate::signal::syscall::sigprocmask(how, set, oldset) } ); define_syscall_handler!( - sigqueueinfo(_pid: c_int, _sig: c_int, _info: *const c_void) -> c_int { - 0 + sigqueueinfo(pid: pid_t, sig: c_int, info: *mut c_void) -> c_int { + crate::signal::syscall::sigqueueinfo(pid, sig, info as *mut crate::signal::syscall::siginfo_t) } ); define_syscall_handler!( - sigsuspend(_set: *const libc::sigset_t) -> c_int { - 0 + sigsuspend(set: *const libc::sigset_t) -> c_int { + crate::signal::syscall::sigsuspend(set) } ); define_syscall_handler!( - sigtimedwait(_set: *const sigset_t, _info: *mut c_void, _timeout: *const timespec) -> c_int { - 0 + sigtimedwait(set: *const sigset_t, info: *mut c_void, timeout: *const timespec) -> c_int { + crate::signal::syscall::sigtimedwait(set, info, timeout) + } +); +define_syscall_handler!( + kill(pid: pid_t, sig: c_int) -> c_int { + crate::signal::syscall::kill(pid, sig) + } +); 
+define_syscall_handler!( + tgkill(tgid: pid_t, pid: pid_t, sig: c_int) -> c_int { + crate::signal::syscall::tgkill(tgid, pid, sig) + } +); +define_syscall_handler!( + tkill(pid: pid_t, sig: c_int) -> c_int { + crate::signal::syscall::tkill(pid, sig) } ); @@ -845,6 +834,9 @@ syscall_table! { (RtSigQueueInfo, sigqueueinfo), (RtSigSuspend, sigsuspend), (RtSigTimedWait, sigtimedwait), + (Kill, kill), + (Tgkill, tgkill), + (Tkill, tkill), (Socket, socket), (Bind, bind), (Connect, connect), diff --git a/kernel/src/thread/mod.rs b/kernel/src/thread/mod.rs index 6eb55a91..7e97d765 100644 --- a/kernel/src/thread/mod.rs +++ b/kernel/src/thread/mod.rs @@ -37,7 +37,7 @@ use alloc::boxed::Box; use core::{ alloc::Layout, ptr::NonNull, - sync::atomic::{AtomicI32, AtomicU32, AtomicUsize, Ordering}, + sync::atomic::{AtomicI32, AtomicU32, Ordering}, }; mod builder; @@ -134,14 +134,23 @@ impl ThreadStats { pub(crate) type GlobalQueueListHead = UniqueListHead; -#[derive(Default)] pub(crate) struct SignalContext { + // Pending signals bitmask (1 << signum). pending_signals: u32, + // Whether this signal context is currently active (we're on signal stack). active: bool, // Will recover thread_context at recover_sp on exiting of signal handler. recover_sp: usize, thread_context: arch::Context, - once_action: [Option>; 32], + // Per-signal installed actions (sa_handler/sa_mask/sa_flags). + sigactions: [KernelSigAction; 32], + + // Alternate signal stack description (empty ss_size == 0 means disabled). + alt_stack: SigAltStack, + + // Per-signal queued siginfo (we don't support queueing multiple same-signals + // yet; slot is overwritten by newer deliveries). 
+ pending_siginfo: [Option; 32], } impl core::fmt::Debug for SignalContext { @@ -150,6 +159,54 @@ impl core::fmt::Debug for SignalContext { } } +impl Default for SignalContext { + fn default() -> Self { + Self { + pending_signals: 0, + active: false, + recover_sp: 0, + thread_context: arch::Context::default(), + sigactions: core::array::from_fn(|_| KernelSigAction::default()), + alt_stack: SigAltStack::default(), + pending_siginfo: core::array::from_fn(|_| None), + } + } +} + +// Kernel-side lightweight siginfo (subset used by the kernel). +#[derive(Clone, Copy, Debug, Default)] +pub struct SigInfo { + pub si_signo: i32, + pub si_errno: i32, + pub si_code: i32, +} + +// Kernel representation of sigaction. +#[derive(Clone, Copy, Debug, Default)] +pub struct KernelSigAction { + pub sa_handler: Option, + pub sa_flags: usize, + pub sa_mask: libc::sigset_t, +} + +// Kernel-side representation of sigaltstack (small subset). +#[derive(Clone, Copy, Debug)] +pub struct SigAltStack { + pub ss_sp: *mut core::ffi::c_void, + pub ss_flags: i32, + pub ss_size: usize, +} + +impl Default for SigAltStack { + fn default() -> Self { + Self { + ss_sp: core::ptr::null_mut(), + ss_flags: 0, + ss_size: 0, + } + } +} + #[derive(Debug)] pub struct Thread { global: GlobalQueueListHead, @@ -198,6 +255,10 @@ pub struct Thread { // - Check mutex's pending queue acquired_mutexes: SpinLock, signal_context: Option>, + // Current signal mask (POSIX numbering: signal N is bit N-1). + blocked_mask: u32, + // Saved mask used by syscalls that temporarily replace the mask (e.g. sigsuspend). 
+ saved_mask: u32, } extern "C" fn run_simple_c(f: extern "C" fn()) { @@ -406,6 +467,8 @@ impl Thread { pending_on_mutex: ArcCas::new(None), acquired_mutexes: SpinLock::new(MutexList::new()), signal_context: None, + blocked_mask: 0, + saved_mask: 0, } } @@ -464,14 +527,6 @@ impl Thread { } } - pub fn kill_with_once_handler(&mut self, signum: i32, f: impl FnOnce() + 'static) -> bool { - if !self.kill(signum) { - return false; - } - self.register_once_signal_handler(signum, f); - true - } - pub fn kill(&mut self, signum: i32) -> bool { let sig_ctx = self.get_or_create_signal_context(); let old = sig_ctx.pending_signals; @@ -480,14 +535,95 @@ impl Thread { (old & 1 << signum) == 0 } - pub fn register_once_signal_handler(&mut self, signum: i32, f: impl FnOnce() + 'static) { + pub fn install_signal_handler( + &mut self, + signum: i32, + handler: extern "C" fn(i32), + ) -> &mut Self { + let sa = KernelSigAction { + sa_handler: Some(handler), + sa_flags: 0, + sa_mask: 0, + }; + self.set_sigaction(signum, sa) + } + + // Set per-signal action (kernel representation). signum must be 0..32. + pub fn set_sigaction(&mut self, signum: i32, sa: KernelSigAction) -> &mut Self { let sig_ctx = self.get_or_create_signal_context(); - sig_ctx.once_action[signum as usize] = Some(Box::new(f)); + sig_ctx.sigactions[signum as usize] = sa; + self + } + + // Get per-signal action. + pub fn get_sigaction(&self, signum: i32) -> KernelSigAction { + self.signal_context + .as_ref() + .map_or_else(KernelSigAction::default, |c| c.sigactions[signum as usize]) } - pub fn take_signal_handler(&mut self, signum: i32) -> Option> { + // Push a siginfo for `signum` into per-thread slot and mark it pending. 
+ pub fn push_siginfo(&mut self, signum: i32, info: SigInfo) -> &mut Self { let sig_ctx = self.get_or_create_signal_context(); - sig_ctx.once_action[signum as usize].take() + sig_ctx.pending_siginfo[signum as usize] = Some(info); + sig_ctx.pending_signals |= 1 << signum; + self + } + + // Take pending siginfo for `signum`. + pub fn take_siginfo(&mut self, signum: i32) -> Option { + let sig_ctx = self.get_or_create_signal_context(); + sig_ctx.pending_siginfo[signum as usize].take() + } + + // Set or query alternate signal stack. + pub fn set_sigaltstack(&mut self, ss: SigAltStack) -> &mut Self { + let sig_ctx = self.get_or_create_signal_context(); + sig_ctx.alt_stack = ss; + self + } + + pub fn get_sigaltstack(&self) -> SigAltStack { + self.signal_context + .as_ref() + .map_or_else(SigAltStack::default, |c| c.alt_stack) + } + + pub fn is_signal_blocked(&self, signum: i32) -> bool { + if signum <= 0 || signum >= 32 { + return false; + } + // We only support standard signals 1..31 here; treat sigset_t as a + // bitset and check the (signum-1) bit. + let bit: u32 = 1u32 << ((signum - 1) as u32); + (self.blocked_mask & bit) != 0 + } + + // Returns the current signal mask (POSIX numbering: bit (signum-1)). + pub fn signal_mask(&self) -> u32 { + self.blocked_mask + } + + // Replace the current signal mask. + pub fn set_signal_mask(&mut self, new_mask: u32) { + self.blocked_mask = new_mask; + } + + // Save the current signal mask for later restoration (used by sigsuspend-like syscalls). + pub fn save_signal_mask(&mut self) { + self.saved_mask = self.blocked_mask; + } + + // Restore the previously saved signal mask. + pub fn restore_saved_signal_mask(&mut self) { + self.blocked_mask = self.saved_mask; + } + + // Returns the current pending signal bitmap using *kernel numbering* (bit = 1 << signum). 
+ pub fn pending_signals_bitmap(&self) -> u32 { + self.signal_context + .as_ref() + .map_or(0, |c| c.pending_signals) } pub(crate) fn activate_signal_context(&mut self) -> bool { @@ -527,6 +663,28 @@ impl Thread { self.saved_sp - core::mem::size_of::() } + // Compute the stack pointer to use for signal delivery. + // + // If SA_ONSTACK is requested for the delivered signal and an alternate + // signal stack is configured (and not disabled), we deliver on that stack. + // Otherwise we fall back to the thread's normal stack (the legacy behavior). + pub(crate) fn signal_delivery_sp(&mut self, signum: i32) -> usize { + let sa = self.get_sigaction(signum); + // Only use altstack when requested. + let want_onstack = (sa.sa_flags as i32) & libc::SA_ONSTACK != 0; + if want_onstack { + let ss = self.get_sigaltstack(); + if !ss.ss_sp.is_null() && ss.ss_size != 0 && (ss.ss_flags & libc::SS_DISABLE) == 0 { + // Place the signal handler context at the top of the alt stack. + // Keep it aligned like the normal path. + let mut sp = (ss.ss_sp as usize).saturating_add(ss.ss_size); + sp &= !(core::mem::align_of::() - 1); + return sp.saturating_sub(core::mem::size_of::()); + } + } + self.signal_handler_sp() + } + pub(crate) fn init(&mut self, stack: Stack, entry: Entry) -> &mut Self { self.stack = stack; // TODO: Stack sanity check. @@ -679,7 +837,21 @@ impl Thread { #[inline] pub fn has_pending_signals(&mut self) -> bool { - self.pending_signals() != 0 + let pending = self.pending_signals(); + // `blocked` uses POSIX numbering (signal N is bit (N-1)). + // `pending_signals` uses kernel numbering (signal N is bit N). + // For now we only consider signals 1..31 (NSIG=32) and treat 0 as unused. 
+ for signum in 1..32 { + if pending & (1 << signum) == 0 { + continue; + } + let bit: u32 = 1u32 << ((signum - 1) as u32); + let is_blocked = (self.blocked_mask & bit) != 0; + if !is_blocked { + return true; + } + } + false } #[inline] diff --git a/test_harness/src/lib.rs b/test_harness/src/lib.rs index ed078129..ec62f35e 100644 --- a/test_harness/src/lib.rs +++ b/test_harness/src/lib.rs @@ -14,9 +14,10 @@ extern crate proc_macro; use proc_macro::TokenStream; +use proc_macro2::Span; use quote::quote; use std::sync::atomic::{AtomicBool, Ordering}; -use syn::{parse_macro_input, FnArg, ItemFn}; +use syn::{parse_macro_input, Expr, ExprLit, FnArg, ItemFn, Lit, LitStr, Meta}; static ENABLE_TEST_ONLY: AtomicBool = AtomicBool::new(false); static HAS_ONLY_TEST: AtomicBool = AtomicBool::new(false); @@ -60,6 +61,28 @@ fn generate_test_case(_attr: TokenStream, item: TokenStream) -> TokenStream { let input = parse_macro_input!(item as ItemFn); let test_name = &input.sig.ident; let input_block = &input.block; + let mut passthrough_attrs = Vec::new(); + let mut ignore_reason: Option = None; + + for attr in input.attrs.into_iter() { + if attr.path().is_ident("ignore") { + let reason = match &attr.meta { + Meta::Path(_) => LitStr::new("", Span::call_site()), + Meta::NameValue(name_value) => match &name_value.value { + Expr::Lit(ExprLit { + lit: Lit::Str(lit_str), + .. + }) => lit_str.clone(), + _ => LitStr::new("", Span::call_site()), + }, + _ => LitStr::new("", Span::call_site()), + }; + ignore_reason = Some(reason); + continue; + } + + passthrough_attrs.push(attr); + } let filtered_params = input .sig @@ -72,13 +95,29 @@ fn generate_test_case(_attr: TokenStream, item: TokenStream) -> TokenStream { _ => None, }); + let ignore_guard = ignore_reason.map(|reason| { + quote! 
{ + { + let msg: &str = #reason; + if msg.is_empty() { + println!("[ IGNORED ] {}", stringify!(#test_name)); + } else { + println!("[ IGNORED ] {} - {}", stringify!(#test_name), msg); + } + return; + } + } + }); + let expanded = quote! { + #(#passthrough_attrs)* #[test_case] fn #test_name(#(#filtered_params),*) { #[cfg(not(use_defmt))] use semihosting::println; #[cfg(use_defmt)] use defmt::println as println; + #ignore_guard println!("[ RUN ] {}", stringify!(#test_name)); #( let _ = #param_names; )* #input_block From 038cbe28d95251d75bca44a166716a2559929053 Mon Sep 17 00:00:00 2001 From: zhanghe Date: Sun, 4 Jan 2026 09:43:47 +0800 Subject: [PATCH 2/2] use ipi kick off signal process in SMP --- header/src/lib.rs | 2 + kernel/src/arch/aarch64/mod.rs | 3 ++ kernel/src/arch/arm/mod.rs | 3 ++ kernel/src/arch/riscv/mod.rs | 42 ++++++++++++++++ kernel/src/arch/riscv/trap.rs | 21 +++++++- kernel/src/lib.rs | 4 ++ kernel/src/scheduler/mod.rs | 37 +++++++++++++- kernel/src/signal/mod.rs | 36 +++++++------ kernel/src/signal/syscall.rs | 81 ++++++++++++++---------------- kernel/src/syscall_handlers/mod.rs | 23 +++++++-- kernel/src/thread/builder.rs | 2 +- kernel/src/thread/mod.rs | 77 +++++++++++++++++++++------- 12 files changed, 245 insertions(+), 86 deletions(-) diff --git a/header/src/lib.rs b/header/src/lib.rs index 71c85b49..41dc049f 100644 --- a/header/src/lib.rs +++ b/header/src/lib.rs @@ -86,6 +86,8 @@ pub mod syscalls { MqTimedSend, MqTimedReceive, MqGetSetAttr, + GetPid, + PthreadToTid, LastNR, } } diff --git a/kernel/src/arch/aarch64/mod.rs b/kernel/src/arch/aarch64/mod.rs index 34f6612b..e99ce70c 100644 --- a/kernel/src/arch/aarch64/mod.rs +++ b/kernel/src/arch/aarch64/mod.rs @@ -560,3 +560,6 @@ pub(crate) extern "C" fn switch_stack( ) } } + +// add stub functions for IPI +pub fn send_reschedule_ipi_all() {} diff --git a/kernel/src/arch/arm/mod.rs b/kernel/src/arch/arm/mod.rs index e70388c3..0fa38c56 100644 --- a/kernel/src/arch/arm/mod.rs +++ 
b/kernel/src/arch/arm/mod.rs @@ -619,6 +619,9 @@ pub extern "C" fn pend_switch_context() { post_pendsv(); } +// stub: now it's UP case. +pub fn send_reschedule_ipi_all() {} + #[inline(always)] pub extern "C" fn switch_context(saved_sp_mut: *mut u8, to_sp: usize) { switch_context_with_hook(saved_sp_mut, to_sp, core::ptr::null_mut()); diff --git a/kernel/src/arch/riscv/mod.rs b/kernel/src/arch/riscv/mod.rs index 1a89a561..65b10e9f 100644 --- a/kernel/src/arch/riscv/mod.rs +++ b/kernel/src/arch/riscv/mod.rs @@ -43,6 +43,48 @@ pub(crate) const MIE_MEIE: usize = 1 << 11; static PENDING_SWITCH_CONTEXT: [AtomicBool; NUM_CORES] = [const { AtomicBool::new(false) }; NUM_CORES]; +// QEMU virt uses CLINT at 0x0200_0000 for MSIP (software interrupts). +#[cfg(any(target_board = "qemu_riscv64", target_board = "qemu_riscv32"))] +const CLINT_BASE: usize = 0x0200_0000; + +#[cfg(any(target_board = "qemu_riscv64", target_board = "qemu_riscv32"))] +#[inline] +fn msip_ptr(hart: usize) -> *mut u32 { + (CLINT_BASE + hart * core::mem::size_of::()) as *mut u32 +} + +#[cfg(not(any(target_board = "qemu_riscv64", target_board = "qemu_riscv32")))] +#[inline] +fn msip_ptr(_hart: usize) -> *mut u32 { + core::ptr::null_mut() +} + +pub fn send_reschedule_ipi_all() { + if NUM_CORES <= 1 { + return; + } + let this = current_cpu_id(); + for hart in 0..NUM_CORES { + if hart == this { + continue; + } + let p = msip_ptr(hart); + if p.is_null() { + continue; + } + unsafe { p.write_volatile(1) }; + } +} + +pub(crate) fn clear_reschedule_ipi() { + let hart = current_cpu_id(); + let p = msip_ptr(hart); + if p.is_null() { + return; + } + unsafe { p.write_volatile(0) }; +} + #[inline] pub(crate) extern "C" fn pend_switch_context() { if !sysirq::is_in_irq() { diff --git a/kernel/src/arch/riscv/trap.rs b/kernel/src/arch/riscv/trap.rs index 09d46836..716ac4ad 100644 --- a/kernel/src/arch/riscv/trap.rs +++ b/kernel/src/arch/riscv/trap.rs @@ -13,7 +13,8 @@ // limitations under the License. 
use super::{ - claim_switch_context, disable_local_irq, enable_local_irq, Context, IsrContext, NR_SWITCH, + claim_switch_context, clear_reschedule_ipi, disable_local_irq, enable_local_irq, + pend_switch_context, Context, IsrContext, NR_SWITCH, }; use crate::{ boards::handle_plic_irq, @@ -34,6 +35,7 @@ use core::{ pub(crate) const INTERRUPT_MASK: usize = 1usize << (usize::BITS - 1); pub(crate) const TIMER_INT: usize = INTERRUPT_MASK | 0x7; +pub(crate) const SOFT_INT: usize = INTERRUPT_MASK | 0x3; pub(crate) const ECALL: usize = 0xB; pub(crate) const EXTERN_INT: usize = INTERRUPT_MASK | 0xB; @@ -224,6 +226,17 @@ fn might_switch_context(from: &Context, ra: usize) -> usize { } let this_thread = scheduler::current_thread_ref(); let Some(next) = scheduler::next_preferred_thread(this_thread.priority()) else { + let current = scheduler::current_thread(); + if current.lock().has_pending_signals() + && current.state() == thread::RUNNING + && Thread::id(&current) + != Thread::id(&scheduler::get_idle_thread(super::current_cpu_id())) + { + return scheduler::switch_current_thread( + old_sp, + scheduler::get_idle_thread(super::current_cpu_id()), + ); + } return old_sp; }; this_thread.lock().set_saved_sp(old_sp); @@ -236,6 +249,12 @@ extern "C" fn handle_trap(ctx: &mut Context, mcause: usize, mtval: usize, cont: usize) -> usize { let sp = ctx as *const _ as usize; match mcause { + SOFT_INT => { + clear_reschedule_ipi(); + // Ensure we attempt a context switch when leaving the interrupt. + pend_switch_context(); + might_switch_context(ctx, cont) + } EXTERN_INT => { handle_plic_irq(ctx, mcause, mtval); sp diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index 22f168da..2dde0a08 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -536,6 +536,10 @@ mod tests { // Should not hang.
#[test] + #[cfg_attr( + any(target_board = "qemu_virt64_aarch64"), + ignore = "IPI hasn't been implemented yet" + )] fn test_simple_signal() { let a = Arc::new(ConstBarrier::<{ 2 }>::new()); let a_cloned = a.clone(); diff --git a/kernel/src/scheduler/mod.rs b/kernel/src/scheduler/mod.rs index c6ba319b..c5980068 100644 --- a/kernel/src/scheduler/mod.rs +++ b/kernel/src/scheduler/mod.rs @@ -269,7 +269,7 @@ pub(crate) extern "C" fn save_context_finish_hook(hook: Option<&mut ContextSwitc } } -fn switch_current_thread(old_sp: usize, next: ThreadNode) -> usize { +pub(crate) fn switch_current_thread(old_sp: usize, next: ThreadNode) -> usize { let to_sp = next.saved_sp(); let ok = next.transfer_state(thread::READY, thread::RUNNING); debug_assert!(ok); @@ -281,6 +281,11 @@ fn switch_current_thread(old_sp: usize, next: ThreadNode) -> usize { { next.lock().set_start_cycles(cycles); } + // Prepare signal handling before switching into the next thread. + // This keeps behavior consistent across switch paths (with/without hooks). + if next.lock().has_pending_signals() { + prepare_signal_handling(&next); + } let old = set_current_thread(next); #[cfg(debugging_scheduler)] crate::trace!( @@ -311,6 +316,36 @@ fn switch_current_thread(old_sp: usize, next: ThreadNode) -> usize { to_sp } +// add this for signal process, there is *no* code point return to "user space", +// because we need support both dsc(direct syscall) and swi (svc syscall) +// This is an workaround when we need a guaranteed context switch to process +// per-thread work such as pending signals. 
+pub fn yield_me_definitely() { + if unlikely(!is_schedule_ready()) { + return; + } + debug_assert!(arch::local_irq_enabled()); + let pg = thread::Thread::try_preempt_me(); + if !pg.preemptable() { + arch::idle(); + return; + } + drop(pg); + yield_to_next_unconditionally(); +} + +fn yield_to_next_unconditionally() { + debug_assert!(arch::local_irq_enabled()); + let next = next_ready_thread().map_or_else(idle::current_idle_thread, |v| v); + let to_sp = next.saved_sp(); + let old = current_thread_ref(); + let from_sp_ptr = old.saved_sp_ptr(); + let mut hook_holder = ContextSwitchHookHolder::new(next); + hook_holder.set_prev_thread_target_state(thread::READY); + arch::switch_context_with_hook(from_sp_ptr as *mut u8, to_sp, &mut hook_holder as *mut _); + debug_assert!(arch::local_irq_enabled()); +} + pub(crate) extern "C" fn relinquish_me_and_return_next_sp(old_sp: usize) -> usize { debug_assert!(!arch::local_irq_enabled()); debug_assert!(!crate::irq::is_in_irq()); diff --git a/kernel/src/signal/mod.rs b/kernel/src/signal/mod.rs index 9beeb457..68aa926b 100644 --- a/kernel/src/signal/mod.rs +++ b/kernel/src/signal/mod.rs @@ -44,22 +44,23 @@ fn handle_signal(t: &ThreadNode, signum: i32) { #[inline(never)] pub(crate) unsafe extern "C" fn handler_entry(_sp: usize, _old_sp: usize) { let current = scheduler::current_thread(); - // Deliver only unblocked signals. 
- // NOTE: pending_signals uses kernel numbering (bit = 1< Result { Ok(signum as usize) } +// pid_t type lookups fn find_thread_by_tid(tid: pid_t) -> Option { - let target = tid as usize; + if tid <= 0 { + return None; + } + let target = tid as u32; let mut it = GlobalQueueVisitor::new(); while let Some(t) = it.next() { - if Thread::id(&t) == target { + if Thread::tid(&t) == target { return Some(t); } } @@ -146,26 +150,6 @@ fn sigset_bit_for(signum: usize) -> sigset_t { mask_to_sigset(1u32 << ((signum - 1) as u32)) } -#[inline] -fn has_pending_in_set_deliverable(t: &Thread, set: sigset_t) -> bool { - let pending = t.pending_signals_bitmap(); - let set_mask = sigset_to_mask(set); - for signum in 1..32 { - let bit = 1u32 << signum; - if pending & bit == 0 { - continue; - } - let mask = 1u32 << ((signum - 1) as u32); - if set_mask & mask == 0 { - continue; - } - if !t.is_signal_blocked(signum as i32) { - return true; - } - } - false -} - #[inline] fn take_pending_from_set(t: &mut Thread, set: sigset_t) -> Option<(c_int, SigInfo)> { let pending = t.pending_signals(); @@ -217,10 +201,24 @@ fn deliver_to_tid(tid: pid_t, sig: c_int, info: *mut siginfo_t) -> c_int { target.lock().push_siginfo(signum, sinfo); + // Fast path for self-signal: we need a guaranteed + // context switch so `prepare_signal_handling()` runs + if Thread::id(&target) == scheduler::current_thread_id() { + scheduler::yield_me_definitely(); + return 0; + } + + //FIXME: we must implement correct wakeup, should add a waitqueue for signal waiters let st = target.state(); if st == thread::SUSPENDED { let _ = scheduler::queue_ready_thread(thread::SUSPENDED, target); } + // UP: give scheduler a hint to reschedule soon + scheduler::yield_me_now_or_later(); + // SMP: Ensure the scheduler re-checks runnable threads soon. 
+ if blueos_kconfig::CONFIG_NUM_CORES > 1 { + arch::send_reschedule_ipi_all(); + } 0 } @@ -313,23 +311,31 @@ pub fn sigprocmask(how: c_int, set: *const sigset_t, oldset: *mut sigset_t) -> c if set.is_null() { return 0; } - let new_set = sigset_to_mask(unsafe { *set }); - + let mut new_mask = sigset_to_mask(unsafe { *set }); + // SIGKILL and SIGSTOP aren't able to block + new_mask &= !(1u32 << ((libc::SIGKILL - 1) as u32)); + new_mask &= !(1u32 << ((libc::SIGSTOP - 1) as u32)); match how { libc::SIG_BLOCK => { let cur = l.signal_mask(); - l.set_signal_mask(cur | new_set); + l.set_signal_mask(cur | new_mask); } libc::SIG_UNBLOCK => { let cur = l.signal_mask(); - l.set_signal_mask(cur & !new_set); + l.set_signal_mask(cur & !new_mask); } libc::SIG_SETMASK => { - l.set_signal_mask(new_set); + l.set_signal_mask(new_mask); } _ => return -libc::EINVAL, } + // If unblocking made some pending signals deliverable, ensure they are + if l.has_pending_signals() { + drop(l); + scheduler::yield_me_definitely(); + } + 0 } @@ -352,16 +358,9 @@ pub fn sigsuspend(set: *const sigset_t) -> c_int { l.set_signal_mask(new_mask); } - // Wait until *any* deliverable signal is pending. - loop { - // If something is already deliverable, stop sleeping. - if has_pending_in_set_deliverable(&t.lock(), mask_to_sigset(!new_mask)) { - break; - } - // Sleep until woken by a signal delivery (or some other event). A signal - // delivery will queue this thread back to READY via queue_ready_thread. - scheduler::suspend_me_for(WAITING_FOREVER); - } + // Sleep until woken by a signal delivery (or some other event). + // POSIX behavior: sigsuspend returns -1/EINTR after a signal handler runs. + scheduler::suspend_me_for(WAITING_FOREVER); // Restore mask and return EINTR (POSIX behavior). { @@ -461,9 +460,8 @@ pub fn sigtimedwait(set: *const sigset_t, info: *mut c_void, timeout: *const tim } } +// all use *deliver_to_tid*, when implement multi-threaded process support, need change here. 
pub fn kill(pid: pid_t, sig: c_int) -> c_int { - // - we treat `pid` as a TID (thread-directed), matching current kernel model. - // - pid==0 / pid<0 process-group semantics are not implemented. if pid <= 0 { return -libc::ESRCH; } @@ -471,8 +469,6 @@ pub fn kill(pid: pid_t, sig: c_int) -> c_int { } pub fn tgkill(_tgid: pid_t, pid: pid_t, sig: c_int) -> c_int { - // tgkill targets a specific thread in a specific thread group. - // we accept it as a thread-directed signal to `pid`. if pid <= 0 { return -libc::ESRCH; } @@ -480,7 +476,6 @@ pub fn tgkill(_tgid: pid_t, pid: pid_t, sig: c_int) -> c_int { } pub fn tkill(pid: pid_t, sig: c_int) -> c_int { - // Thread-directed signal. if pid <= 0 { return -libc::ESRCH; } diff --git a/kernel/src/syscall_handlers/mod.rs b/kernel/src/syscall_handlers/mod.rs index 8203b322..e56248d6 100644 --- a/kernel/src/syscall_handlers/mod.rs +++ b/kernel/src/syscall_handlers/mod.rs @@ -23,8 +23,7 @@ use crate::vfs::syscalls as vfs_syscalls; #[cfg(enable_vfs)] pub use crate::vfs::syscalls::{Stat, Statfs as StatFs}; use crate::{ - config, - scheduler, signal, + config, scheduler, signal, sync::atomic_wait as futex, thread::{self, Builder, Entry, Stack, Thread}, time, @@ -39,8 +38,7 @@ use core::{ }; use libc::{ addrinfo, c_char, c_int, c_long, c_uint, c_ulong, c_void, clockid_t, mode_t, msghdr, off_t, - sigset_t, size_t, sockaddr, socklen_t, timespec, EBUSY, EINVAL, ESRCH, - pid_t, EINVAL, + pid_t, sigset_t, size_t, sockaddr, socklen_t, timespec, EBUSY, EINVAL, ESRCH, }; #[cfg(not(enable_vfs))] @@ -297,6 +295,21 @@ get_tid() -> c_long { handle as c_long }); +define_syscall_handler!( +get_pid() -> c_long { + let t = scheduler::current_thread(); + Thread::tid(&t) as c_long +}); + +define_syscall_handler!( +pthread_to_tid(pthread: usize) -> c_long { + let target = thread::GlobalQueueVisitor::find_if(|t| thread::Thread::id(t) == pthread); + let Some(target) = target else { + return -(ESRCH as c_long); + }; + thread::Thread::tid(&target) as 
c_long +}); + define_syscall_handler!( get_sched_param(tid: usize) -> c_long { @@ -796,6 +809,7 @@ syscall_table! { (Echo, echo), (Nop, nop), (GetTid, get_tid), + (GetPid, get_pid), (GetSchedParam, get_sched_param), (SetSchedParam, set_sched_param), (CreateThread, create_thread), @@ -860,6 +874,7 @@ syscall_table! { (MqTimedSend, mq_timedsend), (MqTimedReceive, mq_timedreceive), (MqGetSetAttr, mq_getsetattr), + (PthreadToTid, pthread_to_tid), } #[cfg(not(enable_syscall))] diff --git a/kernel/src/thread/builder.rs b/kernel/src/thread/builder.rs index 70fa763a..88933104 100644 --- a/kernel/src/thread/builder.rs +++ b/kernel/src/thread/builder.rs @@ -196,7 +196,6 @@ pub(crate) fn build_static_thread( let stack = &mut s.stack; let arc = unsafe { ThreadNode::from_static_inner_ref(inner) }; debug_assert_eq!(ThreadNode::strong_count(&arc), 1); - let _id = Thread::id(&arc); let mut w = arc.lock(); w.init( Stack::from_raw(stack.rep.as_mut_ptr(), stack.rep.len()), @@ -207,6 +206,7 @@ pub(crate) fn build_static_thread( w.set_kind(kind); debug_assert!((thread::IDLE..=thread::RETIRED).contains(&init_state)); unsafe { w.set_state(init_state) }; + let id = Thread::id(&arc); debug!( "System thread 0x{:x} created: sp: 0x{:x}, stack base: {:?}, stack size: {}, context size: {}", id, diff --git a/kernel/src/thread/mod.rs b/kernel/src/thread/mod.rs index 7e97d765..e3e787f9 100644 --- a/kernel/src/thread/mod.rs +++ b/kernel/src/thread/mod.rs @@ -40,6 +40,8 @@ use core::{ sync::atomic::{AtomicI32, AtomicU32, Ordering}, }; +static NEXT_TID: AtomicU32 = AtomicU32::new(1); + mod builder; pub use builder::*; @@ -135,8 +137,10 @@ impl ThreadStats { pub(crate) type GlobalQueueListHead = UniqueListHead; pub(crate) struct SignalContext { - // Pending signals bitmask (1 << signum). - pending_signals: u32, + // Pending counts per signal number. + // Index uses kernel numbering: slot N corresponds to signal N. + // Slot 0 is unused. 
+ pending_counts: [u32; 32], // Whether this signal context is currently active (we're on signal stack). active: bool, // Will recover thread_context at recover_sp on exiting of signal handler. @@ -162,7 +166,7 @@ impl core::fmt::Debug for SignalContext { impl Default for SignalContext { fn default() -> Self { Self { - pending_signals: 0, + pending_counts: [0u32; 32], active: false, recover_sp: 0, thread_context: arch::Context::default(), @@ -209,6 +213,7 @@ impl Default for SigAltStack { #[derive(Debug)] pub struct Thread { + tid: u32, global: GlobalQueueListHead, sched_node: IlistHead, pub timer: Option>, @@ -444,6 +449,7 @@ impl Thread { const fn new(kind: ThreadKind) -> Self { Self { + tid: 0, cleanup: None, stack: Stack::new(), state: AtomicUint::new(IDLE), @@ -477,6 +483,11 @@ impl Thread { me as *const Self as usize } + #[inline] + pub fn tid(me: &Self) -> u32 { + me.tid + } + #[inline] pub(crate) fn try_preempt_me() -> PreemptGuard { let current = scheduler::current_thread(); @@ -529,10 +540,14 @@ impl Thread { pub fn kill(&mut self, signum: i32) -> bool { let sig_ctx = self.get_or_create_signal_context(); - let old = sig_ctx.pending_signals; - sig_ctx.pending_signals |= 1 << signum; - // Return false if there is signum pending. - (old & 1 << signum) == 0 + if signum <= 0 || signum >= 32 { + return false; + } + let idx = signum as usize; + let old = sig_ctx.pending_counts[idx]; + sig_ctx.pending_counts[idx] = old.saturating_add(1); + // Return false if there is signum already pending. + old == 0 } pub fn install_signal_handler( @@ -565,8 +580,11 @@ impl Thread { // Push a siginfo for `signum` into per-thread slot and mark it pending. 
pub fn push_siginfo(&mut self, signum: i32, info: SigInfo) -> &mut Self { let sig_ctx = self.get_or_create_signal_context(); - sig_ctx.pending_siginfo[signum as usize] = Some(info); - sig_ctx.pending_signals |= 1 << signum; + if signum > 0 && signum < 32 { + let idx = signum as usize; + sig_ctx.pending_siginfo[idx] = Some(info); + sig_ctx.pending_counts[idx] = sig_ctx.pending_counts[idx].saturating_add(1); + } self } @@ -621,9 +639,15 @@ impl Thread { // Returns the current pending signal bitmap using *kernel numbering* (bit = 1 << signum). pub fn pending_signals_bitmap(&self) -> u32 { - self.signal_context - .as_ref() - .map_or(0, |c| c.pending_signals) + self.signal_context.as_ref().map_or(0, |c| { + let mut bits: u32 = 0; + for signum in 1..32 { + if c.pending_counts[signum] != 0 { + bits |= 1u32 << (signum as u32); + } + } + bits + }) } pub(crate) fn activate_signal_context(&mut self) -> bool { @@ -686,6 +710,9 @@ impl Thread { } pub(crate) fn init(&mut self, stack: Stack, entry: Entry) -> &mut Self { + if self.tid == 0 { + self.tid = NEXT_TID.fetch_add(1, Ordering::Relaxed); + } self.stack = stack; // TODO: Stack sanity check. 
let maybe_sp = self.stack.top() as usize @@ -830,9 +857,15 @@ impl Thread { #[inline] pub fn pending_signals(&mut self) -> u32 { - self.signal_context - .as_ref() - .map_or_else(|| 0, |c| c.pending_signals) + self.signal_context.as_ref().map_or(0, |c| { + let mut bits: u32 = 0; + for signum in 1..32 { + if c.pending_counts[signum] != 0 { + bits |= 1u32 << (signum as u32); + } + } + bits + }) } #[inline] @@ -859,7 +892,16 @@ impl Thread { let Some(c) = &mut self.signal_context else { return self; }; - c.pending_signals &= !(1 << signum); + if signum <= 0 || signum >= 32 { + return self; + } + let idx = signum as usize; + if c.pending_counts[idx] != 0 { + c.pending_counts[idx] -= 1; + if c.pending_counts[idx] == 0 { + c.pending_siginfo[idx] = None; + } + } self } @@ -868,7 +910,8 @@ impl Thread { let Some(c) = &mut self.signal_context else { return self; }; - c.pending_signals = 0; + c.pending_counts = [0u32; 32]; + c.pending_siginfo = core::array::from_fn(|_| None); self }