|
| 1 | +use crate::interface::{cagetable_getref, cagetable_getref_opt, RustAtomicOrdering}; |
| 2 | +use sysdefs::constants::{SA_NODEFER, SA_RESETHAND, SIG_DFL}; |
| 3 | + |
// Epoch state values written into each thread's epoch counter (a raw pointer
// supplied by the wasm runtime — see the SAFETY note in `signal_epoch_trigger`).
const EPOCH_NORMAL: u64 = 0; // no pending event; guest runs normally
const EPOCH_SIGNAL: u64 = 0xc0ffee; // a signal is pending and should be delivered
const EPOCH_KILLED: u64 = 0xdead; // the thread must terminate
| 7 | + |
| 8 | +// switch the epoch of the main thread of the cage to "signal" state |
| 9 | +// thread safety: this function could possibly be invoked by multiple threads of the same cage |
| 10 | +pub fn signal_epoch_trigger(cageid: u64) { |
| 11 | + let cage = cagetable_getref(cageid); |
| 12 | + |
| 13 | + let threadid_guard = cage.main_threadid.read(); |
| 14 | + let main_threadid = *threadid_guard; |
| 15 | + let epoch_handler = cage |
| 16 | + .epoch_handler |
| 17 | + .get(&main_threadid) |
| 18 | + .expect("main threadid does not exist"); |
| 19 | + let guard = epoch_handler.write(); |
| 20 | + let epoch = *guard; |
| 21 | + // SAFETY: the pointer is locked with write access so no one is able to modify it concurrently |
| 22 | + // However, Potential BUG (TODO): We still need to verify the lifetime of the pointer. This pointer |
| 23 | + // is created by wasmtime and will be destroyed at some point when the wasm instance is destroyed |
| 24 | + // we still need to figure out when is the destroy happening and make sure it is destroyed after the |
| 25 | + // information in rawposix is updated |
| 26 | + unsafe { |
| 27 | + *epoch = EPOCH_SIGNAL; |
| 28 | + } |
| 29 | +} |
| 30 | + |
| 31 | +// switch the epoch of all threads of the cage to "killed" state |
| 32 | +// thread safety: this function will only be invoked by main thread of the cage |
| 33 | +pub fn epoch_kill_all(cageid: u64) { |
| 34 | + let cage = cagetable_getref(cageid); |
| 35 | + |
| 36 | + let threadid_guard = cage.main_threadid.read(); |
| 37 | + let main_threadid = *threadid_guard; |
| 38 | + // we iterate through the epoch handler of each thread in the cage |
| 39 | + for entry in cage.epoch_handler.iter() { |
| 40 | + if entry.key() == &main_threadid { |
| 41 | + // main thread should be the one invoking this method |
| 42 | + // so main thread could kill itself and we do not need to notify it again |
| 43 | + continue; |
| 44 | + } |
| 45 | + let epoch_handler = entry.value(); |
| 46 | + let guard = epoch_handler.write(); |
| 47 | + let epoch = *guard; |
| 48 | + // SAFETY: see comment at `signal_epoch_trigger` |
| 49 | + unsafe { |
| 50 | + *epoch = EPOCH_KILLED; |
| 51 | + } |
| 52 | + } |
| 53 | +} |
| 54 | + |
| 55 | +// get the current epoch state of the thread |
| 56 | +// thread safety: this function will only be invoked by main thread of the cage |
| 57 | +fn get_epoch_state(cageid: u64, thread_id: u64) -> u64 { |
| 58 | + let cage = cagetable_getref(cageid); |
| 59 | + let epoch_handler = cage |
| 60 | + .epoch_handler |
| 61 | + .get(&(thread_id as i32)) |
| 62 | + .expect("threadid does not exist"); |
| 63 | + let guard = epoch_handler.read(); |
| 64 | + let epoch = *guard; |
| 65 | + // SAFETY: see comment at `signal_epoch_trigger` |
| 66 | + unsafe { *epoch } |
| 67 | +} |
| 68 | + |
| 69 | +// check the specified thread with specified cage is in "killed" state |
| 70 | +// thread safety: this function could possibly be invoked by multiple threads of the same cage |
| 71 | +pub fn thread_check_killed(cageid: u64, thread_id: u64) -> bool { |
| 72 | + let cage = cagetable_getref(cageid); |
| 73 | + // this method should not be invoked if the thread is already killed (i.e. thread is removed from epoch_handler) |
| 74 | + let epoch_handler = cage.epoch_handler.get(&(thread_id as i32)).unwrap(); |
| 75 | + let guard = epoch_handler.write(); |
| 76 | + let epoch = *guard; |
| 77 | + // SAFETY: see comment at `signal_epoch_trigger` |
| 78 | + unsafe { *epoch == EPOCH_KILLED } |
| 79 | +} |
| 80 | + |
| 81 | +// reset the epoch of the main thread of the cage to "normal" state |
| 82 | +// usually invoked when all the pending signals are handled for the cage |
| 83 | +// thread safety: this function will only be invoked by main thread of the cage |
| 84 | +pub fn signal_epoch_reset(cageid: u64) { |
| 85 | + let cage = cagetable_getref(cageid); |
| 86 | + |
| 87 | + let threadid_guard = cage.main_threadid.read(); |
| 88 | + let main_threadid = *threadid_guard; |
| 89 | + let epoch_handler = cage.epoch_handler.get(&main_threadid).unwrap(); |
| 90 | + let guard = epoch_handler.write(); |
| 91 | + let epoch = *guard; |
| 92 | + // SAFETY: see comment at `signal_epoch_trigger` |
| 93 | + unsafe { |
| 94 | + *epoch = EPOCH_NORMAL; |
| 95 | + } |
| 96 | +} |
| 97 | + |
| 98 | +// manually check if the epoch is not in "normal" state |
| 99 | +// useful if we want to do our own epoch check in host |
| 100 | +// thread safety: this function will only be invoked by main thread of the cage |
| 101 | +pub fn signal_check_trigger(cageid: u64) -> bool { |
| 102 | + let cage = cagetable_getref(cageid); |
| 103 | + |
| 104 | + let threadid_guard = cage.main_threadid.read(); |
| 105 | + let main_threadid = *threadid_guard; |
| 106 | + |
| 107 | + let epoch_handler = cage.epoch_handler.get(&main_threadid).unwrap(); |
| 108 | + let guard = epoch_handler.write(); |
| 109 | + let epoch = *guard; |
| 110 | + // SAFETY: see comment at `signal_epoch_trigger` |
| 111 | + unsafe { *epoch > EPOCH_NORMAL } |
| 112 | +} |
| 113 | + |
| 114 | +// check if the signal of the cage is in blocked state |
| 115 | +// thread safety: this function will only be invoked by main thread of the cage |
| 116 | +// but should still work fine if accessed by multiple threads |
| 117 | +pub fn signal_check_block(cageid: u64, signo: i32) -> bool { |
| 118 | + let cage = cagetable_getref(cageid); |
| 119 | + let sigset = cage.sigset.load(RustAtomicOrdering::Relaxed); |
| 120 | + |
| 121 | + // check if the corresponding signal bit is set in sigset |
| 122 | + (sigset & convert_signal_mask(signo)) > 0 |
| 123 | +} |
| 124 | + |
| 125 | +// retrieve the signal handler for the specified signal of the cage |
| 126 | +// if the signal handler does not exist, then return SIG_DFL |
| 127 | +// thread safety: this function will only be invoked by main thread of the cage |
| 128 | +pub fn signal_get_handler(cageid: u64, signo: i32) -> u32 { |
| 129 | + let cage = cagetable_getref(cageid); |
| 130 | + let handler = match cage.signalhandler.get(&signo) { |
| 131 | + Some(action_struct) => { |
| 132 | + action_struct.sa_handler // if we have a handler and its not blocked return it |
| 133 | + } |
| 134 | + None => SIG_DFL as u32, // if we dont have a handler return SIG_DFL |
| 135 | + }; |
| 136 | + handler |
| 137 | +} |
| 138 | + |
| 139 | +// send specified signal to the cage, return value indicates whether the cage exists |
| 140 | +// thread safety: this function could possibly be invoked by multiple threads of the same cage |
| 141 | +pub fn lind_send_signal(cageid: u64, signo: i32) -> bool { |
| 142 | + if let Some(cage) = cagetable_getref_opt(cageid) { |
| 143 | + let mut pending_signals = cage.pending_signals.write(); |
| 144 | + // TODO: currently we are queuing the same signals instead of merging the same signal |
| 145 | + // this is different from linux which always merge the same signal if they havn't been handled yet |
| 146 | + // we queue the signals for now because our epoch based signal implementation could have much longer |
| 147 | + // gap for signal checkings than linux. We need to finally decide whether do the queuing or merging |
| 148 | + // in the future, probably based on some experimental data |
| 149 | + pending_signals.push(signo); |
| 150 | + |
| 151 | + // we only trigger epoch if the signal is not blocked |
| 152 | + if !signal_check_block(cageid, signo) { |
| 153 | + signal_epoch_trigger(cageid); |
| 154 | + } |
| 155 | + |
| 156 | + true |
| 157 | + } else { |
| 158 | + false |
| 159 | + } |
| 160 | +} |
| 161 | + |
// convert a signal number (1..=64) into its single-bit position in a 64-bit signal mask
// Fix: the shift must be performed on a u64. The previous `(1 << (signo - 1)) as u64`
// shifted an i32 literal, which sign-extended for signo == 32 (yielding
// 0xFFFF_FFFF_8000_0000 instead of 0x8000_0000) and overflowed the shift entirely
// for signo > 32 (real-time signals) — a panic in debug builds.
pub fn convert_signal_mask(signo: i32) -> u64 {
    debug_assert!((1..=64).contains(&signo), "signal number out of range");
    1u64 << (signo - 1)
}
| 165 | + |
// retrieve the first unblocked signal in the pending signal list
// returns an optional tuple of:
//   1. the signal number
//   2. the signal handler (SIG_DFL if none is registered)
//   3. a callback that restores the cage's signal mask once the handler finishes
// thread safety: this function will only be invoked by main thread of the cage
pub fn lind_get_first_signal(cageid: u64) -> Option<(i32, u32, Box<dyn Fn(u64)>)> {
    let cage = cagetable_getref(cageid);
    let mut pending_signals = cage.pending_signals.write();
    // snapshot of the current signal mask; the restorer closures below capture
    // it and write it back after the handler runs
    let sigset = cage.sigset.load(RustAtomicOrdering::Relaxed);

    // scan the pending list for the first signal not blocked by the current mask
    if let Some(index) = pending_signals.iter().position(
        |&signo| (sigset & convert_signal_mask(signo)) == 0, // check if signal is blocked
    ) {
        // remove it from the pending queue and take its number
        let signo = pending_signals.remove(index);
        // retrieve the corresponding signal handler
        match cage.signalhandler.get_mut(&signo) {
            Some(mut sigaction) => {
                // NOTE: if sigprocmask is called during the execution of the signal
                // handler, that mask change will not be preserved once the handler
                // finishes — the restorer below overwrites sigset with the snapshot

                // by default, block the same signal for the duration of its handler
                let mut mask_self = convert_signal_mask(signo);
                let signal_handler = sigaction.sa_handler;
                // if SA_RESETHAND is set, reset the handler to default for this signal
                if sigaction.sa_flags as u32 & SA_RESETHAND > 0 {
                    sigaction.sa_handler = SIG_DFL as u32;
                }

                // if SA_NODEFER is set, allow the same signal to interrupt its own handler
                if sigaction.sa_flags as u32 & SA_NODEFER > 0 {
                    mask_self = 0;
                }
                // temporarily extend the signal mask with sa_mask plus (possibly) the signal itself
                cage.sigset
                    .fetch_or(sigaction.sa_mask | mask_self, RustAtomicOrdering::Relaxed);

                // restorer is called when the signal handler finishes: it restores
                // the signal mask to the snapshot taken at the top of this function
                let restorer = Box::new(move |cageid| {
                    let cage = cagetable_getref(cageid);
                    cage.sigset.store(sigset, RustAtomicOrdering::Relaxed);
                });
                Some((signo, signal_handler, restorer))
            }
            None => {
                // no sigaction registered for this signal:
                // signal_get_handler falls back to SIG_DFL
                let signal_handler = signal_get_handler(cageid, signo);
                // same restore-the-snapshot behavior as the registered-handler branch
                let restorer = Box::new(move |cageid| {
                    let cage = cagetable_getref(cageid);
                    cage.sigset.store(sigset, RustAtomicOrdering::Relaxed);
                });
                Some((signo, signal_handler, restorer))
            }
        }
    } else {
        // if there is no pending unblocked signal, we return None
        None
    }
}
| 227 | + |
| 228 | +// check if there is any pending unblocked signals |
| 229 | +// return true if no pending unblocked signals are found |
| 230 | +// thread safety: this function will only be invoked by main thread of the cage |
| 231 | +pub fn lind_check_no_pending_signal(cageid: u64) -> bool { |
| 232 | + let cage = cagetable_getref(cageid); |
| 233 | + let mut pending_signals = cage.pending_signals.write(); |
| 234 | + |
| 235 | + // iterate through each pending signal |
| 236 | + if let Some(index) = pending_signals.iter().position( |
| 237 | + // check if the signal is blocked |
| 238 | + |&signo| !signal_check_block(cageid, signo), |
| 239 | + ) { |
| 240 | + false |
| 241 | + } else { |
| 242 | + true |
| 243 | + } |
| 244 | +} |
| 245 | + |
| 246 | +// initialize the signal for a new thread |
| 247 | +// thread safety: this function could possibly be invoked by multiple threads of the same cage |
| 248 | +pub fn lind_signal_init(cageid: u64, epoch_handler: *mut u64, threadid: i32, is_mainthread: bool) { |
| 249 | + let cage = cagetable_getref(cageid); |
| 250 | + |
| 251 | + // if this is specified as the main thread, then replace the main_threadid field in cage |
| 252 | + if is_mainthread { |
| 253 | + let mut threadid_guard = cage.main_threadid.write(); |
| 254 | + *threadid_guard = threadid; |
| 255 | + } |
| 256 | + let epoch_handler = super::RustLock::new(epoch_handler); |
| 257 | + cage.epoch_handler.insert(threadid, epoch_handler); |
| 258 | +} |
| 259 | + |
// clean up signal state for an exited thread
// return true if this is the last thread in the cage, otherwise return false
// If the exiting thread is the main thread, main-thread duties (signal epoch
// delivery) are migrated to an arbitrary surviving thread.
pub fn lind_thread_exit(cageid: u64, thread_id: u64) -> bool {
    let cage = cagetable_getref(cageid);
    // hold the main_threadid write lock until all the related fields, including
    // epoch_handler, finish updating, so concurrent signal delivery cannot
    // observe a half-migrated main thread
    let mut threadid_guard = cage.main_threadid.write();
    let main_threadid = *threadid_guard as u64;

    let mut last_thread = false;

    if thread_id == main_threadid {
        // if main thread exits, we should find a new main thread
        // unless this is the last thread in the cage
        if let Some(entry) = cage
            .epoch_handler
            .iter()
            .find(|entry| *entry.key() as u64 != thread_id)
        {
            // promote the first other thread found to main thread
            let id = *entry.key();
            *threadid_guard = id;

            // migrate the exiting main thread's epoch state to the new main thread
            // NOTE(review): get_epoch_state performs a second lookup on epoch_handler
            // while `entry` from the iteration above is still held — confirm the map
            // implementation tolerates this re-entrant access without deadlocking
            let state = get_epoch_state(cageid, thread_id);
            let new_thread_epoch_handler = entry.value().write();
            let new_thread_epoch = *new_thread_epoch_handler;
            // TODO: we should also make sure the new thread is not in EPOCH_KILLED state.
            // Will be integrated with process exiting fix
            unsafe {
                *new_thread_epoch = state;
            };
        } else {
            // we just exited the last thread in the cage
            last_thread = true;
        }
    }
    // remove the epoch handler of the exiting thread
    cage.epoch_handler
        .remove(&(thread_id as i32))
        .expect("thread id does not exist!");

    last_thread
}
| 302 | + |
| 303 | +// trigger the epoch if pending signal list is not empty |
| 304 | +// This function is invoked only by a newly exec-ed cage |
| 305 | +// immediately after it completes its initialization. |
| 306 | +// Its purpose is to handle the scenario where Linux resets |
| 307 | +// the signal mask but preserves pending signals after exec. |
| 308 | +// As a result, the new process may receive signals that were |
| 309 | +// pending in the previous process right after it starts. |
| 310 | +pub fn signal_may_trigger(cageid: u64) { |
| 311 | + let cage = cagetable_getref(cageid); |
| 312 | + let pending_signals = cage.pending_signals.read(); |
| 313 | + if !pending_signals.is_empty() { |
| 314 | + signal_epoch_trigger(cageid); |
| 315 | + } |
| 316 | +} |
0 commit comments