signal.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062
  1. /*
  2. * Emulation of BSD signals
  3. *
  4. * Copyright (c) 2003 - 2008 Fabrice Bellard
  5. * Copyright (c) 2013 Stacey Son
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, see <http://www.gnu.org/licenses/>.
  19. */
  20. #include "qemu/osdep.h"
  21. #include "qemu/log.h"
  22. #include "qemu.h"
  23. #include "user/cpu_loop.h"
  24. #include "exec/page-protection.h"
  25. #include "user/page-protection.h"
  26. #include "user/signal.h"
  27. #include "user/tswap-target.h"
  28. #include "gdbstub/user.h"
  29. #include "signal-common.h"
  30. #include "trace.h"
  31. #include "hw/core/tcg-cpu-ops.h"
  32. #include "host-signal.h"
  33. /* target_siginfo_t must fit in gdbstub's siginfo save area. */
  34. QEMU_BUILD_BUG_ON(sizeof(target_siginfo_t) > MAX_SIGINFO_LENGTH);
  35. static struct target_sigaction sigact_table[TARGET_NSIG];
  36. static void host_signal_handler(int host_sig, siginfo_t *info, void *puc);
  37. static void target_to_host_sigset_internal(sigset_t *d,
  38. const target_sigset_t *s);
  39. static inline int on_sig_stack(TaskState *ts, unsigned long sp)
  40. {
  41. return sp - ts->sigaltstack_used.ss_sp < ts->sigaltstack_used.ss_size;
  42. }
  43. static inline int sas_ss_flags(TaskState *ts, unsigned long sp)
  44. {
  45. return ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE :
  46. on_sig_stack(ts, sp) ? SS_ONSTACK : 0;
  47. }
  48. int host_interrupt_signal = SIGRTMAX;
  49. /*
  50. * The BSD ABIs use the same signal numbers across all the CPU architectures, so
  51. * (unlike Linux) these functions are just the identity mapping. This might not
  52. * be true for XyzBSD running on AbcBSD, which doesn't currently work.
  53. */
  54. int host_to_target_signal(int sig)
  55. {
  56. return sig;
  57. }
  58. int target_to_host_signal(int sig)
  59. {
  60. return sig;
  61. }
  62. static inline void target_sigemptyset(target_sigset_t *set)
  63. {
  64. memset(set, 0, sizeof(*set));
  65. }
  66. static inline void target_sigaddset(target_sigset_t *set, int signum)
  67. {
  68. signum--;
  69. uint32_t mask = (uint32_t)1 << (signum % TARGET_NSIG_BPW);
  70. set->__bits[signum / TARGET_NSIG_BPW] |= mask;
  71. }
  72. static inline int target_sigismember(const target_sigset_t *set, int signum)
  73. {
  74. signum--;
  75. abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
  76. return (set->__bits[signum / TARGET_NSIG_BPW] & mask) != 0;
  77. }
  78. /* Adjust the signal context to rewind out of safe-syscall if we're in it */
/* Adjust the signal context to rewind out of safe-syscall if we're in it */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    ucontext_t *uc = (ucontext_t *)puc;
    uintptr_t pcreg = host_signal_pc(uc);

    /*
     * If the interrupted PC is inside the safe_syscall assembly window
     * (strictly between start and end), move it back to the start so the
     * pending-signal check runs again before the host syscall is issued.
     */
    if (pcreg > (uintptr_t)safe_syscall_start
        && pcreg < (uintptr_t)safe_syscall_end) {
        host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
    }
}
  88. /*
  89. * Note: The following take advantage of the BSD signal property that all
  90. * signals are available on all architectures.
  91. */
  92. static void host_to_target_sigset_internal(target_sigset_t *d,
  93. const sigset_t *s)
  94. {
  95. int i;
  96. target_sigemptyset(d);
  97. for (i = 1; i <= NSIG; i++) {
  98. if (sigismember(s, i)) {
  99. target_sigaddset(d, host_to_target_signal(i));
  100. }
  101. }
  102. }
  103. void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
  104. {
  105. target_sigset_t d1;
  106. int i;
  107. host_to_target_sigset_internal(&d1, s);
  108. for (i = 0; i < _SIG_WORDS; i++) {
  109. d->__bits[i] = tswap32(d1.__bits[i]);
  110. }
  111. }
  112. static void target_to_host_sigset_internal(sigset_t *d,
  113. const target_sigset_t *s)
  114. {
  115. int i;
  116. sigemptyset(d);
  117. for (i = 1; i <= TARGET_NSIG; i++) {
  118. if (target_sigismember(s, i)) {
  119. sigaddset(d, target_to_host_signal(i));
  120. }
  121. }
  122. }
  123. void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
  124. {
  125. target_sigset_t s1;
  126. int i;
  127. for (i = 0; i < TARGET_NSIG_WORDS; i++) {
  128. s1.__bits[i] = tswap32(s->__bits[i]);
  129. }
  130. target_to_host_sigset_internal(d, &s1);
  131. }
  132. static bool has_trapno(int tsig)
  133. {
  134. return tsig == TARGET_SIGILL ||
  135. tsig == TARGET_SIGFPE ||
  136. tsig == TARGET_SIGSEGV ||
  137. tsig == TARGET_SIGBUS ||
  138. tsig == TARGET_SIGTRAP;
  139. }
  140. /* Siginfo conversion. */
  141. /*
  142. * Populate tinfo w/o swapping based on guessing which fields are valid.
  143. */
/*
 * Populate tinfo w/o swapping based on guessing which fields are valid.
 */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;

    /*
     * Make sure that the variable portion of the target siginfo is zeroed
     * out so we don't leak anything into that.
     */
    memset(&tinfo->_reason, 0, sizeof(tinfo->_reason));

    /*
     * This is awkward, because we have to use a combination of the si_code and
     * si_signo to figure out which of the union's members are valid. We
     * therefore make our best guess.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */
    tinfo->si_signo = sig;
    tinfo->si_errno = info->si_errno;
    tinfo->si_code = info->si_code;
    tinfo->si_pid = info->si_pid;
    tinfo->si_uid = info->si_uid;
    tinfo->si_status = info->si_status;
    tinfo->si_addr = (abi_ulong)(unsigned long)info->si_addr;
    /*
     * si_value is opaque to kernel. On all FreeBSD platforms,
     * sizeof(sival_ptr) >= sizeof(sival_int) so the following
     * always will copy the larger element.
     */
    tinfo->si_value.sival_ptr =
        (abi_ulong)(unsigned long)info->si_value.sival_ptr;

    switch (si_code) {
        /*
         * All the SI_xxx codes that are defined here are global to
         * all the signals (they have values that none of the other,
         * more specific signal info will set).
         */
    case SI_USER:
    case SI_LWP:
    case SI_KERNEL:
    case SI_QUEUE:
    case SI_ASYNCIO:
        /*
         * Only the fixed parts are valid (though FreeBSD doesn't always
         * set all the fields to non-zero values).
         */
        si_type = QEMU_SI_NOINFO;
        break;
    case SI_TIMER:
        /* POSIX timer expiry: timer id and overrun count are valid. */
        tinfo->_reason._timer._timerid = info->_reason._timer._timerid;
        tinfo->_reason._timer._overrun = info->_reason._timer._overrun;
        si_type = QEMU_SI_TIMER;
        break;
    case SI_MESGQ:
        /* POSIX message queue notification: mqd is valid. */
        tinfo->_reason._mesgq._mqd = info->_reason._mesgq._mqd;
        si_type = QEMU_SI_MESGQ;
        break;
    default:
        /*
         * We have to go based on the signal number now to figure out
         * what's valid.
         */
        si_type = QEMU_SI_NOINFO;
        if (has_trapno(sig)) {
            /* Hardware fault signals carry the trap number. */
            tinfo->_reason._fault._trapno = info->_reason._fault._trapno;
            si_type = QEMU_SI_FAULT;
        }
#ifdef TARGET_SIGPOLL
        /*
         * FreeBSD never had SIGPOLL, but emulates it for Linux so there's
         * a chance it may popup in the future.
         */
        if (sig == TARGET_SIGPOLL) {
            tinfo->_reason._poll._band = info->_reason._poll._band;
            si_type = QEMU_SI_POLL;
        }
#endif
        /*
         * Unsure that this can actually be generated, and our support for
         * capsicum is somewhere between weak and non-existent, but if we get
         * one, then we know what to save.
         */
#ifdef QEMU_SI_CAPSICUM
        if (sig == TARGET_SIGTRAP) {
            tinfo->_reason._capsicum._syscall =
                info->_reason._capsicum._syscall;
            si_type = QEMU_SI_CAPSICUM;
        }
#endif
        break;
    }
    /* Stash the guessed union discriminator in the top byte of si_code. */
    tinfo->si_code = deposit32(si_code, 24, 8, si_type);
}
/*
 * Byte-swap a target_siginfo_t into guest byte order, stripping the
 * internal QEMU_SI_* discriminator that host_to_target_siginfo_noswap()
 * stored in the top 8 bits of si_code.  @tinfo and @info may alias.
 */
static void tswap_siginfo(target_siginfo_t *tinfo, const target_siginfo_t *info)
{
    /* Recover the union discriminator and the real (sign-extended) code. */
    int si_type = extract32(info->si_code, 24, 8);
    int si_code = sextract32(info->si_code, 0, 24);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code); /* Zero out si_type, it's internal */
    __put_user(info->si_pid, &tinfo->si_pid);
    __put_user(info->si_uid, &tinfo->si_uid);
    __put_user(info->si_status, &tinfo->si_status);
    __put_user(info->si_addr, &tinfo->si_addr);
    /*
     * Unswapped, because we passed it through mostly untouched. si_value is
     * opaque to the kernel, so we didn't bother with potentially wasting cycles
     * to swap it into host byte order.
     */
    tinfo->si_value.sival_ptr = info->si_value.sival_ptr;

    /*
     * We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_NOINFO: /* No additional info */
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_reason._fault._trapno,
                   &tinfo->_reason._fault._trapno);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_reason._timer._timerid,
                   &tinfo->_reason._timer._timerid);
        __put_user(info->_reason._timer._overrun,
                   &tinfo->_reason._timer._overrun);
        break;
    case QEMU_SI_MESGQ:
        __put_user(info->_reason._mesgq._mqd, &tinfo->_reason._mesgq._mqd);
        break;
    case QEMU_SI_POLL:
        /* Note: Not generated on FreeBSD */
        __put_user(info->_reason._poll._band, &tinfo->_reason._poll._band);
        break;
#ifdef QEMU_SI_CAPSICUM
    case QEMU_SI_CAPSICUM:
        __put_user(info->_reason._capsicum._syscall,
                   &tinfo->_reason._capsicum._syscall);
        break;
#endif
    default:
        /* si_type only ever comes from host_to_target_siginfo_noswap(). */
        g_assert_not_reached();
    }
}
/*
 * Convert a host siginfo_t to a guest-order target_siginfo_t, ready to
 * be copied out to guest memory.  Conversion happens in place: first the
 * field-validity guess, then the byte swap.
 */
void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    host_to_target_siginfo_noswap(tinfo, info);
    tswap_siginfo(tinfo, tinfo);
}
/*
 * Block all host signals for the current thread and mark a guest signal
 * check as pending.  Returns the previous value of signal_pending, so a
 * non-zero return tells the caller a signal arrived while it was being
 * set up (callers typically restart the syscall with -TARGET_ERESTART).
 */
int block_signals(void)
{
    TaskState *ts = get_task_state(thread_cpu);
    sigset_t set;

    /*
     * It's OK to block everything including SIGSEGV, because we won't run any
     * further guest code before unblocking signals in
     * process_pending_signals(). We depend on the FreeBSD behavior here where
     * this will only affect this thread's signal mask. We don't use
     * pthread_sigmask which might seem more correct because that routine also
     * does odd things with SIGCANCEL to implement pthread_cancel().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}
  314. /* Returns 1 if given signal should dump core if not handled. */
  315. static int core_dump_signal(int sig)
  316. {
  317. switch (sig) {
  318. case TARGET_SIGABRT:
  319. case TARGET_SIGFPE:
  320. case TARGET_SIGILL:
  321. case TARGET_SIGQUIT:
  322. case TARGET_SIGSEGV:
  323. case TARGET_SIGTRAP:
  324. case TARGET_SIGBUS:
  325. return 1;
  326. default:
  327. return 0;
  328. }
  329. }
  330. /* Abort execution with signal. */
/*
 * Abort execution with signal.  Dumps a guest core (when the binary
 * format supports it), then re-raises the signal with the default host
 * handler installed so the process dies with the correct wait status
 * (-signal), which the parent/shell can observe.
 */
static G_NORETURN
void dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu_env(cpu);
    TaskState *ts = get_task_state(cpu);
    int core_dumped = 0;
    int host_sig;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    gdb_signalled(env, target_sig);

    /* Dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        struct rlimit nodump;

        /*
         * We already dumped the core of target process, we don't want
         * a coredump of qemu itself.
         */
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) "
            "- %s\n", target_sig, strsignal(host_sig), "core dumped");
    }

    /*
     * The proper exit code for dying from an uncaught signal is
     * -<signal>. The kernel doesn't allow exit() or _exit() to pass
     * a negative value. To get the proper exit code we need to
     * actually die from an uncaught signal. Here the default signal
     * handler is installed, we send ourself a signal and we wait for
     * it to arrive.
     */
    memset(&act, 0, sizeof(act));
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    sigaction(host_sig, &act, NULL);

    kill(getpid(), host_sig);

    /*
     * Make sure the signal isn't masked (just reuse the mask inside
     * of act).
     */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
  382. /*
  383. * Queue a signal so that it will be send to the virtual CPU as soon as
  384. * possible.
  385. */
  386. void queue_signal(CPUArchState *env, int sig, int si_type,
  387. target_siginfo_t *info)
  388. {
  389. CPUState *cpu = env_cpu(env);
  390. TaskState *ts = get_task_state(cpu);
  391. trace_user_queue_signal(env, sig);
  392. info->si_code = deposit32(info->si_code, 24, 8, si_type);
  393. ts->sync_signal.info = *info;
  394. ts->sync_signal.pending = sig;
  395. /* Signal that a new signal is pending. */
  396. qatomic_set(&ts->signal_pending, 1);
  397. return;
  398. }
  399. static int fatal_signal(int sig)
  400. {
  401. switch (sig) {
  402. case TARGET_SIGCHLD:
  403. case TARGET_SIGURG:
  404. case TARGET_SIGWINCH:
  405. case TARGET_SIGINFO:
  406. /* Ignored by default. */
  407. return 0;
  408. case TARGET_SIGCONT:
  409. case TARGET_SIGSTOP:
  410. case TARGET_SIGTSTP:
  411. case TARGET_SIGTTIN:
  412. case TARGET_SIGTTOU:
  413. /* Job control signals. */
  414. return 0;
  415. default:
  416. return 1;
  417. }
  418. }
  419. /*
  420. * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
  421. * 'force' part is handled in process_pending_signals().
  422. */
  423. void force_sig_fault(int sig, int code, abi_ulong addr)
  424. {
  425. CPUState *cpu = thread_cpu;
  426. target_siginfo_t info = {};
  427. info.si_signo = sig;
  428. info.si_errno = 0;
  429. info.si_code = code;
  430. info.si_addr = addr;
  431. queue_signal(cpu_env(cpu), sig, QEMU_SI_FAULT, &info);
  432. }
/*
 * First-level host signal handler, installed (with SA_SIGINFO) by
 * signal_init() and do_sigaction().  Records the signal in the per-task
 * emulated signal table for later delivery by process_pending_signals(),
 * with special synchronous handling for real SIGSEGV/SIGBUS faults
 * raised by guest memory accesses.
 */
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUState *cpu = thread_cpu;
    TaskState *ts = get_task_state(cpu);
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;

    /* The reserved interrupt signal only needs to kick the vCPU. */
    if (host_sig == host_interrupt_signal) {
        ts->signal_pending = 1;
        cpu_exit(thread_cpu);
        return;
    }

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.
     */
    if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) {
        MMUAccessType access_type;
        uintptr_t host_addr;
        abi_ptr guest_addr;
        bool is_write;

        host_addr = (uintptr_t)info->si_addr;

        /*
         * Convert forcefully to guest address space: addresses outside
         * reserved_va are still valid to report via SEGV_MAPERR.
         */
        guest_addr = h2g_nocheck(host_addr);

        pc = host_signal_pc(uc);
        is_write = host_signal_write(info, uc);
        access_type = adjust_signal_pc(&pc, is_write);

        if (host_sig == SIGSEGV) {
            bool maperr = true;

            if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
                /* If this was a write to a TB protected page, restart. */
                if (is_write &&
                    handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
                                                pc, guest_addr)) {
                    return;
                }

                /*
                 * With reserved_va, the whole address space is PROT_NONE,
                 * which means that we may get ACCERR when we want MAPERR.
                 */
                if (page_get_flags(guest_addr) & PAGE_VALID) {
                    maperr = false;
                } else {
                    info->si_code = SEGV_MAPERR;
                }
            }

            /* Restore the pre-handler mask before longjmp'ing out. */
            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
        } else {
            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            if (info->si_code == BUS_ADRALN) {
                cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
            }
        }

        sync_sig = true;
    }

    /* Get the target signal number. */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(cpu, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);

    /* Record the pending signal in the per-task table. */
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     */
    sigfillset(&uc->uc_sigmask);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* Interrupt the virtual CPU as soon as possible. */
    cpu_exit(thread_cpu);
}
  529. /* do_sigaltstack() returns target values and errnos. */
  530. /* compare to kern/kern_sig.c sys_sigaltstack() and kern_sigaltstack() */
/* do_sigaltstack() returns target values and errnos. */
/* compare to kern/kern_sig.c sys_sigaltstack() and kern_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    TaskState *ts = get_task_state(thread_cpu);
    int ret;
    target_stack_t oss;

    if (uoss_addr) {
        /* Save current signal stack params before they may be replaced. */
        oss.ss_sp = tswapl(ts->sigaltstack_used.ss_sp);
        oss.ss_size = tswapl(ts->sigaltstack_used.ss_size);
        oss.ss_flags = tswapl(sas_ss_flags(ts, sp));
    }

    if (uss_addr) {
        target_stack_t *uss;
        target_stack_t ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        /* Can't change the alternate stack while executing on it. */
        ret = -TARGET_EPERM;
        if (on_sig_stack(ts, sp)) {
            goto out;
        }

        /* Only SS_DISABLE, SS_ONSTACK, or 0 are accepted in ss_flags. */
        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0) {
            goto out;
        }

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            /* Enforce the minimum usable stack size, as the kernel does. */
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        ts->sigaltstack_used.ss_sp = ss.ss_sp;
        ts->sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        /* Copy the previously-saved old stack out to the guest. */
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss))) {
            goto out;
        }
    }

    ret = 0;
out:
    return ret;
}
  586. /* do_sigaction() return host values and errnos */
/* do_sigaction() return host values and errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    /* SIGKILL and SIGSTOP may only be reset to their default action. */
    if ((sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) &&
        act != NULL && act->_sa_handler != TARGET_SIG_DFL) {
        return -TARGET_EINVAL;
    }

    /* Serialize against signal delivery; restart if one slipped in. */
    if (block_signals()) {
        return -TARGET_ERESTART;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        /* Report the previously registered action (in target order). */
        oact->_sa_handler = tswapal(k->_sa_handler);
        oact->sa_flags = tswap32(k->sa_flags);
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        k->_sa_handler = tswapal(act->_sa_handler);
        k->sa_flags = tswap32(act->sa_flags);
        k->sa_mask = act->sa_mask;

        /*
         * Update the host signal state. SIGSEGV/SIGBUS always keep QEMU's
         * own handler installed (needed for guest-fault handling).
         */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            memset(&act1, 0, sizeof(struct sigaction));
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /*
             * Note: It is important to update the host kernel signal mask to
             * avoid getting unexpected interrupted system calls.
             */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    /* Default-fatal: QEMU must intercept to emulate dying. */
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                /* Guest installed a handler: route through QEMU. */
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
  643. static inline abi_ulong get_sigframe(struct target_sigaction *ka,
  644. CPUArchState *env, size_t frame_size)
  645. {
  646. TaskState *ts = get_task_state(thread_cpu);
  647. abi_ulong sp;
  648. /* Use default user stack */
  649. sp = get_sp_from_cpustate(env);
  650. if ((ka->sa_flags & TARGET_SA_ONSTACK) && sas_ss_flags(ts, sp) == 0) {
  651. sp = ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
  652. }
  653. return ROUND_DOWN(sp - frame_size, TARGET_SIGSTACK_ALIGN);
  654. }
  655. /* compare to $M/$M/exec_machdep.c sendsig and sys/kern/kern_sig.c sigexit */
/* compare to $M/$M/exec_machdep.c sendsig and sys/kern/kern_sig.c sigexit */
/*
 * Build the signal frame on the guest stack: saved machine context, the
 * old signal mask, and (when the handler wants SA_SIGINFO) the siginfo.
 * On any failure to map the frame, the guest is killed with SIGILL.
 */
static void setup_frame(int sig, int code, struct target_sigaction *ka,
    target_sigset_t *set, target_siginfo_t *tinfo, CPUArchState *env)
{
    struct target_sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        unlock_user_struct(frame, frame_addr, 1);
        /* Unwritable stack: fatal, matching kernel sigexit behavior. */
        dump_core_and_abort(TARGET_SIGILL);
        return;
    }

    memset(frame, 0, sizeof(*frame));
    setup_sigframe_arch(env, frame_addr, frame, 0);

    /* Store the mask to restore on sigreturn. */
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->__bits[i], &frame->sf_uc.uc_sigmask.__bits[i]);
    }

    if (tinfo) {
        frame->sf_si.si_signo = tinfo->si_signo;
        frame->sf_si.si_errno = tinfo->si_errno;
        frame->sf_si.si_code = tinfo->si_code;
        frame->sf_si.si_pid = tinfo->si_pid;
        frame->sf_si.si_uid = tinfo->si_uid;
        frame->sf_si.si_status = tinfo->si_status;
        frame->sf_si.si_addr = tinfo->si_addr;
        /* see host_to_target_siginfo_noswap() for more details */
        frame->sf_si.si_value.sival_ptr = tinfo->si_value.sival_ptr;
        /*
         * At this point, whatever is in the _reason union is complete
         * and in target order, so just copy the whole thing over, even
         * if it's too large for this specific signal.
         * host_to_target_siginfo_noswap() and tswap_siginfo() have ensured
         * that's so.
         */
        memcpy(&frame->sf_si._reason, &tinfo->_reason,
               sizeof(tinfo->_reason));
    }

    set_sigtramp_args(env, sig, frame, frame_addr, ka);

    unlock_user_struct(frame, frame_addr, 1);
}
  697. static int reset_signal_mask(target_ucontext_t *ucontext)
  698. {
  699. int i;
  700. sigset_t blocked;
  701. target_sigset_t target_set;
  702. TaskState *ts = get_task_state(thread_cpu);
  703. for (i = 0; i < TARGET_NSIG_WORDS; i++) {
  704. __get_user(target_set.__bits[i], &ucontext->uc_sigmask.__bits[i]);
  705. }
  706. target_to_host_sigset_internal(&blocked, &target_set);
  707. ts->signal_mask = blocked;
  708. return 0;
  709. }
  710. /* See sys/$M/$M/exec_machdep.c sigreturn() */
/* See sys/$M/$M/exec_machdep.c sigreturn() */
/*
 * Implement the guest sigreturn(2): restore register state and signal
 * mask from the ucontext saved by setup_frame().  Returns
 * -TARGET_EJUSTRETURN on success (registers already set; no return
 * value should be written back) or -TARGET_EFAULT on a bad frame.
 */
long do_sigreturn(CPUArchState *env, abi_ulong addr)
{
    long ret;
    abi_ulong target_ucontext;
    target_ucontext_t *ucontext = NULL;

    /* Get the target ucontext address from the stack frame */
    ret = get_ucontext_sigreturn(env, addr, &target_ucontext);
    if (is_error(ret)) {
        return ret;
    }
    trace_user_do_sigreturn(env, addr);
    if (!lock_user_struct(VERIFY_READ, ucontext, target_ucontext, 0)) {
        goto badframe;
    }

    /* Set the register state back to before the signal. */
    if (set_mcontext(env, &ucontext->uc_mcontext, 1)) {
        goto badframe;
    }

    /* And reset the signal mask. */
    if (reset_signal_mask(ucontext)) {
        goto badframe;
    }

    unlock_user_struct(ucontext, target_ucontext, 0);
    return -TARGET_EJUSTRETURN;

badframe:
    if (ucontext != NULL) {
        unlock_user_struct(ucontext, target_ucontext, 0);
    }
    return -TARGET_EFAULT;
}
/*
 * One-time per-task signal setup: capture the inherited host signal
 * mask and dispositions, mirror SIG_IGN/SIG_DFL into sigact_table, and
 * install host_signal_handler for every default-fatal signal plus the
 * reserved host interrupt signal.
 */
void signal_init(void)
{
    TaskState *ts = get_task_state(thread_cpu);
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_sigaction = host_signal_handler;
    act.sa_flags = SA_SIGINFO;

    for (i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        /* The interrupt signal is reserved for QEMU's own use. */
        if (host_sig == host_interrupt_signal) {
            continue;
        }
        /* Inherit ignore/default dispositions from our parent. */
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /*
         * If there's already a handler installed then something has
         * gone horribly wrong, so don't even try to handle that case.
         * Install some handlers for our own use. We need at least
         * SIGSEGV and SIGBUS, to detect exceptions. We can not just
         * trap all signals because it affects syscall interrupt
         * behavior. But do trap all default-fatal signals.
         */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
    sigaction(host_interrupt_signal, &act, NULL);
}
/*
 * Deliver one pending guest signal @sig recorded in table slot @k:
 * consult gdb, then either perform the default action (stop, ignore, or
 * die dumping core) or build a signal frame and redirect the guest to
 * its registered handler with the appropriate mask in effect.
 */
static void handle_pending_signal(CPUArchState *env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = get_task_state(cpu);
    struct target_sigaction *sa;
    int code;
    sigset_t set;
    abi_ulong handler;
    target_siginfo_t tinfo;
    target_sigset_t target_old_set;

    trace_user_handle_signal(env, sig);

    /* Consume the slot before any chance of re-entry. */
    k->pending = 0;

    /* gdb may swallow the signal (returns 0) or replace it. */
    sig = gdb_handlesig(cpu, sig, NULL, &k->info, sizeof(k->info));
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /*
         * default handler : ignore some signal. The other are job
         * control or fatal.
         */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD && sig != TARGET_SIGURG &&
                   sig != TARGET_SIGINFO && sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /*
         * SA_NODEFER indicates that the current signal should not be
         * blocked during the handler.
         */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /*
         * Save the previous blocked signal state to restore it at the
         * end of the signal execution (see do_sigreturn).
         */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = false;
        sigprocmask(SIG_SETMASK, &ts->signal_mask, NULL);

        /* XXX VM86 on x86 ??? */

        code = k->info.si_code; /* From host, so no si_type */
        /* prepare the stack frame of the virtual CPU */
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            tswap_siginfo(&tinfo, &k->info);
            setup_frame(sig, code, sa, &target_old_set, &tinfo, env);
        } else {
            setup_frame(sig, code, sa, &target_old_set, NULL, env);
        }
        /* SA_RESETHAND: one-shot handler reverts to default action. */
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}
/*
 * Drain all deliverable pending signals for the current emulated thread.
 *
 * All host signals are blocked while the pending state is scanned, so a
 * host signal handler cannot mutate it mid-scan.  Synchronous
 * (CPU-generated) signals are delivered first, then asynchronous ones in
 * signal-number order; after each async delivery the scan restarts from
 * the top because the handler itself may have raised a new synchronous
 * signal.  The loop repeats as long as unblocking host signals causes
 * signal_pending to be set again.
 */
void process_pending_signals(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    int sig;
    sigset_t *blocked_set, set;
    struct emulated_sigtable *k;
    TaskState *ts = get_task_state(cpu);

    while (qatomic_read(&ts->signal_pending)) {
        /* Block every host signal while we manipulate the pending state. */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

restart_scan:
        /* Synchronous signals are delivered ahead of queued async ones. */
        sig = ts->sync_signal.pending;
        if (sig) {
            /*
             * Synchronous signals are forced by the emulated CPU in some way.
             * If they are set to ignore, restore the default handler (see
             * sys/kern_sig.c trapsignal() and execsigs() for this behavior)
             * though maybe this is done only when forcing exit for non SIGCHLD.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal(sig)) ||
                sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal(sig));
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }
            handle_pending_signal(env, sig, &ts->sync_signal);
        }

        k = ts->sigtab;
        for (sig = 1; sig <= TARGET_NSIG; sig++, k++) {
            /*
             * Re-evaluate each iteration: delivering a signal above may
             * have changed in_sigsuspend or the masks.
             */
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;
            if (k->pending &&
                !sigismember(blocked_set, target_to_host_signal(sig))) {
                handle_pending_signal(env, sig, k);
                /*
                 * Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /*
         * Unblock signals and check one more time. Unblocking signals may cause
         * us to take another host signal, which will set signal_pending again.
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = false;
        set = ts->signal_mask;
        /* SIGSEGV/SIGBUS must stay deliverable so guest faults are caught. */
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = false;
}
  907. void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
  908. MMUAccessType access_type, bool maperr, uintptr_t ra)
  909. {
  910. const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
  911. if (tcg_ops->record_sigsegv) {
  912. tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
  913. }
  914. force_sig_fault(TARGET_SIGSEGV,
  915. maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
  916. addr);
  917. cpu->exception_index = EXCP_INTERRUPT;
  918. cpu_loop_exit_restore(cpu, ra);
  919. }
  920. void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
  921. MMUAccessType access_type, uintptr_t ra)
  922. {
  923. const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
  924. if (tcg_ops->record_sigbus) {
  925. tcg_ops->record_sigbus(cpu, addr, access_type, ra);
  926. }
  927. force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
  928. cpu->exception_index = EXCP_INTERRUPT;
  929. cpu_loop_exit_restore(cpu, ra);
  930. }