signal.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049
  1. /*
  2. * Emulation of BSD signals
  3. *
  4. * Copyright (c) 2003 - 2008 Fabrice Bellard
  5. * Copyright (c) 2013 Stacey Son
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, see <http://www.gnu.org/licenses/>.
  19. */
  20. #include "qemu/osdep.h"
  21. #include "qemu/log.h"
  22. #include "qemu.h"
  23. #include "user/cpu_loop.h"
  24. #include "exec/page-protection.h"
  25. #include "user/page-protection.h"
  26. #include "user/tswap-target.h"
  27. #include "gdbstub/user.h"
  28. #include "signal-common.h"
  29. #include "trace.h"
  30. #include "hw/core/tcg-cpu-ops.h"
  31. #include "host-signal.h"
/* target_siginfo_t must fit in gdbstub's siginfo save area. */
QEMU_BUILD_BUG_ON(sizeof(target_siginfo_t) > MAX_SIGINFO_LENGTH);

/* Emulated guest signal dispositions, indexed by target signal number - 1. */
static struct target_sigaction sigact_table[TARGET_NSIG];

/* Forward declarations (defined below). */
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc);
static void target_to_host_sigset_internal(sigset_t *d,
        const target_sigset_t *s);
  38. static inline int on_sig_stack(TaskState *ts, unsigned long sp)
  39. {
  40. return sp - ts->sigaltstack_used.ss_sp < ts->sigaltstack_used.ss_size;
  41. }
  42. static inline int sas_ss_flags(TaskState *ts, unsigned long sp)
  43. {
  44. return ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE :
  45. on_sig_stack(ts, sp) ? SS_ONSTACK : 0;
  46. }
  47. /*
  48. * The BSD ABIs use the same signal numbers across all the CPU architectures, so
  49. * (unlike Linux) these functions are just the identity mapping. This might not
  50. * be true for XyzBSD running on AbcBSD, which doesn't currently work.
  51. */
  52. int host_to_target_signal(int sig)
  53. {
  54. return sig;
  55. }
  56. int target_to_host_signal(int sig)
  57. {
  58. return sig;
  59. }
  60. static inline void target_sigemptyset(target_sigset_t *set)
  61. {
  62. memset(set, 0, sizeof(*set));
  63. }
  64. static inline void target_sigaddset(target_sigset_t *set, int signum)
  65. {
  66. signum--;
  67. uint32_t mask = (uint32_t)1 << (signum % TARGET_NSIG_BPW);
  68. set->__bits[signum / TARGET_NSIG_BPW] |= mask;
  69. }
  70. static inline int target_sigismember(const target_sigset_t *set, int signum)
  71. {
  72. signum--;
  73. abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
  74. return (set->__bits[signum / TARGET_NSIG_BPW] & mask) != 0;
  75. }
  76. /* Adjust the signal context to rewind out of safe-syscall if we're in it */
  77. static inline void rewind_if_in_safe_syscall(void *puc)
  78. {
  79. ucontext_t *uc = (ucontext_t *)puc;
  80. uintptr_t pcreg = host_signal_pc(uc);
  81. if (pcreg > (uintptr_t)safe_syscall_start
  82. && pcreg < (uintptr_t)safe_syscall_end) {
  83. host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
  84. }
  85. }
  86. /*
  87. * Note: The following take advantage of the BSD signal property that all
  88. * signals are available on all architectures.
  89. */
  90. static void host_to_target_sigset_internal(target_sigset_t *d,
  91. const sigset_t *s)
  92. {
  93. int i;
  94. target_sigemptyset(d);
  95. for (i = 1; i <= NSIG; i++) {
  96. if (sigismember(s, i)) {
  97. target_sigaddset(d, host_to_target_signal(i));
  98. }
  99. }
  100. }
  101. void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
  102. {
  103. target_sigset_t d1;
  104. int i;
  105. host_to_target_sigset_internal(&d1, s);
  106. for (i = 0; i < _SIG_WORDS; i++) {
  107. d->__bits[i] = tswap32(d1.__bits[i]);
  108. }
  109. }
  110. static void target_to_host_sigset_internal(sigset_t *d,
  111. const target_sigset_t *s)
  112. {
  113. int i;
  114. sigemptyset(d);
  115. for (i = 1; i <= TARGET_NSIG; i++) {
  116. if (target_sigismember(s, i)) {
  117. sigaddset(d, target_to_host_signal(i));
  118. }
  119. }
  120. }
  121. void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
  122. {
  123. target_sigset_t s1;
  124. int i;
  125. for (i = 0; i < TARGET_NSIG_WORDS; i++) {
  126. s1.__bits[i] = tswap32(s->__bits[i]);
  127. }
  128. target_to_host_sigset_internal(d, &s1);
  129. }
  130. static bool has_trapno(int tsig)
  131. {
  132. return tsig == TARGET_SIGILL ||
  133. tsig == TARGET_SIGFPE ||
  134. tsig == TARGET_SIGSEGV ||
  135. tsig == TARGET_SIGBUS ||
  136. tsig == TARGET_SIGTRAP;
  137. }
/* Siginfo conversion. */

/*
 * Populate tinfo w/o swapping based on guessing which fields are valid.
 */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
        const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;

    /*
     * Make sure that the variable portion of the target siginfo is zeroed
     * out so we don't leak anything into that.
     */
    memset(&tinfo->_reason, 0, sizeof(tinfo->_reason));

    /*
     * This is awkward, because we have to use a combination of the si_code and
     * si_signo to figure out which of the union's members are valid. We
     * therefore make our best guess.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */
    tinfo->si_signo = sig;
    tinfo->si_errno = info->si_errno;
    tinfo->si_code = info->si_code;
    tinfo->si_pid = info->si_pid;
    tinfo->si_uid = info->si_uid;
    tinfo->si_status = info->si_status;
    tinfo->si_addr = (abi_ulong)(unsigned long)info->si_addr;
    /*
     * si_value is opaque to kernel. On all FreeBSD platforms,
     * sizeof(sival_ptr) >= sizeof(sival_int) so the following
     * always will copy the larger element.
     */
    tinfo->si_value.sival_ptr =
        (abi_ulong)(unsigned long)info->si_value.sival_ptr;

    switch (si_code) {
        /*
         * All the SI_xxx codes that are defined here are global to
         * all the signals (they have values that none of the other,
         * more specific signal info will set).
         */
    case SI_USER:
    case SI_LWP:
    case SI_KERNEL:
    case SI_QUEUE:
    case SI_ASYNCIO:
        /*
         * Only the fixed parts are valid (though FreeBSD doesn't always
         * set all the fields to non-zero values.
         */
        si_type = QEMU_SI_NOINFO;
        break;
    case SI_TIMER:
        tinfo->_reason._timer._timerid = info->_reason._timer._timerid;
        tinfo->_reason._timer._overrun = info->_reason._timer._overrun;
        si_type = QEMU_SI_TIMER;
        break;
    case SI_MESGQ:
        tinfo->_reason._mesgq._mqd = info->_reason._mesgq._mqd;
        si_type = QEMU_SI_MESGQ;
        break;
    default:
        /*
         * We have to go based on the signal number now to figure out
         * what's valid.
         */
        si_type = QEMU_SI_NOINFO;
        if (has_trapno(sig)) {
            tinfo->_reason._fault._trapno = info->_reason._fault._trapno;
            si_type = QEMU_SI_FAULT;
        }
#ifdef TARGET_SIGPOLL
        /*
         * FreeBSD never had SIGPOLL, but emulates it for Linux so there's
         * a chance it may popup in the future.
         */
        if (sig == TARGET_SIGPOLL) {
            tinfo->_reason._poll._band = info->_reason._poll._band;
            si_type = QEMU_SI_POLL;
        }
#endif
        /*
         * Unsure that this can actually be generated, and our support for
         * capsicum is somewhere between weak and non-existent, but if we get
         * one, then we know what to save.
         */
#ifdef QEMU_SI_CAPSICUM
        if (sig == TARGET_SIGTRAP) {
            tinfo->_reason._capsicum._syscall =
                info->_reason._capsicum._syscall;
            si_type = QEMU_SI_CAPSICUM;
        }
#endif
        break;
    }
    /* Stash our si_type guess in the top byte for tswap_siginfo(). */
    tinfo->si_code = deposit32(si_code, 24, 8, si_type);
}
/*
 * Byte-swap a target siginfo into guest byte order, stripping the
 * internal QEMU_SI_* marker from si_code.  Safe to call with
 * tinfo == info: every field is read before it is rewritten.
 */
static void tswap_siginfo(target_siginfo_t *tinfo, const target_siginfo_t *info)
{
    /* Recover the validity guess recorded by host_to_target_siginfo_noswap. */
    int si_type = extract32(info->si_code, 24, 8);
    int si_code = sextract32(info->si_code, 0, 24);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code); /* Zero out si_type, it's internal */
    __put_user(info->si_pid, &tinfo->si_pid);
    __put_user(info->si_uid, &tinfo->si_uid);
    __put_user(info->si_status, &tinfo->si_status);
    __put_user(info->si_addr, &tinfo->si_addr);
    /*
     * Unswapped, because we passed it through mostly untouched. si_value is
     * opaque to the kernel, so we didn't bother with potentially wasting cycles
     * to swap it into host byte order.
     */
    tinfo->si_value.sival_ptr = info->si_value.sival_ptr;

    /*
     * We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_NOINFO:        /* No additional info */
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_reason._fault._trapno,
                   &tinfo->_reason._fault._trapno);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_reason._timer._timerid,
                   &tinfo->_reason._timer._timerid);
        __put_user(info->_reason._timer._overrun,
                   &tinfo->_reason._timer._overrun);
        break;
    case QEMU_SI_MESGQ:
        __put_user(info->_reason._mesgq._mqd, &tinfo->_reason._mesgq._mqd);
        break;
    case QEMU_SI_POLL:
        /* Note: Not generated on FreeBSD */
        __put_user(info->_reason._poll._band, &tinfo->_reason._poll._band);
        break;
#ifdef QEMU_SI_CAPSICUM
    case QEMU_SI_CAPSICUM:
        __put_user(info->_reason._capsicum._syscall,
                   &tinfo->_reason._capsicum._syscall);
        break;
#endif
    default:
        /* si_type was written by us; any other value is a logic error. */
        g_assert_not_reached();
    }
}
  291. void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
  292. {
  293. host_to_target_siginfo_noswap(tinfo, info);
  294. tswap_siginfo(tinfo, tinfo);
  295. }
  296. int block_signals(void)
  297. {
  298. TaskState *ts = get_task_state(thread_cpu);
  299. sigset_t set;
  300. /*
  301. * It's OK to block everything including SIGSEGV, because we won't run any
  302. * further guest code before unblocking signals in
  303. * process_pending_signals(). We depend on the FreeBSD behavior here where
  304. * this will only affect this thread's signal mask. We don't use
  305. * pthread_sigmask which might seem more correct because that routine also
  306. * does odd things with SIGCANCEL to implement pthread_cancel().
  307. */
  308. sigfillset(&set);
  309. sigprocmask(SIG_SETMASK, &set, 0);
  310. return qatomic_xchg(&ts->signal_pending, 1);
  311. }
  312. /* Returns 1 if given signal should dump core if not handled. */
  313. static int core_dump_signal(int sig)
  314. {
  315. switch (sig) {
  316. case TARGET_SIGABRT:
  317. case TARGET_SIGFPE:
  318. case TARGET_SIGILL:
  319. case TARGET_SIGQUIT:
  320. case TARGET_SIGSEGV:
  321. case TARGET_SIGTRAP:
  322. case TARGET_SIGBUS:
  323. return 1;
  324. default:
  325. return 0;
  326. }
  327. }
/*
 * Abort execution with signal.  Optionally dumps a guest core via the
 * binary-format hook, then re-raises the host signal with the default
 * handler installed so qemu itself dies with the proper wait status.
 */
static G_NORETURN
void dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu_env(cpu);
    TaskState *ts = get_task_state(cpu);
    int core_dumped = 0;
    int host_sig;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    /* Let an attached gdb see the terminating signal first. */
    gdb_signalled(env, target_sig);

    /* Dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        struct rlimit nodump;

        /*
         * We already dumped the core of target process, we don't want
         * a coredump of qemu itself.
         */
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) "
            "- %s\n", target_sig, strsignal(host_sig), "core dumped");
    }

    /*
     * The proper exit code for dying from an uncaught signal is
     * -<signal>. The kernel doesn't allow exit() or _exit() to pass
     * a negative value. To get the proper exit code we need to
     * actually die from an uncaught signal. Here the default signal
     * handler is installed, we send ourself a signal and we wait for
     * it to arrive.
     */
    memset(&act, 0, sizeof(act));
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    sigaction(host_sig, &act, NULL);

    kill(getpid(), host_sig);

    /*
     * Make sure the signal isn't masked (just reuse the mask inside
     * of act).
     */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
  380. /*
  381. * Queue a signal so that it will be send to the virtual CPU as soon as
  382. * possible.
  383. */
  384. void queue_signal(CPUArchState *env, int sig, int si_type,
  385. target_siginfo_t *info)
  386. {
  387. CPUState *cpu = env_cpu(env);
  388. TaskState *ts = get_task_state(cpu);
  389. trace_user_queue_signal(env, sig);
  390. info->si_code = deposit32(info->si_code, 24, 8, si_type);
  391. ts->sync_signal.info = *info;
  392. ts->sync_signal.pending = sig;
  393. /* Signal that a new signal is pending. */
  394. qatomic_set(&ts->signal_pending, 1);
  395. return;
  396. }
  397. static int fatal_signal(int sig)
  398. {
  399. switch (sig) {
  400. case TARGET_SIGCHLD:
  401. case TARGET_SIGURG:
  402. case TARGET_SIGWINCH:
  403. case TARGET_SIGINFO:
  404. /* Ignored by default. */
  405. return 0;
  406. case TARGET_SIGCONT:
  407. case TARGET_SIGSTOP:
  408. case TARGET_SIGTSTP:
  409. case TARGET_SIGTTIN:
  410. case TARGET_SIGTTOU:
  411. /* Job control signals. */
  412. return 0;
  413. default:
  414. return 1;
  415. }
  416. }
  417. /*
  418. * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
  419. * 'force' part is handled in process_pending_signals().
  420. */
  421. void force_sig_fault(int sig, int code, abi_ulong addr)
  422. {
  423. CPUState *cpu = thread_cpu;
  424. target_siginfo_t info = {};
  425. info.si_signo = sig;
  426. info.si_errno = 0;
  427. info.si_code = code;
  428. info.si_addr = addr;
  429. queue_signal(cpu_env(cpu), sig, QEMU_SI_FAULT, &info);
  430. }
/*
 * Host SA_SIGINFO handler installed for all default-fatal signals.
 * Records the signal in the task's emulated sigtab for later delivery;
 * synchronous guest faults (SIGSEGV/SIGBUS) are instead routed straight
 * back into the cpu loop from here.
 */
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUState *cpu = thread_cpu;
    TaskState *ts = get_task_state(cpu);
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.
     */
    if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) {
        MMUAccessType access_type;
        uintptr_t host_addr;
        abi_ptr guest_addr;
        bool is_write;

        host_addr = (uintptr_t)info->si_addr;

        /*
         * Convert forcefully to guest address space: addresses outside
         * reserved_va are still valid to report via SEGV_MAPERR.
         */
        guest_addr = h2g_nocheck(host_addr);

        pc = host_signal_pc(uc);
        is_write = host_signal_write(info, uc);
        access_type = adjust_signal_pc(&pc, is_write);

        if (host_sig == SIGSEGV) {
            bool maperr = true;

            if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
                /* If this was a write to a TB protected page, restart. */
                if (is_write &&
                    handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
                                                pc, guest_addr)) {
                    return;
                }

                /*
                 * With reserved_va, the whole address space is PROT_NONE,
                 * which means that we may get ACCERR when we want MAPERR.
                 */
                if (page_get_flags(guest_addr) & PAGE_VALID) {
                    maperr = false;
                } else {
                    info->si_code = SEGV_MAPERR;
                }
            }

            /* Restore the pre-signal mask before longjmp'ing to the loop. */
            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
        } else {
            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            if (info->si_code == BUS_ADRALN) {
                cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
            }
        }

        sync_sig = true;
    }

    /* Get the target signal number. */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(cpu, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);

    /* Queue the signal for process_pending_signals(). */
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     */
    sigfillset(&uc->uc_sigmask);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* Interrupt the virtual CPU as soon as possible. */
    cpu_exit(thread_cpu);
}
/* do_sigaltstack() returns target values and errnos. */
/* compare to kern/kern_sig.c sys_sigaltstack() and kern_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    TaskState *ts = get_task_state(thread_cpu);
    int ret;
    target_stack_t oss;

    if (uoss_addr) {
        /* Save current signal stack params */
        oss.ss_sp = tswapl(ts->sigaltstack_used.ss_sp);
        oss.ss_size = tswapl(ts->sigaltstack_used.ss_size);
        oss.ss_flags = tswapl(sas_ss_flags(ts, sp));
    }

    if (uss_addr) {
        target_stack_t *uss;
        target_stack_t ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

        /* Copy the new stack description in from guest memory. */
        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        /* Changing the altstack while executing on it is forbidden. */
        ret = -TARGET_EPERM;
        if (on_sig_stack(ts, sp)) {
            goto out;
        }

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0) {
            goto out;
        }

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            /* An enabled altstack must be at least TARGET_MINSIGSTKSZ. */
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        ts->sigaltstack_used.ss_sp = ss.ss_sp;
        ts->sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        /* Write the previously saved state out after validation passed. */
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss))) {
            goto out;
        }
    }

    ret = 0;
out:
    return ret;
}
/*
 * do_sigaction() return host values and errnos.
 * Updates the emulated disposition table and, where needed, mirrors the
 * change into the real host signal handlers.
 */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    /* SIGKILL/SIGSTOP dispositions cannot be changed away from default. */
    if ((sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) &&
        act != NULL && act->_sa_handler != TARGET_SIG_DFL) {
        return -TARGET_EINVAL;
    }

    /* Serialize against signal delivery while mutating sigact_table. */
    if (block_signals()) {
        return -TARGET_ERESTART;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        /* Report the previous disposition in target byte order. */
        oact->_sa_handler = tswapal(k->_sa_handler);
        oact->sa_flags = tswap32(k->sa_flags);
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        k->_sa_handler = tswapal(act->_sa_handler);
        k->sa_flags = tswap32(act->sa_flags);
        k->sa_mask = act->sa_mask;

        /* Update the host signal state. */
        host_sig = target_to_host_signal(sig);
        /* SIGSEGV/SIGBUS must keep our own host handler installed. */
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            memset(&act1, 0, sizeof(struct sigaction));
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /*
             * Note: It is important to update the host kernel signal mask to
             * avoid getting unexpected interrupted system calls.
             */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
  636. static inline abi_ulong get_sigframe(struct target_sigaction *ka,
  637. CPUArchState *env, size_t frame_size)
  638. {
  639. TaskState *ts = get_task_state(thread_cpu);
  640. abi_ulong sp;
  641. /* Use default user stack */
  642. sp = get_sp_from_cpustate(env);
  643. if ((ka->sa_flags & TARGET_SA_ONSTACK) && sas_ss_flags(ts, sp) == 0) {
  644. sp = ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
  645. }
  646. return ROUND_DOWN(sp - frame_size, TARGET_SIGSTACK_ALIGN);
  647. }
/* compare to $M/$M/exec_machdep.c sendsig and sys/kern/kern_sig.c sigexit */
/*
 * Build the guest signal frame on the guest stack: saved CPU state, the
 * pre-signal blocked set, and (for SA_SIGINFO handlers) the siginfo.
 */
static void setup_frame(int sig, int code, struct target_sigaction *ka,
    target_sigset_t *set, target_siginfo_t *tinfo, CPUArchState *env)
{
    struct target_sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        /* Can't write the frame: the guest dies with SIGILL. */
        unlock_user_struct(frame, frame_addr, 1);
        dump_core_and_abort(TARGET_SIGILL);
        return;
    }

    memset(frame, 0, sizeof(*frame));
    setup_sigframe_arch(env, frame_addr, frame, 0);

    /* Save the signal mask to restore in sigreturn. */
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->__bits[i], &frame->sf_uc.uc_sigmask.__bits[i]);
    }

    if (tinfo) {
        frame->sf_si.si_signo = tinfo->si_signo;
        frame->sf_si.si_errno = tinfo->si_errno;
        frame->sf_si.si_code = tinfo->si_code;
        frame->sf_si.si_pid = tinfo->si_pid;
        frame->sf_si.si_uid = tinfo->si_uid;
        frame->sf_si.si_status = tinfo->si_status;
        frame->sf_si.si_addr = tinfo->si_addr;
        /* see host_to_target_siginfo_noswap() for more details */
        frame->sf_si.si_value.sival_ptr = tinfo->si_value.sival_ptr;
        /*
         * At this point, whatever is in the _reason union is complete
         * and in target order, so just copy the whole thing over, even
         * if it's too large for this specific signal.
         * host_to_target_siginfo_noswap() and tswap_siginfo() have ensured
         * that's so.
         */
        memcpy(&frame->sf_si._reason, &tinfo->_reason,
               sizeof(tinfo->_reason));
    }

    set_sigtramp_args(env, sig, frame, frame_addr, ka);

    unlock_user_struct(frame, frame_addr, 1);
}
  690. static int reset_signal_mask(target_ucontext_t *ucontext)
  691. {
  692. int i;
  693. sigset_t blocked;
  694. target_sigset_t target_set;
  695. TaskState *ts = get_task_state(thread_cpu);
  696. for (i = 0; i < TARGET_NSIG_WORDS; i++) {
  697. __get_user(target_set.__bits[i], &ucontext->uc_sigmask.__bits[i]);
  698. }
  699. target_to_host_sigset_internal(&blocked, &target_set);
  700. ts->signal_mask = blocked;
  701. return 0;
  702. }
/* See sys/$M/$M/exec_machdep.c sigreturn() */
/*
 * Restore CPU state and signal mask from the guest ucontext at @addr.
 * Returns -TARGET_EJUSTRETURN on success (registers already restored),
 * -TARGET_EFAULT on a bad frame, or the error from
 * get_ucontext_sigreturn().
 */
long do_sigreturn(CPUArchState *env, abi_ulong addr)
{
    long ret;
    abi_ulong target_ucontext;
    target_ucontext_t *ucontext = NULL;

    /* Get the target ucontext address from the stack frame */
    ret = get_ucontext_sigreturn(env, addr, &target_ucontext);
    if (is_error(ret)) {
        return ret;
    }
    trace_user_do_sigreturn(env, addr);
    if (!lock_user_struct(VERIFY_READ, ucontext, target_ucontext, 0)) {
        goto badframe;
    }

    /* Set the register state back to before the signal. */
    if (set_mcontext(env, &ucontext->uc_mcontext, 1)) {
        goto badframe;
    }

    /* And reset the signal mask. */
    if (reset_signal_mask(ucontext)) {
        goto badframe;
    }

    unlock_user_struct(ucontext, target_ucontext, 0);
    return -TARGET_EJUSTRETURN;

badframe:
    if (ucontext != NULL) {
        unlock_user_struct(ucontext, target_ucontext, 0);
    }
    return -TARGET_EFAULT;
}
/*
 * One-time signal setup: snapshot the inherited dispositions into
 * sigact_table and install host_signal_handler for every default-fatal
 * signal so the emulator can intercept them.
 */
void signal_init(void)
{
    TaskState *ts = get_task_state(thread_cpu);
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_sigaction = host_signal_handler;
    act.sa_flags = SA_SIGINFO;

    for (i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        /* Record the disposition inherited from the parent process. */
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /*
         * If there's already a handler installed then something has
         * gone horribly wrong, so don't even try to handle that case.
         * Install some handlers for our own use. We need at least
         * SIGSEGV and SIGBUS, to detect exceptions. We can not just
         * trap all signals because it affects syscall interrupt
         * behavior. But do trap all default-fatal signals.
         */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}
/*
 * Deliver one pending guest signal @sig recorded in sigtab entry @k:
 * apply the default/ignore disposition, or set up a signal frame and
 * adjust the emulated signal mask for a user handler.
 */
static void handle_pending_signal(CPUArchState *env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = get_task_state(cpu);
    struct target_sigaction *sa;
    int code;
    sigset_t set;
    abi_ulong handler;
    target_siginfo_t tinfo;
    target_sigset_t target_old_set;

    trace_user_handle_signal(env, sig);

    /* Dequeue signal. */
    k->pending = 0;

    /* Let gdb intercept the signal; it may change or discard it. */
    sig = gdb_handlesig(cpu, sig, NULL, &k->info, sizeof(k->info));
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /*
         * default handler : ignore some signal. The other are job
         * control or fatal.
         */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD && sig != TARGET_SIGURG &&
                   sig != TARGET_SIGINFO && sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /*
         * SA_NODEFER indicates that the current signal should not be
         * blocked during the handler.
         */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /*
         * Save the previous blocked signal state to restore it at the
         * end of the signal execution (see do_sigreturn).
         */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = false;
        sigprocmask(SIG_SETMASK, &ts->signal_mask, NULL);

        /* XXX VM86 on x86 ??? */

        code = k->info.si_code; /* From host, so no si_type */
        /* prepare the stack frame of the virtual CPU */
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            tswap_siginfo(&tinfo, &k->info);
            setup_frame(sig, code, sa, &target_old_set, &tinfo, env);
        } else {
            setup_frame(sig, code, sa, &target_old_set, NULL, env);
        }
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            /* SA_RESETHAND: restore default disposition after delivery. */
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}
/*
 * Drain and deliver all deliverable pending signals for the current
 * task.  Runs with all host signals blocked while scanning so the
 * pending table cannot change under us; the mask is reopened at the
 * bottom of each pass, which may immediately queue new work and set
 * signal_pending again.
 */
void process_pending_signals(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    int sig;
    sigset_t *blocked_set, set;
    struct emulated_sigtable *k;
    TaskState *ts = get_task_state(cpu);

    while (qatomic_read(&ts->signal_pending)) {
        /* Block all host signals while we walk the pending table. */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        /* Synchronous (CPU-generated) signal takes priority. */
        sig = ts->sync_signal.pending;
        if (sig) {
            /*
             * Synchronous signals are forced by the emulated CPU in some way.
             * If they are set to ignore, restore the default handler (see
             * sys/kern_sig.c trapsignal() and execsigs() for this behavior)
             * though maybe this is done only when forcing exit for non SIGCHLD.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal(sig)) ||
                sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal(sig));
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }
            handle_pending_signal(env, sig, &ts->sync_signal);
        }

        /* Scan ordinary pending signals in signal-number order. */
        k = ts->sigtab;
        for (sig = 1; sig <= TARGET_NSIG; sig++, k++) {
            /* Recompute each iteration: delivery may clear in_sigsuspend. */
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (k->pending &&
                !sigismember(blocked_set, target_to_host_signal(sig))) {
                handle_pending_signal(env, sig, k);
                /*
                 * Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /*
         * Unblock signals and check one more time. Unblocking signals may cause
         * us to take another host signal, which will set signal_pending again.
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = false;
        set = ts->signal_mask;
        /* Never leave SIGSEGV/SIGBUS blocked: needed for fault detection. */
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = false;
}
  896. void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
  897. MMUAccessType access_type, bool maperr, uintptr_t ra)
  898. {
  899. const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
  900. if (tcg_ops->record_sigsegv) {
  901. tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
  902. }
  903. force_sig_fault(TARGET_SIGSEGV,
  904. maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
  905. addr);
  906. cpu->exception_index = EXCP_INTERRUPT;
  907. cpu_loop_exit_restore(cpu, ra);
  908. }
  909. void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
  910. MMUAccessType access_type, uintptr_t ra)
  911. {
  912. const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
  913. if (tcg_ops->record_sigbus) {
  914. tcg_ops->record_sigbus(cpu, addr, access_type, ra);
  915. }
  916. force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
  917. cpu->exception_index = EXCP_INTERRUPT;
  918. cpu_loop_exit_restore(cpu, ra);
  919. }