/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/cutils.h"
#include "gdbstub/user.h"
#include "exec/page-protection.h"
#include "accel/tcg/cpu-ops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "loader.h"
#include "trace.h"
#include "signal-common.h"
#include "host-signal.h"
#include "user/cpu_loop.h"
#include "user/page-protection.h"
#include "user/safe-syscall.h"
#include "user/signal.h"
#include "tcg/tcg.h"

/* target_siginfo_t must fit in gdbstub's siginfo save area. */
QEMU_BUILD_BUG_ON(sizeof(target_siginfo_t) > MAX_SIGINFO_LENGTH);

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/* Fallback addresses into sigtramp page. */
abi_ulong default_sigreturn;
abi_ulong default_rt_sigreturn;

/*
 * System includes define _NSIG as SIGRTMAX + 1, but qemu (like the kernel)
 * defines TARGET_NSIG as TARGET_SIGRTMAX and the first signal is 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
#ifdef __SIGRTMAX
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
static uint8_t host_to_target_signal_table[_NSIG] = {
#define MAKE_SIG_ENTRY(sig)     [sig] = TARGET_##sig,
    MAKE_SIGNAL_LIST
#undef MAKE_SIG_ENTRY
};

static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1) {
        return sig;
    }
    if (sig >= _NSIG) {
        return TARGET_NSIG + 1;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1) {
        return sig;
    }
    if (sig > TARGET_NSIG) {
        return _NSIG;
    }
    return target_to_host_signal_table[sig];
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;

    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;

    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;

    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = get_task_state(thread_cpu);
    sigset_t set;

    /*
     * It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}

/*
 * Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. Returns -QEMU_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = get_task_state(thread_cpu);

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
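
/*
 * Illustrative caller pattern (a sketch, not code from this file):
 * syscall emulation paths are expected to hand -QEMU_ERESTARTSYS back
 * to the per-arch cpu_loop, which rewinds the guest PC so the syscall
 * is re-issued once the pending signal has been delivered, e.g.:
 *
 *     ret = do_sigprocmask(how, &set, &oldset);
 *     if (ret == -QEMU_ERESTARTSYS) {
 *         return ret;
 *     }
 */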

/*
 * Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = get_task_state(thread_cpu);

    ts->signal_mask = *set;
}

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    TaskState *ts = get_task_state(thread_cpu);

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = get_task_state(thread_cpu);

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = get_task_state(thread_cpu);

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = get_task_state(thread_cpu);

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = get_task_state(thread_cpu);
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;

    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /*
     * This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /*
     * This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...)  We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */
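    /*
     * Concretely (an illustrative trace of the code below): a signal
     * sent with kill() arrives with si_code == SI_USER, so the final
     * line of this function stores deposit32(SI_USER, 16, 16,
     * QEMU_SI_KILL); tswap_siginfo() later splits the two halves back
     * apart with extract32() and sextract32().
     */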
    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /*
         * Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            if (si_code == CLD_EXITED) {
                tinfo->_sifields._sigchld._status = info->si_status;
            } else {
                tinfo->_sifields._sigchld._status
                    = host_to_target_signal(info->si_status & 0x7f)
                      | (info->si_status & ~0x7f);
            }
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /*
     * We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;

    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we support only POSIX RT signals. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /*
     * This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

int host_interrupt_signal;

static void signal_table_init(const char *rtsig_map)
{
    int hsig, tsig, count;

    if (rtsig_map) {
        /*
         * Map host RT signals to target RT signals according to the
         * user-provided specification.
         */
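        /*
         * The format (inferred from the parser below) is a comma-separated
         * list of "tsig hsig count" triples, mapping "count" consecutive
         * target RT signals starting at "tsig" onto host RT signals
         * starting at "hsig".  An illustrative example:
         *
         *     QEMU_RTSIG_MAP="34 36 4"
         *
         * maps target signals 34..37 onto host signals 36..39 (the valid
         * ranges depend on TARGET_SIGRTMIN and the host SIGRTMIN/SIGRTMAX).
         */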
        const char *s = rtsig_map;

        while (true) {
            int i;

            if (qemu_strtoi(s, &s, 10, &tsig) || *s++ != ' ') {
                fprintf(stderr, "Malformed target signal in QEMU_RTSIG_MAP\n");
                exit(EXIT_FAILURE);
            }
            if (qemu_strtoi(s, &s, 10, &hsig) || *s++ != ' ') {
                fprintf(stderr, "Malformed host signal in QEMU_RTSIG_MAP\n");
                exit(EXIT_FAILURE);
            }
            if (qemu_strtoi(s, &s, 10, &count) || (*s && *s != ',')) {
                fprintf(stderr, "Malformed signal count in QEMU_RTSIG_MAP\n");
                exit(EXIT_FAILURE);
            }

            for (i = 0; i < count; i++, tsig++, hsig++) {
                if (tsig < TARGET_SIGRTMIN || tsig > TARGET_NSIG) {
                    fprintf(stderr, "%d is not a target rt signal\n", tsig);
                    exit(EXIT_FAILURE);
                }
                if (hsig < SIGRTMIN || hsig > SIGRTMAX) {
                    fprintf(stderr, "%d is not a host rt signal\n", hsig);
                    exit(EXIT_FAILURE);
                }
                if (host_to_target_signal_table[hsig]) {
                    fprintf(stderr, "%d already maps %d\n",
                            hsig, host_to_target_signal_table[hsig]);
                    exit(EXIT_FAILURE);
                }
                host_to_target_signal_table[hsig] = tsig;
            }

            if (*s) {
                s++;
            } else {
                break;
            }
        }
    } else {
        /*
         * Default host-to-target RT signal mapping.
         *
         * Signals are supported starting from TARGET_SIGRTMIN and going up
         * until we run out of host realtime signals.  Glibc uses the lower 2
         * RT signals and (hopefully) nobody uses the upper ones.
         * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
         * To fix this properly we would need to do manual signal delivery
         * multiplexed over a single host signal.
         * Attempts to configure "missing" signals via sigaction will be
         * silently ignored.
         *
         * Reserve two signals for internal usage (see below).
         */
        hsig = SIGRTMIN + 2;
        for (tsig = TARGET_SIGRTMIN;
             hsig <= SIGRTMAX && tsig <= TARGET_NSIG;
             hsig++, tsig++) {
            host_to_target_signal_table[hsig] = tsig;
        }
    }

    /*
     * Remap the target SIGABRT, so that we can distinguish host abort
     * from guest abort.  When the guest registers a signal handler or
     * calls raise(SIGABRT), the host will raise SIG_RTn.  If the guest
     * arrives at dump_core_and_abort(), we will map back to host SIGABRT
     * so that the parent (native or emulated) sees the correct signal.
     * Finally, also map host to guest SIGABRT so that the emulated
     * parent sees the correct mapping from wait status.
     */
    host_to_target_signal_table[SIGABRT] = 0;
    for (hsig = SIGRTMIN; hsig <= SIGRTMAX; hsig++) {
        if (!host_to_target_signal_table[hsig]) {
            if (host_interrupt_signal) {
                host_to_target_signal_table[hsig] = TARGET_SIGABRT;
                break;
            } else {
                host_interrupt_signal = hsig;
            }
        }
    }
    if (hsig > SIGRTMAX) {
        fprintf(stderr,
                "No rt signals left for interrupt and SIGABRT mapping\n");
        exit(EXIT_FAILURE);
    }

    /* Invert the mapping that has already been assigned. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        tsig = host_to_target_signal_table[hsig];
        if (tsig) {
            if (target_to_host_signal_table[tsig]) {
                fprintf(stderr, "%d is already mapped to %d\n",
                        tsig, target_to_host_signal_table[tsig]);
                exit(EXIT_FAILURE);
            }
            target_to_host_signal_table[tsig] = hsig;
        }
    }
    host_to_target_signal_table[SIGABRT] = TARGET_SIGABRT;

    /* Map everything else out-of-bounds. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        if (host_to_target_signal_table[hsig] == 0) {
            host_to_target_signal_table[hsig] = TARGET_NSIG + 1;
        }
    }
    for (count = 0, tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        if (target_to_host_signal_table[tsig] == 0) {
            target_to_host_signal_table[tsig] = _NSIG;
            count++;
        }
    }

    trace_signal_table_init(count);
}

void signal_init(const char *rtsig_map)
{
    TaskState *ts = get_task_state(thread_cpu);
    struct sigaction act, oact;

    /* initialize signal conversion tables */
    signal_table_init(rtsig_map);

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;

    /*
     * A parent process may configure ignored signals, but all other
     * signals are default.  For any target signals that have no host
     * mapping, set to ignore.  For all core_dump_signal, install our
     * host signal handler so that we may invoke dump_core_and_abort.
     * This includes SIGSEGV and SIGBUS, which also need our signal
     * handler for paging and exceptions.
     */
    for (int tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        int hsig = target_to_host_signal(tsig);
        abi_ptr thand = TARGET_SIG_IGN;

        if (hsig >= _NSIG) {
            continue;
        }

        /* As we force remap SIGABRT, cannot probe and install in one step. */
        if (tsig == TARGET_SIGABRT) {
            sigaction(SIGABRT, NULL, &oact);
            sigaction(hsig, &act, NULL);
        } else {
            struct sigaction *iact = core_dump_signal(tsig) ? &act : NULL;
            sigaction(hsig, iact, &oact);
        }

        if (oact.sa_sigaction != (void *)SIG_IGN) {
            thand = TARGET_SIG_DFL;
        }
        sigact_table[tsig - 1]._sa_handler = thand;
    }

    sigaction(host_interrupt_signal, &act, NULL);
}

/*
 * Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(cpu_env(cpu), info.si_signo, QEMU_SI_KILL, &info);
}

/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info._sifields._sigfault._addr = addr;
    queue_signal(cpu_env(cpu), sig, QEMU_SI_FAULT, &info);
}

/*
 * Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /*
         * Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
    }

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
    }

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

/* abort execution with signal */
static G_NORETURN
void die_with_signal(int host_sig)
{
    struct sigaction act = {
        .sa_handler = SIG_DFL,
    };

    /*
     * The proper exit code for dying from an uncaught signal is -<signal>.
     * The kernel doesn't allow exit() or _exit() to pass a negative value.
     * To get the proper exit code we need to actually die from an uncaught
     * signal.  Here the default signal handler is installed, we send
     * the signal and we wait for it to arrive.
     */
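    /*
     * For illustration: a process that dies this way from SIGSEGV shows
     * WIFSIGNALED(status) true and WTERMSIG(status) == SIGSEGV in its
     * parent's waitpid() status; shells conventionally report that as
     * exit code 128 + SIGSEGV (139 on typical hosts).
     */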
    sigfillset(&act.sa_mask);
    sigaction(host_sig, &act, NULL);

    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (reusing the mask inside of act). */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    _exit(EXIT_FAILURE);
}

static G_NORETURN
void dump_core_and_abort(CPUArchState *env, int target_sig)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = get_task_state(cpu);
    int host_sig, core_dumped = 0;

    /* On exit, undo the remapping of SIGABRT. */
    if (target_sig == TARGET_SIGABRT) {
        host_sig = SIGABRT;
    } else {
        host_sig = target_to_host_signal(target_sig);
    }
    trace_user_dump_core_and_abort(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /*
         * We already dumped the core of the target process, we don't want
         * a coredump of qemu itself.
         */
        struct rlimit nodump;

        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped");
    }

    preexit_cleanup(env, 128 + target_sig);
    die_with_signal(host_sig);
}

/*
 * Queue a signal so that it will be sent to the virtual CPU as soon
 * as possible.
 */
void queue_signal(CPUArchState *env, int sig, int si_type,
                  target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = get_task_state(cpu);

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* Signal that a new signal is pending. */
    qatomic_set(&ts->signal_pending, 1);
}

/* Adjust the signal context to rewind out of safe-syscall if we're in it */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    host_sigcontext *uc = (host_sigcontext *)puc;
    uintptr_t pcreg = host_signal_pc(uc);

    if (pcreg > (uintptr_t)safe_syscall_start
        && pcreg < (uintptr_t)safe_syscall_end) {
        host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
    }
}

static G_NORETURN
void die_from_signal(siginfo_t *info)
{
    char sigbuf[4], codebuf[12];
    const char *sig, *code = NULL;

    switch (info->si_signo) {
    case SIGSEGV:
        sig = "SEGV";
        switch (info->si_code) {
        case SEGV_MAPERR:
            code = "MAPERR";
            break;
        case SEGV_ACCERR:
            code = "ACCERR";
            break;
        }
        break;
    case SIGBUS:
        sig = "BUS";
        switch (info->si_code) {
        case BUS_ADRALN:
            code = "ADRALN";
            break;
        case BUS_ADRERR:
            code = "ADRERR";
            break;
        }
        break;
    case SIGILL:
        sig = "ILL";
        switch (info->si_code) {
        case ILL_ILLOPC:
            code = "ILLOPC";
            break;
        case ILL_ILLOPN:
            code = "ILLOPN";
            break;
        case ILL_ILLADR:
            code = "ILLADR";
            break;
        case ILL_PRVOPC:
            code = "PRVOPC";
            break;
        case ILL_PRVREG:
            code = "PRVREG";
            break;
        case ILL_COPROC:
            code = "COPROC";
            break;
        }
        break;
    case SIGFPE:
        sig = "FPE";
        switch (info->si_code) {
        case FPE_INTDIV:
            code = "INTDIV";
            break;
        case FPE_INTOVF:
            code = "INTOVF";
            break;
        }
        break;
    case SIGTRAP:
        sig = "TRAP";
        break;
    default:
        snprintf(sigbuf, sizeof(sigbuf), "%d", info->si_signo);
        sig = sigbuf;
        break;
    }
    if (code == NULL) {
        snprintf(codebuf, sizeof(codebuf), "%d", info->si_code);
        code = codebuf;
    }

    error_report("QEMU internal SIG%s {code=%s, addr=%p}",
                 sig, code, info->si_addr);
    die_with_signal(info->si_signo);
}

static void host_sigsegv_handler(CPUState *cpu, siginfo_t *info,
                                 host_sigcontext *uc)
{
    uintptr_t host_addr = (uintptr_t)info->si_addr;
    /*
     * Convert forcefully to guest address space: addresses outside
     * reserved_va are still valid to report via SEGV_MAPERR.
     */
    bool is_valid = h2g_valid(host_addr);
    abi_ptr guest_addr = h2g_nocheck(host_addr);
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
    bool maperr;

    /* If this was a write to a TB protected page, restart. */
    if (is_write
        && is_valid
        && info->si_code == SEGV_ACCERR
        && handle_sigsegv_accerr_write(cpu, host_signal_mask(uc),
                                       pc, guest_addr)) {
        return;
    }

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (access_type != MMU_INST_FETCH
        && !in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    maperr = true;
    if (is_valid && info->si_code == SEGV_ACCERR) {
        /*
         * With reserved_va, the whole address space is PROT_NONE,
         * which means that we may get ACCERR when we want MAPERR.
         */
        if (page_get_flags(guest_addr) & PAGE_VALID) {
            maperr = false;
        } else {
            info->si_code = SEGV_MAPERR;
        }
    }

    sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
    cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
}

static uintptr_t host_sigbus_handler(CPUState *cpu, siginfo_t *info,
                                     host_sigcontext *uc)
{
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (!in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    if (info->si_code == BUS_ADRALN) {
        uintptr_t host_addr = (uintptr_t)info->si_addr;
        abi_ptr guest_addr = h2g_nocheck(host_addr);

        sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
        cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
    }
    return pc;
}

static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu_env(cpu);
    TaskState *ts = get_task_state(cpu);
    target_siginfo_t tinfo;
    host_sigcontext *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;
    void *sigmask;

    if (host_sig == host_interrupt_signal) {
        ts->signal_pending = 1;
        cpu_exit(thread_cpu);
        return;
    }

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.  Non-spoofed SIGILL,
     * SIGFPE, SIGTRAP are always host bugs.
     */
    if (info->si_code > 0) {
        switch (host_sig) {
        case SIGSEGV:
            /* Only returns on handle_sigsegv_accerr_write success. */
            host_sigsegv_handler(cpu, info, uc);
            return;
        case SIGBUS:
            pc = host_sigbus_handler(cpu, info, uc);
            sync_sig = true;
            break;
        case SIGILL:
        case SIGFPE:
        case SIGTRAP:
            die_from_signal(info);
        }
    }

    /* get target signal number */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     */
    sigmask = host_signal_mask(uc);
    memset(sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(sigmask, SIGSEGV);
    sigdelset(sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() return target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -QEMU_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset. */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * We don't return an error here because some programs try to
             * register a handler for all possible rt signals even if they
             * don't need it.
             * An error here can abort them, whereas there is no problem
             * with not having the signal available later.
             * This is the case for golang,
             *   see https://github.com/golang/go/issues/33746
             * So we silently ignore the error.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            struct sigaction act1;

            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->_sa_handler == TARGET_SIG_IGN) {
                /*
                 * It is important to update the host kernel signal ignore
                 * state to avoid getting unexpected interrupted syscalls.
                 */
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (core_dump_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
                if (k->sa_flags & TARGET_SA_RESTART) {
                    act1.sa_flags |= SA_RESTART;
                }
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_siginfo_t unswapped;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = get_task_state(cpu);

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    /*
     * Writes out siginfo values byteswapped, according to the target.
     * It also cleans the si_type from si_code, making it correct for
     * the target.  We must hold on to the original unswapped copy for
     * strace below, because si_type is still required there.
     */
    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        unswapped = k->info;
    }
    tswap_siginfo(&k->info, &k->info);

    sig = gdb_handlesig(cpu, sig, NULL, &k->info, sizeof(k->info));
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &unswapped);
    }

    if (handler == TARGET_SIG_DFL) {
        /*
         * Default handler: ignore some signals.  The others are job
         * control or fatal.
         */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(cpu_env, sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(cpu_env, sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /*
         * SA_NODEFER indicates that the current signal should not be
         * blocked during the handler.
         */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /*
         * Save the previous blocked signal state to restore it at the
         * end of the signal execution (see do_sigreturn).
         */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = get_task_state(cpu);
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /*
             * Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /*
                 * Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /*
         * If no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}

int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
                            target_ulong sigsize)
{
    TaskState *ts = get_task_state(thread_cpu);
    sigset_t *host_set = &ts->sigsuspend_mask;
    target_sigset_t *target_sigset;

    if (sigsize != sizeof(*target_sigset)) {
        /* Like the kernel, we enforce correct size sigsets */
        return -TARGET_EINVAL;
    }

    target_sigset = lock_user(VERIFY_READ, sigset, sigsize, 1);
    if (!target_sigset) {
        return -TARGET_EFAULT;
    }
    target_to_host_sigset(host_set, target_sigset);
    unlock_user(target_sigset, sigset, 0);

    *pset = host_set;
    return 0;
}