/* linux-user/ppc/signal.c */
  1. /*
  2. * Emulation of Linux signals
  3. *
  4. * Copyright (c) 2003 Fabrice Bellard
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #include "qemu/osdep.h"
  20. #include "qemu.h"
  21. #include "user-internals.h"
  22. #include "signal-common.h"
  23. #include "linux-user/trace.h"
  24. #include "user/tswap-target.h"
  25. #include "vdso-asmoffset.h"
/* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
   on 64-bit PPC, sigcontext and mcontext are one and the same. */
struct target_mcontext {
    /* GPRs and special registers, indexed by the TARGET_PT_* enum below. */
    target_ulong mc_gregs[48];
    /* Includes fpscr. */
    uint64_t mc_fregs[33];
#if defined(TARGET_PPC64)
    /* Pointer to the vector regs */
    target_ulong v_regs;
    /*
     * On ppc64, this mcontext structure is naturally *unaligned*,
     * or rather it is aligned on a 8 bytes boundary but not on
     * a 16 byte boundary.  This pad fixes it up.  This is why we
     * cannot use ppc_avr_t, which would force alignment.  This is
     * also why the vector regs are referenced in the ABI by the
     * v_regs pointer above so any amount of padding can be added here.
     */
    target_ulong pad;
    /* VSCR and VRSAVE are saved separately.  Also reserve space for VSX. */
    struct {
        uint64_t altivec[34 + 16][2];
    } mc_vregs;
#else
    target_ulong mc_pad[2];
    /* We need to handle Altivec and SPE at the same time, which no
       kernel needs to do.  Fortunately, the kernel defines this bit to
       be Altivec-register-large all the time, rather than trying to
       twiddle it based on the specific platform.  */
    union {
        /* SPE vector registers.  One extra for SPEFSCR. */
        uint32_t spe[33];
        /*
         * Altivec vector registers.  One extra for VRSAVE.
         * On ppc32, we are already aligned to 16 bytes.  We could
         * use ppc_avr_t, but choose to share the same type as ppc64.
         */
        uint64_t altivec[33][2];
    } mc_vregs;
#endif
};

/* Layout must agree with the offsets shared with the vdso
   (see vdso-asmoffset.h, included above). */
QEMU_BUILD_BUG_ON(offsetof(struct target_mcontext, mc_fregs)
                  != offsetof_mcontext_fregs);
#if defined(TARGET_PPC64)
QEMU_BUILD_BUG_ON(offsetof(struct target_mcontext, v_regs)
                  != offsetof_mcontext_vregs_ptr);
#else
QEMU_BUILD_BUG_ON(offsetof(struct target_mcontext, mc_vregs)
                  != offsetof_mcontext_vregs);
#endif
/* See arch/powerpc/include/asm/sigcontext.h. */
struct target_sigcontext {
    target_ulong _unused[4];    /* _unused[3] doubles as the 2nd sigmask
                                   word in setup_frame/do_sigreturn */
    int32_t signal;
#if defined(TARGET_PPC64)
    int32_t pad0;               /* padding; presumably keeps the following
                                   target_ulong fields 8-byte aligned */
#endif
    target_ulong handler;
    target_ulong oldmask;
    target_ulong regs;          /* struct pt_regs __user * */
#if defined(TARGET_PPC64)
    /* On ppc64 the register state is embedded here rather than
       reached only through 'regs'. */
    struct target_mcontext mcontext;
#endif
};
/* Indices for target_mcontext.mc_gregs, below.
   See arch/powerpc/include/asm/ptrace.h for details. */
enum {
    /* General-purpose registers r0..r31 occupy slots 0..31. */
    TARGET_PT_R0 = 0,
    TARGET_PT_R1 = 1,
    TARGET_PT_R2 = 2,
    TARGET_PT_R3 = 3,
    TARGET_PT_R4 = 4,
    TARGET_PT_R5 = 5,
    TARGET_PT_R6 = 6,
    TARGET_PT_R7 = 7,
    TARGET_PT_R8 = 8,
    TARGET_PT_R9 = 9,
    TARGET_PT_R10 = 10,
    TARGET_PT_R11 = 11,
    TARGET_PT_R12 = 12,
    TARGET_PT_R13 = 13,
    TARGET_PT_R14 = 14,
    TARGET_PT_R15 = 15,
    TARGET_PT_R16 = 16,
    TARGET_PT_R17 = 17,
    TARGET_PT_R18 = 18,
    TARGET_PT_R19 = 19,
    TARGET_PT_R20 = 20,
    TARGET_PT_R21 = 21,
    TARGET_PT_R22 = 22,
    TARGET_PT_R23 = 23,
    TARGET_PT_R24 = 24,
    TARGET_PT_R25 = 25,
    TARGET_PT_R26 = 26,
    TARGET_PT_R27 = 27,
    TARGET_PT_R28 = 28,
    TARGET_PT_R29 = 29,
    TARGET_PT_R30 = 30,
    TARGET_PT_R31 = 31,
    /* Special registers follow the GPRs. */
    TARGET_PT_NIP = 32,
    TARGET_PT_MSR = 33,
    TARGET_PT_ORIG_R3 = 34,
    TARGET_PT_CTR = 35,
    TARGET_PT_LNK = 36,
    TARGET_PT_XER = 37,
    TARGET_PT_CCR = 38,
    /* Yes, there are two registers with #39.  One is 64-bit only. */
    TARGET_PT_MQ = 39,
    TARGET_PT_SOFTE = 39,
    TARGET_PT_TRAP = 40,
    TARGET_PT_DAR = 41,
    TARGET_PT_DSISR = 42,
    TARGET_PT_RESULT = 43,
    TARGET_PT_REGS_COUNT = 44
};
/* Target view of the kernel ucontext; the layout differs between
   ppc32 and ppc64 (see the #if branches below). */
struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;      /* ucontext_t __user * */
    struct target_sigaltstack tuc_stack;
#if !defined(TARGET_PPC64)
    int32_t tuc_pad[7];
    target_ulong tuc_regs;      /* struct mcontext __user *
                                   points to uc_mcontext field */
#endif
    target_sigset_t tuc_sigmask;
#if defined(TARGET_PPC64)
    target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
    struct target_sigcontext tuc_sigcontext;
#else
    int32_t tuc_maskext[30];
    int32_t tuc_pad2[3];
    /* The register state itself; tuc_regs above points at this field. */
    struct target_mcontext tuc_mcontext;
#endif
};
#if !defined(TARGET_PPC64)
/* Non-RT (32-bit only) signal frame.
   See arch/powerpc/kernel/signal_32.c. */
struct target_sigframe {
    struct target_sigcontext sctx;
    struct target_mcontext mctx;
    int32_t abigap[56];
};

/* The mcontext offset is shared with the vdso (vdso-asmoffset.h). */
QEMU_BUILD_BUG_ON(offsetof(struct target_sigframe, mctx)
                  != offsetof_sigframe_mcontext);
#endif
#if defined(TARGET_PPC64)

/* Number of 32-bit instruction slots reserved for the trampoline. */
#define TARGET_TRAMP_SIZE 6

/* 64-bit rt signal frame. */
struct target_rt_sigframe {
    /* sys_rt_sigreturn requires the ucontext be the first field */
    struct target_ucontext uc;
    target_ulong _unused[2];
    uint32_t trampoline[TARGET_TRAMP_SIZE];
    target_ulong pinfo;         /* struct siginfo __user * */
    target_ulong puc;           /* void __user * */
    struct target_siginfo info;
    /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
    char abigap[288];
} __attribute__((aligned(16)));

/* The mcontext offset is shared with the vdso (vdso-asmoffset.h). */
QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe,
                           uc.tuc_sigcontext.mcontext)
                  != offsetof_rt_sigframe_mcontext);

#else

/* 32-bit rt signal frame: siginfo precedes the ucontext. */
struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    int32_t abigap[56];
};

/* The mcontext offset is shared with the vdso (vdso-asmoffset.h). */
QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, uc.tuc_mcontext)
                  != offsetof_rt_sigframe_mcontext);

#endif
#if defined(TARGET_PPC64)
/* ELFv1 function descriptor (OPD entry): entry address plus TOC pointer.
   Used when resolving a handler address in setup_rt_frame(). */
struct target_func_ptr {
    target_ulong entry;
    target_ulong toc;
};
#endif
  200. /* See arch/powerpc/kernel/signal.c. */
  201. static target_ulong get_sigframe(struct target_sigaction *ka,
  202. CPUPPCState *env,
  203. int frame_size)
  204. {
  205. target_ulong oldsp;
  206. oldsp = target_sigsp(get_sp_from_cpustate(env), ka);
  207. return (oldsp - frame_size) & ~0xFUL;
  208. }
/*
 * Index of the architecturally-high / low 64-bit half of a vector
 * register within a host-side ppc_avr_t: when host and target
 * endianness match, element 0 is the high half; otherwise the two
 * halves are swapped.
 */
#if TARGET_BIG_ENDIAN == HOST_BIG_ENDIAN
#define PPC_VEC_HI 0
#define PPC_VEC_LO 1
#else
#define PPC_VEC_HI 1
#define PPC_VEC_LO 0
#endif
/*
 * Copy the current CPU state of @env into the guest-visible @frame.
 * Saves GPRs, NIP/CTR/LR/XER/CCR, and — when the CPU has them —
 * Altivec, VSX second halves, FP, and (ppc32) SPE state; MSR is
 * stored last.  All stores go through __put_user (guest byte order).
 */
static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
    target_ulong msr = env->msr;
    int i;
    uint32_t ccr = 0;

    /* In general, the kernel attempts to be intelligent about what it
       needs to save for Altivec/FP/SPE registers.  We don't care that
       much, so we just go ahead and save everything.  */

    /* Save general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __put_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __put_user(cpu_read_xer(env), &frame->mc_gregs[TARGET_PT_XER]);
    ccr = ppc_get_cr(env);
    __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Save Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        uint32_t *vrsave;
        for (i = 0; i < 32; i++) {
            ppc_avr_t *avr = cpu_avr_ptr(env, i);
            ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];

            /* Architecturally-high half goes to u64[0] regardless of
               host endianness (see PPC_VEC_HI/PPC_VEC_LO above). */
            __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
#if defined(TARGET_PPC64)
        /* VRSAVE lives past the 32 vector regs + VSCR slot. */
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
        /* 64-bit needs to put a pointer to the vectors in the frame */
        __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
#else
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
#endif
        __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
    }

#if defined(TARGET_PPC64)
    /* Save VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < 32; i++) {
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);
            __put_user(*vsrl, &vsregs[i]);
        }
    }
#endif

    /* Save floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        for (i = 0; i < 32; i++) {
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            __put_user(*fpr, &frame->mc_fregs[i]);
        }
        /* Slot 32 holds the FPSCR.  */
        __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
    }

#if !defined(TARGET_PPC64)
    /* Save SPE registers.  The kernel only saves the high half.  */
    if (env->insns_flags & PPC_SPE) {
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
        /* Slot 32 holds the SPEFSCR.  */
        __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
#endif

    /* Store MSR.  */
    __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
}
  282. static void encode_trampoline(int sigret, uint32_t *tramp)
  283. {
  284. /* Set up the sigreturn trampoline: li r0,sigret; sc. */
  285. __put_user(0x38000000 | sigret, &tramp[0]);
  286. __put_user(0x44000002, &tramp[1]);
  287. }
/*
 * Restore CPU state of @env from the guest-visible @frame.
 *
 * @sig distinguishes the two callers: non-zero for a signal return,
 * where the saved MSR's LE bit is also restored; zero for a
 * swapcontext-style restore, where r2 (the TOC pointer — see the
 * ELFv1 handling in setup_rt_frame) is preserved across the restore
 * and MSR is left untouched.
 */
static void restore_user_regs(CPUPPCState *env,
                              struct target_mcontext *frame, int sig)
{
    target_ulong save_r2 = 0;
    target_ulong msr;
    target_ulong xer;
    target_ulong ccr;
    int i;

    if (!sig) {
        save_r2 = env->gpr[2];
    }

    /* Restore general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __get_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __get_user(xer, &frame->mc_gregs[TARGET_PT_XER]);
    cpu_write_xer(env, xer);
    __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
    ppc_set_cr(env, ccr);

    /* r2 was clobbered by the GPR loop above; put it back. */
    if (!sig) {
        env->gpr[2] = save_r2;
    }
    /* Restore MSR.  */
    __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);

    /* If doing signal return, restore the previous little-endian mode.  */
    if (sig) {
        ppc_store_msr(env, ((env->msr & ~(1ull << MSR_LE)) |
                            (msr & (1ull << MSR_LE))));
    }

    /* Restore Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        ppc_avr_t *v_regs;
        uint32_t *vrsave;
#if defined(TARGET_PPC64)
        uint64_t v_addr;
        /* 64-bit needs to recover the pointer to the vectors from the frame */
        __get_user(v_addr, &frame->v_regs);
        v_regs = g2h(env_cpu(env), v_addr);
#else
        v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
        for (i = 0; i < 32; i++) {
            ppc_avr_t *avr = cpu_avr_ptr(env, i);
            ppc_avr_t *vreg = &v_regs[i];

            /* Inverse of the layout used by save_user_regs(). */
            __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&v_regs[33];
#else
        vrsave = (uint32_t *)&v_regs[32];
#endif
        __get_user(env->spr[SPR_VRSAVE], vrsave);
    }

#if defined(TARGET_PPC64)
    /* Restore VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < 32; i++) {
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);
            __get_user(*vsrl, &vsregs[i]);
        }
    }
#endif

    /* Restore floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        for (i = 0; i < 32; i++) {
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            __get_user(*fpr, &frame->mc_fregs[i]);
        }
        __get_user(fpscr, &frame->mc_fregs[32]);
        env->fpscr = (uint32_t) fpscr;
    }

#if !defined(TARGET_PPC64)
    /* Restore SPE registers.  The kernel only restores the high half.  */
    if (env->insns_flags & PPC_SPE) {
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
        __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
#endif
}
#if !defined(TARGET_PPC64)
/*
 * Build a non-RT signal frame on the guest stack and set up the CPU
 * to enter @ka->_sa_handler.  Any fault while writing the frame
 * forces SIGSEGV instead.
 */
void setup_frame(int sig, struct target_sigaction *ka,
                 target_sigset_t *set, CPUPPCState *env)
{
    struct target_sigframe *frame;
    struct target_sigcontext *sc;
    target_ulong frame_addr, newsp;
    int err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto sigsegv;
    sc = &frame->sctx;

    __put_user(ka->_sa_handler, &sc->handler);
    __put_user(set->sig[0], &sc->oldmask);
    /* The second sigmask word is stashed in the unused sigcontext slot. */
    __put_user(set->sig[1], &sc->_unused[3]);
    __put_user(h2g(&frame->mctx), &sc->regs);
    __put_user(sig, &sc->signal);

    /* Save user regs.  */
    save_user_regs(env, &frame->mctx);

    /* The handler returns through the pre-built sigreturn trampoline. */
    env->lr = default_sigreturn;

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = frame_addr - SIGNAL_FRAMESIZE;
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = sig;
    env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
    env->nip = (target_ulong) ka->_sa_handler;

    /* Signal handlers are entered in big-endian mode.  */
    ppc_store_msr(env, env->msr & ~(1ull << MSR_LE));

    unlock_user_struct(frame, frame_addr, 1);
    return;

sigsegv:
    /* 'frame' is NULL here if the lock failed; unlock handles that. */
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
#endif /* !defined(TARGET_PPC64) */
/*
 * Build an RT signal frame (siginfo + ucontext) on the guest stack
 * and set up the CPU to enter @ka->_sa_handler.  Any fault while
 * writing the frame forces SIGSEGV instead.
 */
void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info,
                    target_sigset_t *set, CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf;
    struct target_mcontext *mctx = 0;
    target_ulong rt_sf_addr, newsp = 0;
    int i, err = 0;
#if defined(TARGET_PPC64)
    struct target_sigcontext *sc = 0;
    /* Needed to pick the ELFv1 vs ELFv2 handler-entry convention below. */
    struct image_info *image = get_task_state(thread_cpu)->info;
#endif

    rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
    if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    /* Copy the siginfo and fill in the ucontext. */
    rt_sf->info = *info;

    __put_user(0, &rt_sf->uc.tuc_flags);
    __put_user(0, &rt_sf->uc.tuc_link);
    target_save_altstack(&rt_sf->uc.tuc_stack, env);
#if !defined(TARGET_PPC64)
    /* ppc32 reaches the mcontext through a pointer in the ucontext. */
    __put_user(h2g (&rt_sf->uc.tuc_mcontext),
               &rt_sf->uc.tuc_regs);
#endif
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
    }

#if defined(TARGET_PPC64)
    mctx = &rt_sf->uc.tuc_sigcontext.mcontext;

    sc = &rt_sf->uc.tuc_sigcontext;
    __put_user(h2g(mctx), &sc->regs);
    __put_user(sig, &sc->signal);
#else
    mctx = &rt_sf->uc.tuc_mcontext;
#endif
    save_user_regs(env, mctx);

    /* The handler returns through the pre-built rt_sigreturn trampoline. */
    env->lr = default_rt_sigreturn;

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = (target_ulong) sig;
    env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
    env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
    env->gpr[6] = (target_ulong) h2g(rt_sf);

#if defined(TARGET_PPC64)
    if (get_ppc64_abi(image) < 2) {
        /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
        struct target_func_ptr *handler =
            (struct target_func_ptr *)g2h(env_cpu(env), ka->_sa_handler);
        env->nip = tswapl(handler->entry);
        env->gpr[2] = tswapl(handler->toc);
    } else {
        /* ELFv2 PPC64 function pointers are entry points. R12 must also be set. */
        env->gpr[12] = env->nip = ka->_sa_handler;
    }
#else
    env->nip = (target_ulong) ka->_sa_handler;
#endif

#if TARGET_BIG_ENDIAN
    /* Signal handlers are entered in big-endian mode.  */
    ppc_store_msr(env, env->msr & ~(1ull << MSR_LE));
#else
    /* Signal handlers are entered in little-endian mode.  */
    ppc_store_msr(env, env->msr | (1ull << MSR_LE));
#endif

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return;

sigsegv:
    /* 'rt_sf' is NULL here if the lock failed; unlock handles that. */
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sigsegv(sig);
}
#if !defined(TARGET_PPC64)
/*
 * Implement sigreturn for the non-RT frame built by setup_frame():
 * restore the saved signal mask, then the register state pointed to
 * by sigcontext.regs.  Returns -QEMU_ESIGRETURN so the syscall path
 * does not clobber the restored registers.
 */
long do_sigreturn(CPUPPCState *env)
{
    struct target_sigcontext *sc = NULL;
    struct target_mcontext *sr = NULL;
    target_ulong sr_addr = 0, sc_addr;
    sigset_t blocked;
    target_sigset_t set;

    /* The sigcontext sits just above the frame setup_frame() pushed. */
    sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
    if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
        goto sigsegv;

    /* Second sigmask word was stashed in _unused[3] by setup_frame(). */
    __get_user(set.sig[0], &sc->oldmask);
    __get_user(set.sig[1], &sc->_unused[3]);
    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);

    __get_user(sr_addr, &sc->regs);
    if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
        goto sigsegv;
    restore_user_regs(env, sr, 1);

    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    return -QEMU_ESIGRETURN;

sigsegv:
    /* Whichever lock failed left its pointer NULL; unlock handles that. */
    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -QEMU_ESIGRETURN;
}
#endif /* !defined(TARGET_PPC64) */
/* See arch/powerpc/kernel/signal_32.c. */
/*
 * Restore signal mask and register state from the guest ucontext @ucp
 * (already locked by the caller).  @sig is forwarded to
 * restore_user_regs(): non-zero for a signal return, zero for
 * swapcontext.  Returns 0 on success, 1 on any guest-memory fault.
 */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
{
    struct target_mcontext *mcp;
    target_ulong mcp_addr;
    sigset_t blocked;
    target_sigset_t set;

    /* @ucp is a host pointer; h2g() recovers the guest address so the
       sigmask copy goes through the usual access checks. */
    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
                       sizeof (set)))
        return 1;

#if defined(TARGET_PPC64)
    /* ppc64 embeds the mcontext directly in the ucontext. */
    mcp_addr = h2g(ucp) +
        offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
#else
    /* ppc32 keeps a guest pointer to the mcontext instead. */
    __get_user(mcp_addr, &ucp->tuc_regs);
#endif

    if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
        return 1;

    target_to_host_sigset(&blocked, &set);
    set_sigmask(&blocked);
    restore_user_regs(env, mcp, sig);

    unlock_user_struct(mcp, mcp_addr, 1);
    return 0;
}
/*
 * Implement rt_sigreturn: recover the frame pushed by setup_rt_frame(),
 * restore the saved context and the alternate signal stack settings.
 * Returns -QEMU_ESIGRETURN so the syscall path does not clobber the
 * restored registers.
 */
long do_rt_sigreturn(CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf = NULL;
    target_ulong rt_sf_addr;

    /* Mirrors the newsp computation in setup_rt_frame(). */
    rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
    if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    if (do_setcontext(&rt_sf->uc, env, 1))
        goto sigsegv;

    target_restore_altstack(&rt_sf->uc.tuc_stack, env);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return -QEMU_ESIGRETURN;

sigsegv:
    /* 'rt_sf' is NULL here if the lock failed; unlock handles that. */
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -QEMU_ESIGRETURN;
}
/* This syscall implements {get,set,swap}context for userland.  */
/*
 * @uold_ctx, when non-zero, receives the current context (registers +
 * signal mask); @unew_ctx, when non-zero, is installed as the new
 * context.  Either may be zero, giving get/set/swap semantics.
 * Returns 0, a -TARGET_* errno, or -QEMU_ESIGRETURN when registers
 * were replaced and must not be clobbered by the syscall return path.
 */
abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx,
                        abi_ulong unew_ctx, abi_long ctx_size)
{
    struct target_ucontext *uctx;
    struct target_mcontext *mctx;

    /* For ppc32, ctx_size is "reserved for future use".
     * For ppc64, we do not yet support the VSX extension.
     */
    if (ctx_size < sizeof(struct target_ucontext)) {
        return -TARGET_EINVAL;
    }

    if (uold_ctx) {
        TaskState *ts = get_task_state(thread_cpu);

        if (!lock_user_struct(VERIFY_WRITE, uctx, uold_ctx, 1)) {
            return -TARGET_EFAULT;
        }

#ifdef TARGET_PPC64
        mctx = &uctx->tuc_sigcontext.mcontext;
#else
        /* ??? The kernel aligns the pointer down here into padding, but
         * in setup_rt_frame we don't.  Be self-compatible for now.
         */
        mctx = &uctx->tuc_mcontext;
        __put_user(h2g(mctx), &uctx->tuc_regs);
#endif

        save_user_regs(env, mctx);
        host_to_target_sigset(&uctx->tuc_sigmask, &ts->signal_mask);

        unlock_user_struct(uctx, uold_ctx, 1);
    }

    if (unew_ctx) {
        int err;

        if (!lock_user_struct(VERIFY_READ, uctx, unew_ctx, 1)) {
            return -TARGET_EFAULT;
        }
        /* sig=0: preserve r2 and MSR across the restore. */
        err = do_setcontext(uctx, env, 0);
        unlock_user_struct(uctx, unew_ctx, 1);

        if (err) {
            /* We cannot return to a partially updated context.  */
            force_sig(TARGET_SIGSEGV);
        }
        return -QEMU_ESIGRETURN;
    }

    return 0;
}
/*
 * Install the sigreturn trampolines into the dedicated guest page:
 * two 8-byte slots (each holds a 2-insn trampoline), the first for
 * sigreturn when the arch uses non-RT frames, the second for
 * rt_sigreturn.  Also records their guest addresses in the
 * default_(rt_)sigreturn globals used by setup_(rt_)frame().
 */
void setup_sigtramp(abi_ulong sigtramp_page)
{
    uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0);

    assert(tramp != NULL);

#ifdef TARGET_ARCH_HAS_SETUP_FRAME
    default_sigreturn = sigtramp_page;
    encode_trampoline(TARGET_NR_sigreturn, tramp + 0);
#endif

    /* rt trampoline starts at byte 8, i.e. insn slot 2. */
    default_rt_sigreturn = sigtramp_page + 8;
    encode_trampoline(TARGET_NR_rt_sigreturn, tramp + 2);

    unlock_user(tramp, sigtramp_page, 2 * 8);
}