/*
 *  qemu user cpu loop
 *
 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
#include "elf.h"
#include "user/cpu_loop.h"
#include "signal-common.h"
#include "semihosting/common-semi.h"
#include "exec/page-protection.h"
#include "user/page-protection.h"
#include "target/arm/syndrome.h"

/*
 * Fetch a 32-bit insn word from guest address @gaddr into @x,
 * byte-swapping the fetched word when bswap_code() says code is
 * stored in the opposite endianness (SCTLR.B setting).
 * Evaluates to the get_user_u32() status: 0 on success, non-zero
 * if the guest read faulted (in which case @x is not swapped).
 */
#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })
  36. /*
  37. * Note that if we need to do data accesses here, they should do a
  38. * bswap if arm_cpu_bswap_data() returns true.
  39. */
  40. /*
  41. * Similar to code in accel/tcg/user-exec.c, but outside the execution loop.
  42. * Must be called with mmap_lock.
  43. * We get the PC of the entry address - which is as good as anything,
  44. * on a real kernel what you get depends on which mode it uses.
  45. */
  46. static void *atomic_mmu_lookup(CPUArchState *env, uint32_t addr, int size)
  47. {
  48. int need_flags = PAGE_READ | PAGE_WRITE_ORG | PAGE_VALID;
  49. int page_flags;
  50. /* Enforce guest required alignment. */
  51. if (unlikely(addr & (size - 1))) {
  52. force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
  53. return NULL;
  54. }
  55. page_flags = page_get_flags(addr);
  56. if (unlikely((page_flags & need_flags) != need_flags)) {
  57. force_sig_fault(TARGET_SIGSEGV,
  58. page_flags & PAGE_VALID ?
  59. TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR, addr);
  60. return NULL;
  61. }
  62. return g2h(env_cpu(env), addr);
  63. }
  64. /*
  65. * See the Linux kernel's Documentation/arm/kernel_user_helpers.rst
  66. * Input:
  67. * r0 = oldval
  68. * r1 = newval
  69. * r2 = pointer to target value
  70. *
  71. * Output:
  72. * r0 = 0 if *ptr was changed, non-0 if no exchange happened
  73. * C set if *ptr was changed, clear if no exchange happened
  74. */
  75. static void arm_kernel_cmpxchg32_helper(CPUARMState *env)
  76. {
  77. uint32_t oldval, newval, val, addr, cpsr, *host_addr;
  78. /* Swap if host != guest endianness, for the host cmpxchg below */
  79. oldval = tswap32(env->regs[0]);
  80. newval = tswap32(env->regs[1]);
  81. addr = env->regs[2];
  82. mmap_lock();
  83. host_addr = atomic_mmu_lookup(env, addr, 4);
  84. if (!host_addr) {
  85. mmap_unlock();
  86. return;
  87. }
  88. val = qatomic_cmpxchg__nocheck(host_addr, oldval, newval);
  89. mmap_unlock();
  90. cpsr = (val == oldval) * CPSR_C;
  91. cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
  92. env->regs[0] = cpsr ? 0 : -1;
  93. }
/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.rst
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;
    uint64_t *host_addr;

    /* Read both operands through their guest pointers; fault -> SIGSEGV. */
    addr = env->regs[0];
    if (get_user_u64(oldval, addr)) {
        goto segv;
    }

    addr = env->regs[1];
    if (get_user_u64(newval, addr)) {
        goto segv;
    }

    mmap_lock();
    addr = env->regs[2];
    host_addr = atomic_mmu_lookup(env, addr, 8);
    if (!host_addr) {
        /* atomic_mmu_lookup() has already raised the signal. */
        mmap_unlock();
        return;
    }

    /* Swap if host != guest endianness, for the host cmpxchg below */
    oldval = tswap64(oldval);
    newval = tswap64(newval);

#ifdef CONFIG_ATOMIC64
    val = qatomic_cmpxchg__nocheck(host_addr, oldval, newval);
    cpsr = (val == oldval) * CPSR_C;
#else
    /*
     * This only works between threads, not between processes, but since
     * the host has no 64-bit cmpxchg, it is the best that we can do.
     */
    start_exclusive();
    val = *host_addr;
    if (val == oldval) {
        *host_addr = newval;
        cpsr = CPSR_C;
    } else {
        cpsr = 0;
    }
    end_exclusive();
#endif
    mmap_unlock();

    /* C flag set and r0 = 0 on success; C clear and r0 = -1 otherwise. */
    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    env->regs[0] = cpsr ? 0 : -1;
    return;

segv:
    /* @addr still holds whichever guest pointer faulted above. */
    force_sig_fault(TARGET_SIGSEGV,
                    page_get_flags(addr) & PAGE_VALID ?
                    TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR, addr);
}
  158. /* Handle a jump to the kernel code page. */
  159. static int
  160. do_kernel_trap(CPUARMState *env)
  161. {
  162. uint32_t addr;
  163. switch (env->regs[15]) {
  164. case 0xffff0fa0: /* __kernel_memory_barrier */
  165. smp_mb();
  166. break;
  167. case 0xffff0fc0: /* __kernel_cmpxchg */
  168. arm_kernel_cmpxchg32_helper(env);
  169. break;
  170. case 0xffff0fe0: /* __kernel_get_tls */
  171. env->regs[0] = cpu_get_tls(env);
  172. break;
  173. case 0xffff0f60: /* __kernel_cmpxchg64 */
  174. arm_kernel_cmpxchg64_helper(env);
  175. break;
  176. default:
  177. return 1;
  178. }
  179. /* Jump back to the caller. */
  180. addr = env->regs[14];
  181. if (addr & 1) {
  182. env->thumb = true;
  183. addr &= ~1;
  184. }
  185. env->regs[15] = addr;
  186. return 0;
  187. }
  188. static bool insn_is_linux_bkpt(uint32_t opcode, bool is_thumb)
  189. {
  190. /*
  191. * Return true if this insn is one of the three magic UDF insns
  192. * which the kernel treats as breakpoint insns.
  193. */
  194. if (!is_thumb) {
  195. return (opcode & 0x0fffffff) == 0x07f001f0;
  196. } else {
  197. /*
  198. * Note that we get the two halves of the 32-bit T32 insn
  199. * in the opposite order to the value the kernel uses in
  200. * its undef_hook struct.
  201. */
  202. return ((opcode & 0xffff) == 0xde01) || (opcode == 0xa000f7f0);
  203. }
  204. }
  205. static bool emulate_arm_fpa11(CPUARMState *env, uint32_t opcode)
  206. {
  207. TaskState *ts = get_task_state(env_cpu(env));
  208. int rc = EmulateAll(opcode, &ts->fpa, env);
  209. int raise, enabled;
  210. if (rc == 0) {
  211. /* Illegal instruction */
  212. return false;
  213. }
  214. if (rc > 0) {
  215. /* Everything ok. */
  216. env->regs[15] += 4;
  217. return true;
  218. }
  219. /* FP exception */
  220. rc = -rc;
  221. raise = 0;
  222. /* Translate softfloat flags to FPSR flags */
  223. if (rc & float_flag_invalid) {
  224. raise |= BIT_IOC;
  225. }
  226. if (rc & float_flag_divbyzero) {
  227. raise |= BIT_DZC;
  228. }
  229. if (rc & float_flag_overflow) {
  230. raise |= BIT_OFC;
  231. }
  232. if (rc & float_flag_underflow) {
  233. raise |= BIT_UFC;
  234. }
  235. if (rc & float_flag_inexact) {
  236. raise |= BIT_IXC;
  237. }
  238. /* Accumulate unenabled exceptions */
  239. enabled = ts->fpa.fpsr >> 16;
  240. ts->fpa.fpsr |= raise & ~enabled;
  241. if (raise & enabled) {
  242. /*
  243. * The kernel's nwfpe emulator does not pass a real si_code.
  244. * It merely uses send_sig(SIGFPE, current, 1), which results in
  245. * __send_signal() filling out SI_KERNEL with pid and uid 0 (under
  246. * the "SEND_SIG_PRIV" case). That's what our force_sig() does.
  247. */
  248. force_sig(TARGET_SIGFPE);
  249. } else {
  250. env->regs[15] += 4;
  251. }
  252. return true;
  253. }
/*
 * Main user-mode execution loop: repeatedly run guest code via
 * cpu_exec() and service whatever exception it returns with —
 * undefined insns, syscalls (SWI), memory faults, semihosting,
 * kuser-helper-page traps and debug events — then deliver any
 * pending guest signals before re-entering the guest.
 */
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);
    int trapnr, si_signo, si_code;
    unsigned int n, insn;
    abi_ulong ret;

    for(;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch(trapnr) {
        case EXCP_UDEF:
        case EXCP_NOCP:
        case EXCP_INVSTATE:
            {
                uint32_t opcode;

                /* we handle the FPU emulation here, as Linux */
                /* we get the opcode */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env);

                /*
                 * The Linux kernel treats some UDF patterns specially
                 * to use as breakpoints (instead of the architectural
                 * bkpt insn). These should trigger a SIGTRAP rather
                 * than SIGILL.
                 */
                if (insn_is_linux_bkpt(opcode, env->thumb)) {
                    goto excp_debug;
                }

                /* FPA11 emulation only applies to A32 (Arm) insns. */
                if (!env->thumb && emulate_arm_fpa11(env, opcode)) {
                    break;
                }

                force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN,
                                env->regs[15]);
            }
            break;
        case EXCP_SWI:
            {
                env->eabi = true;
                /* system call */
                if (env->thumb) {
                    /* Thumb is always EABI style with syscall number in r7 */
                    n = env->regs[7];
                } else {
                    /*
                     * Equivalent of kernel CONFIG_OABI_COMPAT: read the
                     * Arm SVC insn to extract the immediate, which is the
                     * syscall number in OABI.
                     */
                    /* FIXME - what to do if get_user() fails? */
                    /* PC has already advanced past the SVC insn. */
                    get_user_code_u32(insn, env->regs[15] - 4, env);
                    n = insn & 0xffffff;
                    if (n == 0) {
                        /* zero immediate: EABI, syscall number in r7 */
                        n = env->regs[7];
                    } else {
                        /*
                         * This XOR matches the kernel code: an immediate
                         * in the valid range (0x900000 .. 0x9fffff) is
                         * converted into the correct EABI-style syscall
                         * number; invalid immediates end up as values
                         * > 0xfffff and are handled below as out-of-range.
                         */
                        n ^= ARM_SYSCALL_BASE;
                        env->eabi = false;
                    }
                }

                if (n > ARM_NR_BASE) {
                    /* Arm-private syscall range. */
                    switch (n) {
                    case ARM_NR_cacheflush:
                        /* nop */
                        break;
                    case ARM_NR_set_tls:
                        cpu_set_tls(env, env->regs[0]);
                        env->regs[0] = 0;
                        break;
                    case ARM_NR_breakpoint:
                        /* Rewind PC onto the trapping insn, then SIGTRAP. */
                        env->regs[15] -= env->thumb ? 2 : 4;
                        goto excp_debug;
                    case ARM_NR_get_tls:
                        env->regs[0] = cpu_get_tls(env);
                        break;
                    default:
                        if (n < 0xf0800) {
                            /*
                             * Syscalls 0xf0000..0xf07ff (or 0x9f0000..
                             * 0x9f07ff in OABI numbering) are defined
                             * to return -ENOSYS rather than raising
                             * SIGILL. Note that we have already
                             * removed the 0x900000 prefix.
                             */
                            qemu_log_mask(LOG_UNIMP,
                                "qemu: Unsupported ARM syscall: 0x%x\n",
                                          n);
                            env->regs[0] = -TARGET_ENOSYS;
                        } else {
                            /*
                             * Otherwise SIGILL. This includes any SWI with
                             * immediate not originally 0x9fxxxx, because
                             * of the earlier XOR.
                             * Like the real kernel, we report the addr of the
                             * SWI in the siginfo si_addr but leave the PC
                             * pointing at the insn after the SWI.
                             */
                            abi_ulong faultaddr = env->regs[15];
                            faultaddr -= env->thumb ? 2 : 4;
                            force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLTRP,
                                            faultaddr);
                        }
                        break;
                    }
                } else {
                    ret = do_syscall(env,
                                     n,
                                     env->regs[0],
                                     env->regs[1],
                                     env->regs[2],
                                     env->regs[3],
                                     env->regs[4],
                                     env->regs[5],
                                     0, 0);
                    if (ret == -QEMU_ERESTARTSYS) {
                        /* Restart: back the PC up onto the SVC insn. */
                        env->regs[15] -= env->thumb ? 2 : 4;
                    } else if (ret != -QEMU_ESIGRETURN) {
                        env->regs[0] = ret;
                    }
                }
            }
            break;
        case EXCP_SEMIHOST:
            do_common_semihosting(cs);
            /* Step past the semihosting trap insn. */
            env->regs[15] += env->thumb ? 2 : 4;
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            /* For user-only we don't set TTBCR_EAE, so look at the FSR. */
            switch (env->exception.fsr & 0x1f) {
            case 0x1: /* Alignment */
                si_signo = TARGET_SIGBUS;
                si_code = TARGET_BUS_ADRALN;
                break;
            case 0x3: /* Access flag fault, level 1 */
            case 0x6: /* Access flag fault, level 2 */
            case 0x9: /* Domain fault, level 1 */
            case 0xb: /* Domain fault, level 2 */
            case 0xd: /* Permission fault, level 1 */
            case 0xf: /* Permission fault, level 2 */
                si_signo = TARGET_SIGSEGV;
                si_code = TARGET_SEGV_ACCERR;
                break;
            case 0x5: /* Translation fault, level 1 */
            case 0x7: /* Translation fault, level 2 */
                si_signo = TARGET_SIGSEGV;
                si_code = TARGET_SEGV_MAPERR;
                break;
            default:
                /* Other FSR codes are never generated in user-only mode. */
                g_assert_not_reached();
            }
            force_sig_fault(si_signo, si_code, env->exception.vaddress);
            break;
        case EXCP_DEBUG:
        case EXCP_BKPT:
        excp_debug:
            force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->regs[15]);
            break;
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env))
                goto error;
            break;
        case EXCP_YIELD:
            /* nothing to do here for user-mode, just resume guest code */
            break;
        case EXCP_ATOMIC:
            /* Re-execute the insn exclusively (one-insn serial fallback). */
            cpu_exec_step_atomic(cs);
            break;
        default:
        error:
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
            abort();
        }
        process_pending_signals(env);
    }
}
/*
 * Initialise the CPU state for a new guest process from the register
 * set prepared by the ELF loader, and record the initial stack/heap
 * layout in the TaskState.
 */
void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = get_task_state(cpu);
    struct image_info *info = ts->info;
    int i;

    /* uregs[16] carries the initial CPSR; uregs[0..15] the GP regs/PC. */
    cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
               CPSRWriteByInstr);
    for(i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
    }
#if TARGET_BIG_ENDIAN
    /* Enable BE8. */
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->uncached_cpsr |= CPSR_E;
        env->cp15.sctlr_el[1] |= SCTLR_E0E;
    } else {
        /* Legacy BE32 binary: word-invariant big-endian data accesses. */
        env->cp15.sctlr_el[1] |= SCTLR_B;
    }
    /* CPSR/SCTLR changed above, so the cached tb flags must be rebuilt. */
    arm_rebuild_hflags(env);
#endif

    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call. */
    ts->heap_limit = 0;
}