/* cpu-exec.c */
  1. /*
  2. * i386 emulator main execution loop
  3. *
  4. * Copyright (c) 2003-2005 Fabrice Bellard
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #include "config.h"
  20. #include "exec.h"
  21. #include "disas.h"
  22. #include "tcg.h"
  23. #include "kvm.h"
  24. #if !defined(CONFIG_SOFTMMU)
  25. #undef EAX
  26. #undef ECX
  27. #undef EDX
  28. #undef EBX
  29. #undef ESP
  30. #undef EBP
  31. #undef ESI
  32. #undef EDI
  33. #undef EIP
  34. #include <signal.h>
  35. #ifdef __linux__
  36. #include <sys/ucontext.h>
  37. #endif
  38. #endif
  39. #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
  40. // Work around ugly bugs in glibc that mangle global register contents
  41. #undef env
  42. #define env cpu_single_env
  43. #endif
  44. int tb_invalidated_flag;
  45. //#define CONFIG_DEBUG_EXEC
  46. //#define DEBUG_SIGNAL
  47. int qemu_cpu_has_work(CPUState *env)
  48. {
  49. return cpu_has_work(env);
  50. }
  51. void cpu_loop_exit(void)
  52. {
  53. /* NOTE: the register at this point must be saved by hand because
  54. longjmp restore them */
  55. regs_to_env();
  56. longjmp(env->jmp_env, 1);
  57. }
  58. /* exit the current TB from a signal handler. The host registers are
  59. restored in a state compatible with the CPU emulator
  60. */
  61. void cpu_resume_from_signal(CPUState *env1, void *puc)
  62. {
  63. #if !defined(CONFIG_SOFTMMU)
  64. #ifdef __linux__
  65. struct ucontext *uc = puc;
  66. #elif defined(__OpenBSD__)
  67. struct sigcontext *uc = puc;
  68. #endif
  69. #endif
  70. env = env1;
  71. /* XXX: restore cpu registers saved in host registers */
  72. #if !defined(CONFIG_SOFTMMU)
  73. if (puc) {
  74. /* XXX: use siglongjmp ? */
  75. #ifdef __linux__
  76. sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
  77. #elif defined(__OpenBSD__)
  78. sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
  79. #endif
  80. }
  81. #endif
  82. env->exception_index = -1;
  83. longjmp(env->jmp_env, 1);
  84. }
  85. /* Execute the code without caching the generated code. An interpreter
  86. could be used if available. */
  87. static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
  88. {
  89. unsigned long next_tb;
  90. TranslationBlock *tb;
  91. /* Should never happen.
  92. We only end up here when an existing TB is too long. */
  93. if (max_cycles > CF_COUNT_MASK)
  94. max_cycles = CF_COUNT_MASK;
  95. tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
  96. max_cycles);
  97. env->current_tb = tb;
  98. /* execute the generated code */
  99. next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
  100. if ((next_tb & 3) == 2) {
  101. /* Restore PC. This may happen if async event occurs before
  102. the TB starts executing. */
  103. cpu_pc_from_tb(env, tb);
  104. }
  105. tb_phys_invalidate(tb, -1);
  106. tb_free(tb);
  107. }
  108. static TranslationBlock *tb_find_slow(target_ulong pc,
  109. target_ulong cs_base,
  110. uint64_t flags)
  111. {
  112. TranslationBlock *tb, **ptb1;
  113. unsigned int h;
  114. target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
  115. tb_invalidated_flag = 0;
  116. regs_to_env(); /* XXX: do it just before cpu_gen_code() */
  117. /* find translated block using physical mappings */
  118. phys_pc = get_phys_addr_code(env, pc);
  119. phys_page1 = phys_pc & TARGET_PAGE_MASK;
  120. phys_page2 = -1;
  121. h = tb_phys_hash_func(phys_pc);
  122. ptb1 = &tb_phys_hash[h];
  123. for(;;) {
  124. tb = *ptb1;
  125. if (!tb)
  126. goto not_found;
  127. if (tb->pc == pc &&
  128. tb->page_addr[0] == phys_page1 &&
  129. tb->cs_base == cs_base &&
  130. tb->flags == flags) {
  131. /* check next page if needed */
  132. if (tb->page_addr[1] != -1) {
  133. virt_page2 = (pc & TARGET_PAGE_MASK) +
  134. TARGET_PAGE_SIZE;
  135. phys_page2 = get_phys_addr_code(env, virt_page2);
  136. if (tb->page_addr[1] == phys_page2)
  137. goto found;
  138. } else {
  139. goto found;
  140. }
  141. }
  142. ptb1 = &tb->phys_hash_next;
  143. }
  144. not_found:
  145. /* if no translated code available, then translate it now */
  146. tb = tb_gen_code(env, pc, cs_base, flags, 0);
  147. found:
  148. /* we add the TB in the virtual pc hash table */
  149. env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
  150. return tb;
  151. }
  152. static inline TranslationBlock *tb_find_fast(void)
  153. {
  154. TranslationBlock *tb;
  155. target_ulong cs_base, pc;
  156. int flags;
  157. /* we record a subset of the CPU state. It will
  158. always be the same before a given translated block
  159. is executed. */
  160. cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
  161. tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
  162. if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
  163. tb->flags != flags)) {
  164. tb = tb_find_slow(pc, cs_base, flags);
  165. }
  166. return tb;
  167. }
  168. static CPUDebugExcpHandler *debug_excp_handler;
  169. CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
  170. {
  171. CPUDebugExcpHandler *old_handler = debug_excp_handler;
  172. debug_excp_handler = handler;
  173. return old_handler;
  174. }
  175. static void cpu_handle_debug_exception(CPUState *env)
  176. {
  177. CPUWatchpoint *wp;
  178. if (!env->watchpoint_hit)
  179. QTAILQ_FOREACH(wp, &env->watchpoints, entry)
  180. wp->flags &= ~BP_WATCHPOINT_HIT;
  181. if (debug_excp_handler)
  182. debug_excp_handler(env);
  183. }
  184. /* main execution loop */
  185. int cpu_exec(CPUState *env1)
  186. {
  187. #define DECLARE_HOST_REGS 1
  188. #include "hostregs_helper.h"
  189. int ret, interrupt_request;
  190. TranslationBlock *tb;
  191. uint8_t *tc_ptr;
  192. unsigned long next_tb;
  193. if (cpu_halted(env1) == EXCP_HALTED)
  194. return EXCP_HALTED;
  195. cpu_single_env = env1;
  196. /* first we save global registers */
  197. #define SAVE_HOST_REGS 1
  198. #include "hostregs_helper.h"
  199. env = env1;
  200. env_to_regs();
  201. #if defined(TARGET_I386)
  202. /* put eflags in CPU temporary format */
  203. CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
  204. DF = 1 - (2 * ((env->eflags >> 10) & 1));
  205. CC_OP = CC_OP_EFLAGS;
  206. env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
  207. #elif defined(TARGET_SPARC)
  208. #elif defined(TARGET_M68K)
  209. env->cc_op = CC_OP_FLAGS;
  210. env->cc_dest = env->sr & 0xf;
  211. env->cc_x = (env->sr >> 4) & 1;
  212. #elif defined(TARGET_ALPHA)
  213. #elif defined(TARGET_ARM)
  214. #elif defined(TARGET_PPC)
  215. #elif defined(TARGET_MICROBLAZE)
  216. #elif defined(TARGET_MIPS)
  217. #elif defined(TARGET_SH4)
  218. #elif defined(TARGET_CRIS)
  219. /* XXXXX */
  220. #else
  221. #error unsupported target CPU
  222. #endif
  223. env->exception_index = -1;
  224. /* prepare setjmp context for exception handling */
  225. for(;;) {
  226. if (setjmp(env->jmp_env) == 0) {
  227. #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
  228. #undef env
  229. env = cpu_single_env;
  230. #define env cpu_single_env
  231. #endif
  232. env->current_tb = NULL;
  233. /* if an exception is pending, we execute it here */
  234. if (env->exception_index >= 0) {
  235. if (env->exception_index >= EXCP_INTERRUPT) {
  236. /* exit request from the cpu execution loop */
  237. ret = env->exception_index;
  238. if (ret == EXCP_DEBUG)
  239. cpu_handle_debug_exception(env);
  240. break;
  241. } else {
  242. #if defined(CONFIG_USER_ONLY)
  243. /* if user mode only, we simulate a fake exception
  244. which will be handled outside the cpu execution
  245. loop */
  246. #if defined(TARGET_I386)
  247. do_interrupt_user(env->exception_index,
  248. env->exception_is_int,
  249. env->error_code,
  250. env->exception_next_eip);
  251. /* successfully delivered */
  252. env->old_exception = -1;
  253. #endif
  254. ret = env->exception_index;
  255. break;
  256. #else
  257. #if defined(TARGET_I386)
  258. /* simulate a real cpu exception. On i386, it can
  259. trigger new exceptions, but we do not handle
  260. double or triple faults yet. */
  261. do_interrupt(env->exception_index,
  262. env->exception_is_int,
  263. env->error_code,
  264. env->exception_next_eip, 0);
  265. /* successfully delivered */
  266. env->old_exception = -1;
  267. #elif defined(TARGET_PPC)
  268. do_interrupt(env);
  269. #elif defined(TARGET_MICROBLAZE)
  270. do_interrupt(env);
  271. #elif defined(TARGET_MIPS)
  272. do_interrupt(env);
  273. #elif defined(TARGET_SPARC)
  274. do_interrupt(env);
  275. #elif defined(TARGET_ARM)
  276. do_interrupt(env);
  277. #elif defined(TARGET_SH4)
  278. do_interrupt(env);
  279. #elif defined(TARGET_ALPHA)
  280. do_interrupt(env);
  281. #elif defined(TARGET_CRIS)
  282. do_interrupt(env);
  283. #elif defined(TARGET_M68K)
  284. do_interrupt(0);
  285. #endif
  286. #endif
  287. }
  288. env->exception_index = -1;
  289. }
  290. if (kvm_enabled()) {
  291. kvm_cpu_exec(env);
  292. longjmp(env->jmp_env, 1);
  293. }
  294. next_tb = 0; /* force lookup of first TB */
  295. for(;;) {
  296. interrupt_request = env->interrupt_request;
  297. if (unlikely(interrupt_request)) {
  298. if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
  299. /* Mask out external interrupts for this step. */
  300. interrupt_request &= ~(CPU_INTERRUPT_HARD |
  301. CPU_INTERRUPT_FIQ |
  302. CPU_INTERRUPT_SMI |
  303. CPU_INTERRUPT_NMI);
  304. }
  305. if (interrupt_request & CPU_INTERRUPT_DEBUG) {
  306. env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
  307. env->exception_index = EXCP_DEBUG;
  308. cpu_loop_exit();
  309. }
  310. #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
  311. defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
  312. defined(TARGET_MICROBLAZE)
  313. if (interrupt_request & CPU_INTERRUPT_HALT) {
  314. env->interrupt_request &= ~CPU_INTERRUPT_HALT;
  315. env->halted = 1;
  316. env->exception_index = EXCP_HLT;
  317. cpu_loop_exit();
  318. }
  319. #endif
  320. #if defined(TARGET_I386)
  321. if (interrupt_request & CPU_INTERRUPT_INIT) {
  322. svm_check_intercept(SVM_EXIT_INIT);
  323. do_cpu_init(env);
  324. env->exception_index = EXCP_HALTED;
  325. cpu_loop_exit();
  326. } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
  327. do_cpu_sipi(env);
  328. } else if (env->hflags2 & HF2_GIF_MASK) {
  329. if ((interrupt_request & CPU_INTERRUPT_SMI) &&
  330. !(env->hflags & HF_SMM_MASK)) {
  331. svm_check_intercept(SVM_EXIT_SMI);
  332. env->interrupt_request &= ~CPU_INTERRUPT_SMI;
  333. do_smm_enter();
  334. next_tb = 0;
  335. } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
  336. !(env->hflags2 & HF2_NMI_MASK)) {
  337. env->interrupt_request &= ~CPU_INTERRUPT_NMI;
  338. env->hflags2 |= HF2_NMI_MASK;
  339. do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
  340. next_tb = 0;
  341. } else if (interrupt_request & CPU_INTERRUPT_MCE) {
  342. env->interrupt_request &= ~CPU_INTERRUPT_MCE;
  343. do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
  344. next_tb = 0;
  345. } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
  346. (((env->hflags2 & HF2_VINTR_MASK) &&
  347. (env->hflags2 & HF2_HIF_MASK)) ||
  348. (!(env->hflags2 & HF2_VINTR_MASK) &&
  349. (env->eflags & IF_MASK &&
  350. !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
  351. int intno;
  352. svm_check_intercept(SVM_EXIT_INTR);
  353. env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
  354. intno = cpu_get_pic_interrupt(env);
  355. qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
  356. #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
  357. #undef env
  358. env = cpu_single_env;
  359. #define env cpu_single_env
  360. #endif
  361. do_interrupt(intno, 0, 0, 0, 1);
  362. /* ensure that no TB jump will be modified as
  363. the program flow was changed */
  364. next_tb = 0;
  365. #if !defined(CONFIG_USER_ONLY)
  366. } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
  367. (env->eflags & IF_MASK) &&
  368. !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
  369. int intno;
  370. /* FIXME: this should respect TPR */
  371. svm_check_intercept(SVM_EXIT_VINTR);
  372. intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
  373. qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
  374. do_interrupt(intno, 0, 0, 0, 1);
  375. env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
  376. next_tb = 0;
  377. #endif
  378. }
  379. }
  380. #elif defined(TARGET_PPC)
  381. #if 0
  382. if ((interrupt_request & CPU_INTERRUPT_RESET)) {
  383. cpu_ppc_reset(env);
  384. }
  385. #endif
  386. if (interrupt_request & CPU_INTERRUPT_HARD) {
  387. ppc_hw_interrupt(env);
  388. if (env->pending_interrupts == 0)
  389. env->interrupt_request &= ~CPU_INTERRUPT_HARD;
  390. next_tb = 0;
  391. }
  392. #elif defined(TARGET_MICROBLAZE)
  393. if ((interrupt_request & CPU_INTERRUPT_HARD)
  394. && (env->sregs[SR_MSR] & MSR_IE)
  395. && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
  396. && !(env->iflags & (D_FLAG | IMM_FLAG))) {
  397. env->exception_index = EXCP_IRQ;
  398. do_interrupt(env);
  399. next_tb = 0;
  400. }
  401. #elif defined(TARGET_MIPS)
  402. if ((interrupt_request & CPU_INTERRUPT_HARD) &&
  403. (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
  404. (env->CP0_Status & (1 << CP0St_IE)) &&
  405. !(env->CP0_Status & (1 << CP0St_EXL)) &&
  406. !(env->CP0_Status & (1 << CP0St_ERL)) &&
  407. !(env->hflags & MIPS_HFLAG_DM)) {
  408. /* Raise it */
  409. env->exception_index = EXCP_EXT_INTERRUPT;
  410. env->error_code = 0;
  411. do_interrupt(env);
  412. next_tb = 0;
  413. }
  414. #elif defined(TARGET_SPARC)
  415. if ((interrupt_request & CPU_INTERRUPT_HARD) &&
  416. cpu_interrupts_enabled(env)) {
  417. int pil = env->interrupt_index & 15;
  418. int type = env->interrupt_index & 0xf0;
  419. if (((type == TT_EXTINT) &&
  420. (pil == 15 || pil > env->psrpil)) ||
  421. type != TT_EXTINT) {
  422. env->interrupt_request &= ~CPU_INTERRUPT_HARD;
  423. env->exception_index = env->interrupt_index;
  424. do_interrupt(env);
  425. env->interrupt_index = 0;
  426. next_tb = 0;
  427. }
  428. } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
  429. //do_interrupt(0, 0, 0, 0, 0);
  430. env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
  431. }
  432. #elif defined(TARGET_ARM)
  433. if (interrupt_request & CPU_INTERRUPT_FIQ
  434. && !(env->uncached_cpsr & CPSR_F)) {
  435. env->exception_index = EXCP_FIQ;
  436. do_interrupt(env);
  437. next_tb = 0;
  438. }
  439. /* ARMv7-M interrupt return works by loading a magic value
  440. into the PC. On real hardware the load causes the
  441. return to occur. The qemu implementation performs the
  442. jump normally, then does the exception return when the
  443. CPU tries to execute code at the magic address.
  444. This will cause the magic PC value to be pushed to
  445. the stack if an interrupt occured at the wrong time.
  446. We avoid this by disabling interrupts when
  447. pc contains a magic address. */
  448. if (interrupt_request & CPU_INTERRUPT_HARD
  449. && ((IS_M(env) && env->regs[15] < 0xfffffff0)
  450. || !(env->uncached_cpsr & CPSR_I))) {
  451. env->exception_index = EXCP_IRQ;
  452. do_interrupt(env);
  453. next_tb = 0;
  454. }
  455. #elif defined(TARGET_SH4)
  456. if (interrupt_request & CPU_INTERRUPT_HARD) {
  457. do_interrupt(env);
  458. next_tb = 0;
  459. }
  460. #elif defined(TARGET_ALPHA)
  461. if (interrupt_request & CPU_INTERRUPT_HARD) {
  462. do_interrupt(env);
  463. next_tb = 0;
  464. }
  465. #elif defined(TARGET_CRIS)
  466. if (interrupt_request & CPU_INTERRUPT_HARD
  467. && (env->pregs[PR_CCS] & I_FLAG)) {
  468. env->exception_index = EXCP_IRQ;
  469. do_interrupt(env);
  470. next_tb = 0;
  471. }
  472. if (interrupt_request & CPU_INTERRUPT_NMI
  473. && (env->pregs[PR_CCS] & M_FLAG)) {
  474. env->exception_index = EXCP_NMI;
  475. do_interrupt(env);
  476. next_tb = 0;
  477. }
  478. #elif defined(TARGET_M68K)
  479. if (interrupt_request & CPU_INTERRUPT_HARD
  480. && ((env->sr & SR_I) >> SR_I_SHIFT)
  481. < env->pending_level) {
  482. /* Real hardware gets the interrupt vector via an
  483. IACK cycle at this point. Current emulated
  484. hardware doesn't rely on this, so we
  485. provide/save the vector when the interrupt is
  486. first signalled. */
  487. env->exception_index = env->pending_vector;
  488. do_interrupt(1);
  489. next_tb = 0;
  490. }
  491. #endif
  492. /* Don't use the cached interupt_request value,
  493. do_interrupt may have updated the EXITTB flag. */
  494. if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
  495. env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
  496. /* ensure that no TB jump will be modified as
  497. the program flow was changed */
  498. next_tb = 0;
  499. }
  500. }
  501. if (unlikely(env->exit_request)) {
  502. env->exit_request = 0;
  503. env->exception_index = EXCP_INTERRUPT;
  504. cpu_loop_exit();
  505. }
  506. #ifdef CONFIG_DEBUG_EXEC
  507. if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
  508. /* restore flags in standard format */
  509. regs_to_env();
  510. #if defined(TARGET_I386)
  511. env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
  512. log_cpu_state(env, X86_DUMP_CCOP);
  513. env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
  514. #elif defined(TARGET_ARM)
  515. log_cpu_state(env, 0);
  516. #elif defined(TARGET_SPARC)
  517. log_cpu_state(env, 0);
  518. #elif defined(TARGET_PPC)
  519. log_cpu_state(env, 0);
  520. #elif defined(TARGET_M68K)
  521. cpu_m68k_flush_flags(env, env->cc_op);
  522. env->cc_op = CC_OP_FLAGS;
  523. env->sr = (env->sr & 0xffe0)
  524. | env->cc_dest | (env->cc_x << 4);
  525. log_cpu_state(env, 0);
  526. #elif defined(TARGET_MICROBLAZE)
  527. log_cpu_state(env, 0);
  528. #elif defined(TARGET_MIPS)
  529. log_cpu_state(env, 0);
  530. #elif defined(TARGET_SH4)
  531. log_cpu_state(env, 0);
  532. #elif defined(TARGET_ALPHA)
  533. log_cpu_state(env, 0);
  534. #elif defined(TARGET_CRIS)
  535. log_cpu_state(env, 0);
  536. #else
  537. #error unsupported target CPU
  538. #endif
  539. }
  540. #endif
  541. spin_lock(&tb_lock);
  542. tb = tb_find_fast();
  543. /* Note: we do it here to avoid a gcc bug on Mac OS X when
  544. doing it in tb_find_slow */
  545. if (tb_invalidated_flag) {
  546. /* as some TB could have been invalidated because
  547. of memory exceptions while generating the code, we
  548. must recompute the hash index here */
  549. next_tb = 0;
  550. tb_invalidated_flag = 0;
  551. }
  552. #ifdef CONFIG_DEBUG_EXEC
  553. qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
  554. (long)tb->tc_ptr, tb->pc,
  555. lookup_symbol(tb->pc));
  556. #endif
  557. /* see if we can patch the calling TB. When the TB
  558. spans two pages, we cannot safely do a direct
  559. jump. */
  560. {
  561. if (next_tb != 0 && tb->page_addr[1] == -1) {
  562. tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
  563. }
  564. }
  565. spin_unlock(&tb_lock);
  566. env->current_tb = tb;
  567. /* cpu_interrupt might be called while translating the
  568. TB, but before it is linked into a potentially
  569. infinite loop and becomes env->current_tb. Avoid
  570. starting execution if there is a pending interrupt. */
  571. if (unlikely (env->exit_request))
  572. env->current_tb = NULL;
  573. while (env->current_tb) {
  574. tc_ptr = tb->tc_ptr;
  575. /* execute the generated code */
  576. #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
  577. #undef env
  578. env = cpu_single_env;
  579. #define env cpu_single_env
  580. #endif
  581. next_tb = tcg_qemu_tb_exec(tc_ptr);
  582. env->current_tb = NULL;
  583. if ((next_tb & 3) == 2) {
  584. /* Instruction counter expired. */
  585. int insns_left;
  586. tb = (TranslationBlock *)(long)(next_tb & ~3);
  587. /* Restore PC. */
  588. cpu_pc_from_tb(env, tb);
  589. insns_left = env->icount_decr.u32;
  590. if (env->icount_extra && insns_left >= 0) {
  591. /* Refill decrementer and continue execution. */
  592. env->icount_extra += insns_left;
  593. if (env->icount_extra > 0xffff) {
  594. insns_left = 0xffff;
  595. } else {
  596. insns_left = env->icount_extra;
  597. }
  598. env->icount_extra -= insns_left;
  599. env->icount_decr.u16.low = insns_left;
  600. } else {
  601. if (insns_left > 0) {
  602. /* Execute remaining instructions. */
  603. cpu_exec_nocache(insns_left, tb);
  604. }
  605. env->exception_index = EXCP_INTERRUPT;
  606. next_tb = 0;
  607. cpu_loop_exit();
  608. }
  609. }
  610. }
  611. /* reset soft MMU for next block (it can currently
  612. only be set by a memory fault) */
  613. } /* for(;;) */
  614. } else {
  615. env_to_regs();
  616. }
  617. } /* for(;;) */
  618. #if defined(TARGET_I386)
  619. /* restore flags in standard format */
  620. env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
  621. #elif defined(TARGET_ARM)
  622. /* XXX: Save/restore host fpu exception state?. */
  623. #elif defined(TARGET_SPARC)
  624. #elif defined(TARGET_PPC)
  625. #elif defined(TARGET_M68K)
  626. cpu_m68k_flush_flags(env, env->cc_op);
  627. env->cc_op = CC_OP_FLAGS;
  628. env->sr = (env->sr & 0xffe0)
  629. | env->cc_dest | (env->cc_x << 4);
  630. #elif defined(TARGET_MICROBLAZE)
  631. #elif defined(TARGET_MIPS)
  632. #elif defined(TARGET_SH4)
  633. #elif defined(TARGET_ALPHA)
  634. #elif defined(TARGET_CRIS)
  635. /* XXXXX */
  636. #else
  637. #error unsupported target CPU
  638. #endif
  639. /* restore global registers */
  640. #include "hostregs_helper.h"
  641. /* fail safe : never use cpu_single_env outside cpu_exec() */
  642. cpu_single_env = NULL;
  643. return ret;
  644. }
  645. /* must only be called from the generated code as an exception can be
  646. generated */
  647. void tb_invalidate_page_range(target_ulong start, target_ulong end)
  648. {
  649. /* XXX: cannot enable it yet because it yields to MMU exception
  650. where NIP != read address on PowerPC */
  651. #if 0
  652. target_ulong phys_addr;
  653. phys_addr = get_phys_addr_code(env, start);
  654. tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
  655. #endif
  656. }
  657. #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
  658. void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
  659. {
  660. CPUX86State *saved_env;
  661. saved_env = env;
  662. env = s;
  663. if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
  664. selector &= 0xffff;
  665. cpu_x86_load_seg_cache(env, seg_reg, selector,
  666. (selector << 4), 0xffff, 0);
  667. } else {
  668. helper_load_seg(seg_reg, selector);
  669. }
  670. env = saved_env;
  671. }
  672. void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
  673. {
  674. CPUX86State *saved_env;
  675. saved_env = env;
  676. env = s;
  677. helper_fsave(ptr, data32);
  678. env = saved_env;
  679. }
  680. void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
  681. {
  682. CPUX86State *saved_env;
  683. saved_env = env;
  684. env = s;
  685. helper_frstor(ptr, data32);
  686. env = saved_env;
  687. }
  688. #endif /* TARGET_I386 */
  689. #if !defined(CONFIG_SOFTMMU)
  690. #if defined(TARGET_I386)
  691. #define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
  692. #else
  693. #define EXCEPTION_ACTION cpu_loop_exit()
  694. #endif
  695. /* 'pc' is the host PC at which the exception was raised. 'address' is
  696. the effective address of the memory exception. 'is_write' is 1 if a
  697. write caused the exception and otherwise 0'. 'old_set' is the
  698. signal set which should be restored */
  699. static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
  700. int is_write, sigset_t *old_set,
  701. void *puc)
  702. {
  703. TranslationBlock *tb;
  704. int ret;
  705. if (cpu_single_env)
  706. env = cpu_single_env; /* XXX: find a correct solution for multithread */
  707. #if defined(DEBUG_SIGNAL)
  708. qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
  709. pc, address, is_write, *(unsigned long *)old_set);
  710. #endif
  711. /* XXX: locking issue */
  712. if (is_write && page_unprotect(h2g(address), pc, puc)) {
  713. return 1;
  714. }
  715. /* see if it is an MMU fault */
  716. ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
  717. if (ret < 0)
  718. return 0; /* not an MMU fault */
  719. if (ret == 0)
  720. return 1; /* the MMU fault was handled without causing real CPU fault */
  721. /* now we have a real cpu fault */
  722. tb = tb_find_pc(pc);
  723. if (tb) {
  724. /* the PC is inside the translated code. It means that we have
  725. a virtual CPU fault */
  726. cpu_restore_state(tb, env, pc, puc);
  727. }
  728. /* we restore the process signal mask as the sigreturn should
  729. do it (XXX: use sigsetjmp) */
  730. sigprocmask(SIG_SETMASK, old_set, NULL);
  731. EXCEPTION_ACTION;
  732. /* never comes here */
  733. return 1;
  734. }
  735. #if defined(__i386__)
  736. #if defined(__APPLE__)
  737. # include <sys/ucontext.h>
  738. # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
  739. # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
  740. # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
  741. # define MASK_sig(context) ((context)->uc_sigmask)
  742. #elif defined (__NetBSD__)
  743. # include <ucontext.h>
  744. # define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP])
  745. # define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
  746. # define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
  747. # define MASK_sig(context) ((context)->uc_sigmask)
  748. #elif defined (__FreeBSD__) || defined(__DragonFly__)
  749. # include <ucontext.h>
  750. # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
  751. # define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
  752. # define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
  753. # define MASK_sig(context) ((context)->uc_sigmask)
  754. #elif defined(__OpenBSD__)
  755. # define EIP_sig(context) ((context)->sc_eip)
  756. # define TRAP_sig(context) ((context)->sc_trapno)
  757. # define ERROR_sig(context) ((context)->sc_err)
  758. # define MASK_sig(context) ((context)->sc_mask)
  759. #else
  760. # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
  761. # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
  762. # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
  763. # define MASK_sig(context) ((context)->uc_sigmask)
  764. #endif
  765. int cpu_signal_handler(int host_signum, void *pinfo,
  766. void *puc)
  767. {
  768. siginfo_t *info = pinfo;
  769. #if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
  770. ucontext_t *uc = puc;
  771. #elif defined(__OpenBSD__)
  772. struct sigcontext *uc = puc;
  773. #else
  774. struct ucontext *uc = puc;
  775. #endif
  776. unsigned long pc;
  777. int trapno;
  778. #ifndef REG_EIP
  779. /* for glibc 2.1 */
  780. #define REG_EIP EIP
  781. #define REG_ERR ERR
  782. #define REG_TRAPNO TRAPNO
  783. #endif
  784. pc = EIP_sig(uc);
  785. trapno = TRAP_sig(uc);
  786. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  787. trapno == 0xe ?
  788. (ERROR_sig(uc) >> 1) & 1 : 0,
  789. &MASK_sig(uc), puc);
  790. }
  791. #elif defined(__x86_64__)
  792. #ifdef __NetBSD__
  793. #define PC_sig(context) _UC_MACHINE_PC(context)
  794. #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
  795. #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
  796. #define MASK_sig(context) ((context)->uc_sigmask)
  797. #elif defined(__OpenBSD__)
  798. #define PC_sig(context) ((context)->sc_rip)
  799. #define TRAP_sig(context) ((context)->sc_trapno)
  800. #define ERROR_sig(context) ((context)->sc_err)
  801. #define MASK_sig(context) ((context)->sc_mask)
  802. #elif defined (__FreeBSD__) || defined(__DragonFly__)
  803. #include <ucontext.h>
  804. #define PC_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
  805. #define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
  806. #define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
  807. #define MASK_sig(context) ((context)->uc_sigmask)
  808. #else
  809. #define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
  810. #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
  811. #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
  812. #define MASK_sig(context) ((context)->uc_sigmask)
  813. #endif
  814. int cpu_signal_handler(int host_signum, void *pinfo,
  815. void *puc)
  816. {
  817. siginfo_t *info = pinfo;
  818. unsigned long pc;
  819. #if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
  820. ucontext_t *uc = puc;
  821. #elif defined(__OpenBSD__)
  822. struct sigcontext *uc = puc;
  823. #else
  824. struct ucontext *uc = puc;
  825. #endif
  826. pc = PC_sig(uc);
  827. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  828. TRAP_sig(uc) == 0xe ?
  829. (ERROR_sig(uc) >> 1) & 1 : 0,
  830. &MASK_sig(uc), puc);
  831. }
  832. #elif defined(_ARCH_PPC)
  833. /***********************************************************************
  834. * signal context platform-specific definitions
  835. * From Wine
  836. */
  837. #ifdef linux
  838. /* All Registers access - only for local access */
  839. # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
  840. /* Gpr Registers access */
  841. # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
  842. # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
  843. # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
  844. # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
  845. # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
  846. # define LR_sig(context) REG_sig(link, context) /* Link register */
  847. # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
  848. /* Float Registers access */
  849. # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
  850. # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
  851. /* Exception Registers access */
  852. # define DAR_sig(context) REG_sig(dar, context)
  853. # define DSISR_sig(context) REG_sig(dsisr, context)
  854. # define TRAP_sig(context) REG_sig(trap, context)
  855. #endif /* linux */
  856. #ifdef __APPLE__
  857. # include <sys/ucontext.h>
  858. typedef struct ucontext SIGCONTEXT;
  859. /* All Registers access - only for local access */
  860. # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
  861. # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
  862. # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
  863. # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
  864. /* Gpr Registers access */
  865. # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
  866. # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
  867. # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
  868. # define CTR_sig(context) REG_sig(ctr, context)
  869. # define XER_sig(context) REG_sig(xer, context) /* Link register */
  870. # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
  871. # define CR_sig(context) REG_sig(cr, context) /* Condition register */
  872. /* Float Registers access */
  873. # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
  874. # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
  875. /* Exception Registers access */
  876. # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
  877. # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
  878. # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
  879. #endif /* __APPLE__ */
  880. int cpu_signal_handler(int host_signum, void *pinfo,
  881. void *puc)
  882. {
  883. siginfo_t *info = pinfo;
  884. struct ucontext *uc = puc;
  885. unsigned long pc;
  886. int is_write;
  887. pc = IAR_sig(uc);
  888. is_write = 0;
  889. #if 0
  890. /* ppc 4xx case */
  891. if (DSISR_sig(uc) & 0x00800000)
  892. is_write = 1;
  893. #else
  894. if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
  895. is_write = 1;
  896. #endif
  897. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  898. is_write, &uc->uc_sigmask, puc);
  899. }
  900. #elif defined(__alpha__)
  901. int cpu_signal_handler(int host_signum, void *pinfo,
  902. void *puc)
  903. {
  904. siginfo_t *info = pinfo;
  905. struct ucontext *uc = puc;
  906. uint32_t *pc = uc->uc_mcontext.sc_pc;
  907. uint32_t insn = *pc;
  908. int is_write = 0;
  909. /* XXX: need kernel patch to get write flag faster */
  910. switch (insn >> 26) {
  911. case 0x0d: // stw
  912. case 0x0e: // stb
  913. case 0x0f: // stq_u
  914. case 0x24: // stf
  915. case 0x25: // stg
  916. case 0x26: // sts
  917. case 0x27: // stt
  918. case 0x2c: // stl
  919. case 0x2d: // stq
  920. case 0x2e: // stl_c
  921. case 0x2f: // stq_c
  922. is_write = 1;
  923. }
  924. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  925. is_write, &uc->uc_sigmask, puc);
  926. }
#elif defined(__sparc__)

/* Host signal handler for SPARC hosts: recover the faulting PC from the
   signal context, decode the faulting instruction to decide whether it
   was a store (write access), and forward to handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    /* NOTE(review): this assumes the register window / sigcontext data
       immediately follows the siginfo_t in memory, with the PC at word
       index 1 and the signal mask at word offset 20 — layout inherited
       from the 32-bit SPARC signal frame; verify against the host ABI. */
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    /* Format-3 (load/store) instructions have bits 31:30 == 3; the op3
       field in bits 24:19 selects the operation — match every store
       variant. */
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
  981. #elif defined(__arm__)
  982. int cpu_signal_handler(int host_signum, void *pinfo,
  983. void *puc)
  984. {
  985. siginfo_t *info = pinfo;
  986. struct ucontext *uc = puc;
  987. unsigned long pc;
  988. int is_write;
  989. #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
  990. pc = uc->uc_mcontext.gregs[R15];
  991. #else
  992. pc = uc->uc_mcontext.arm_pc;
  993. #endif
  994. /* XXX: compute is_write */
  995. is_write = 0;
  996. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  997. is_write,
  998. &uc->uc_sigmask, puc);
  999. }
  1000. #elif defined(__mc68000)
  1001. int cpu_signal_handler(int host_signum, void *pinfo,
  1002. void *puc)
  1003. {
  1004. siginfo_t *info = pinfo;
  1005. struct ucontext *uc = puc;
  1006. unsigned long pc;
  1007. int is_write;
  1008. pc = uc->uc_mcontext.gregs[16];
  1009. /* XXX: compute is_write */
  1010. is_write = 0;
  1011. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  1012. is_write,
  1013. &uc->uc_sigmask, puc);
  1014. }
  1015. #elif defined(__ia64)
  1016. #ifndef __ISR_VALID
  1017. /* This ought to be in <bits/siginfo.h>... */
  1018. # define __ISR_VALID 1
  1019. #endif
  1020. int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
  1021. {
  1022. siginfo_t *info = pinfo;
  1023. struct ucontext *uc = puc;
  1024. unsigned long ip;
  1025. int is_write = 0;
  1026. ip = uc->uc_mcontext.sc_ip;
  1027. switch (host_signum) {
  1028. case SIGILL:
  1029. case SIGFPE:
  1030. case SIGSEGV:
  1031. case SIGBUS:
  1032. case SIGTRAP:
  1033. if (info->si_code && (info->si_segvflags & __ISR_VALID))
  1034. /* ISR.W (write-access) is bit 33: */
  1035. is_write = (info->si_isr >> 33) & 1;
  1036. break;
  1037. default:
  1038. break;
  1039. }
  1040. return handle_cpu_signal(ip, (unsigned long)info->si_addr,
  1041. is_write,
  1042. &uc->uc_sigmask, puc);
  1043. }
  1044. #elif defined(__s390__)
  1045. int cpu_signal_handler(int host_signum, void *pinfo,
  1046. void *puc)
  1047. {
  1048. siginfo_t *info = pinfo;
  1049. struct ucontext *uc = puc;
  1050. unsigned long pc;
  1051. int is_write;
  1052. pc = uc->uc_mcontext.psw.addr;
  1053. /* XXX: compute is_write */
  1054. is_write = 0;
  1055. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  1056. is_write, &uc->uc_sigmask, puc);
  1057. }
  1058. #elif defined(__mips__)
  1059. int cpu_signal_handler(int host_signum, void *pinfo,
  1060. void *puc)
  1061. {
  1062. siginfo_t *info = pinfo;
  1063. struct ucontext *uc = puc;
  1064. greg_t pc = uc->uc_mcontext.pc;
  1065. int is_write;
  1066. /* XXX: compute is_write */
  1067. is_write = 0;
  1068. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  1069. is_write, &uc->uc_sigmask, puc);
  1070. }
  1071. #elif defined(__hppa__)
  1072. int cpu_signal_handler(int host_signum, void *pinfo,
  1073. void *puc)
  1074. {
  1075. struct siginfo *info = pinfo;
  1076. struct ucontext *uc = puc;
  1077. unsigned long pc;
  1078. int is_write;
  1079. pc = uc->uc_mcontext.sc_iaoq[0];
  1080. /* FIXME: compute is_write */
  1081. is_write = 0;
  1082. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  1083. is_write,
  1084. &uc->uc_sigmask, puc);
  1085. }
  1086. #else
  1087. #error host CPU specific signal handler needed
  1088. #endif
  1089. #endif /* !defined(CONFIG_SOFTMMU) */