/* cpu-exec.c */
  1. /*
  2. * i386 emulator main execution loop
  3. *
  4. * Copyright (c) 2003-2005 Fabrice Bellard
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
  19. */
  20. #include "config.h"
  21. #define CPU_NO_GLOBAL_REGS
  22. #include "exec.h"
  23. #include "disas.h"
  24. #include "tcg.h"
  25. #include "kvm.h"
  26. #if !defined(CONFIG_SOFTMMU)
  27. #undef EAX
  28. #undef ECX
  29. #undef EDX
  30. #undef EBX
  31. #undef ESP
  32. #undef EBP
  33. #undef ESI
  34. #undef EDI
  35. #undef EIP
  36. #include <signal.h>
  37. #ifdef __linux__
  38. #include <sys/ucontext.h>
  39. #endif
  40. #endif
  41. #if defined(__sparc__) && !defined(HOST_SOLARIS)
  42. // Work around ugly bugs in glibc that mangle global register contents
  43. #undef env
  44. #define env cpu_single_env
  45. #endif
  46. int tb_invalidated_flag;
  47. //#define DEBUG_EXEC
  48. //#define DEBUG_SIGNAL
/* Abort execution of the current translated block and return to the
   setjmp point established in cpu_exec().  Used by exception paths. */
void cpu_loop_exit(void)
{
    /* NOTE: the register at this point must be saved by hand because
       longjmp restore them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* restore the signal mask that was in effect when the fault was
           taken, since we are not returning through sigreturn().
           XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    /* discard any pending exception and re-enter the main loop */
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available.  Translates a single-use TB limited to
   max_cycles instructions, runs it once, and frees it. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    /* the block was for one-shot use only: unlink and free it */
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
/* Slow-path TB lookup: walk the physical-PC hash chain for a block
   matching (pc, cs_base, flags); translate a new block if none is
   found, then refresh the virtual-PC jump cache entry. */
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed: a TB spanning two pages only
               matches if the second physical page also agrees */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
  150. static inline TranslationBlock *tb_find_fast(void)
  151. {
  152. TranslationBlock *tb;
  153. target_ulong cs_base, pc;
  154. int flags;
  155. /* we record a subset of the CPU state. It will
  156. always be the same before a given translated block
  157. is executed. */
  158. cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
  159. tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
  160. if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
  161. tb->flags != flags)) {
  162. tb = tb_find_slow(pc, cs_base, flags);
  163. }
  164. return tb;
  165. }
  166. static CPUDebugExcpHandler *debug_excp_handler;
  167. CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
  168. {
  169. CPUDebugExcpHandler *old_handler = debug_excp_handler;
  170. debug_excp_handler = handler;
  171. return old_handler;
  172. }
  173. static void cpu_handle_debug_exception(CPUState *env)
  174. {
  175. CPUWatchpoint *wp;
  176. if (!env->watchpoint_hit)
  177. TAILQ_FOREACH(wp, &env->watchpoints, entry)
  178. wp->flags &= ~BP_WATCHPOINT_HIT;
  179. if (debug_excp_handler)
  180. debug_excp_handler(env);
  181. }
/* main execution loop.
   Runs translated blocks for env1 until an exit condition (exception,
   interrupt, exit request) occurs, and returns the EXCP_* code.
   Saves/restores the host registers that TCG reserves globally via
   hostregs_helper.h, and converts target flag state to/from the CPU
   temporary format around the loop. */
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            /* hand execution over to the kqemu kernel accelerator when
               possible; fall back to TCG when it returns control */
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    /* interrupts are only taken when the global interrupt
                       flag (SVM GIF) is set */
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;
                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb.  Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            /* longjmp'ed here: re-sync the register state and loop */
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated.  Currently a no-op: the invalidation below is disabled. */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
  657. #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
  658. void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
  659. {
  660. CPUX86State *saved_env;
  661. saved_env = env;
  662. env = s;
  663. if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
  664. selector &= 0xffff;
  665. cpu_x86_load_seg_cache(env, seg_reg, selector,
  666. (selector << 4), 0xffff, 0);
  667. } else {
  668. helper_load_seg(seg_reg, selector);
  669. }
  670. env = saved_env;
  671. }
  672. void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
  673. {
  674. CPUX86State *saved_env;
  675. saved_env = env;
  676. env = s;
  677. helper_fsave(ptr, data32);
  678. env = saved_env;
  679. }
  680. void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
  681. {
  682. CPUX86State *saved_env;
  683. saved_env = env;
  684. env = s;
  685. helper_frstor(ptr, data32);
  686. env = saved_env;
  687. }
  688. #endif /* TARGET_I386 */
  689. #if !defined(CONFIG_SOFTMMU)
  690. #if defined(TARGET_I386)
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored.
   Returns 1 if the fault was handled, 0 if it was not an MMU fault
   (real crash); may not return at all if a guest exception is raised. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
  741. #elif defined(TARGET_ARM)
/* ARM variant of the host-signal fault handler: same contract as the
   i386 version above (1 = handled, 0 = not an MMU fault; may longjmp
   back into the CPU loop instead of returning). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
  778. #elif defined(TARGET_SPARC)
  779. static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
  780. int is_write, sigset_t *old_set,
  781. void *puc)
  782. {
  783. TranslationBlock *tb;
  784. int ret;
  785. if (cpu_single_env)
  786. env = cpu_single_env; /* XXX: find a correct solution for multithread */
  787. #if defined(DEBUG_SIGNAL)
  788. printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
  789. pc, address, is_write, *(unsigned long *)old_set);
  790. #endif
  791. /* XXX: locking issue */
  792. if (is_write && page_unprotect(h2g(address), pc, puc)) {
  793. return 1;
  794. }
  795. /* see if it is an MMU fault */
  796. ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
  797. if (ret < 0)
  798. return 0; /* not an MMU fault */
  799. if (ret == 0)
  800. return 1; /* the MMU fault was handled without causing real CPU fault */
  801. /* now we have a real cpu fault */
  802. tb = tb_find_pc(pc);
  803. if (tb) {
  804. /* the PC is inside the translated code. It means that we have
  805. a virtual CPU fault */
  806. cpu_restore_state(tb, env, pc, puc);
  807. }
  808. /* we restore the process signal mask as the sigreturn should
  809. do it (XXX: use sigsetjmp) */
  810. sigprocmask(SIG_SETMASK, old_set, NULL);
  811. cpu_loop_exit();
  812. /* never comes here */
  813. return 1;
  814. }
  815. #elif defined (TARGET_PPC)
  816. static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
  817. int is_write, sigset_t *old_set,
  818. void *puc)
  819. {
  820. TranslationBlock *tb;
  821. int ret;
  822. if (cpu_single_env)
  823. env = cpu_single_env; /* XXX: find a correct solution for multithread */
  824. #if defined(DEBUG_SIGNAL)
  825. printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
  826. pc, address, is_write, *(unsigned long *)old_set);
  827. #endif
  828. /* XXX: locking issue */
  829. if (is_write && page_unprotect(h2g(address), pc, puc)) {
  830. return 1;
  831. }
  832. /* see if it is an MMU fault */
  833. ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
  834. if (ret < 0)
  835. return 0; /* not an MMU fault */
  836. if (ret == 0)
  837. return 1; /* the MMU fault was handled without causing real CPU fault */
  838. /* now we have a real cpu fault */
  839. tb = tb_find_pc(pc);
  840. if (tb) {
  841. /* the PC is inside the translated code. It means that we have
  842. a virtual CPU fault */
  843. cpu_restore_state(tb, env, pc, puc);
  844. }
  845. if (ret == 1) {
  846. #if 0
  847. printf("PF exception: NIP=0x%08x error=0x%x %p\n",
  848. env->nip, env->error_code, tb);
  849. #endif
  850. /* we restore the process signal mask as the sigreturn should
  851. do it (XXX: use sigsetjmp) */
  852. sigprocmask(SIG_SETMASK, old_set, NULL);
  853. cpu_loop_exit();
  854. } else {
  855. /* activate soft MMU for this block */
  856. cpu_resume_from_signal(env, puc);
  857. }
  858. /* never comes here */
  859. return 1;
  860. }
  861. #elif defined(TARGET_M68K)
  862. static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
  863. int is_write, sigset_t *old_set,
  864. void *puc)
  865. {
  866. TranslationBlock *tb;
  867. int ret;
  868. if (cpu_single_env)
  869. env = cpu_single_env; /* XXX: find a correct solution for multithread */
  870. #if defined(DEBUG_SIGNAL)
  871. printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
  872. pc, address, is_write, *(unsigned long *)old_set);
  873. #endif
  874. /* XXX: locking issue */
  875. if (is_write && page_unprotect(address, pc, puc)) {
  876. return 1;
  877. }
  878. /* see if it is an MMU fault */
  879. ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
  880. if (ret < 0)
  881. return 0; /* not an MMU fault */
  882. if (ret == 0)
  883. return 1; /* the MMU fault was handled without causing real CPU fault */
  884. /* now we have a real cpu fault */
  885. tb = tb_find_pc(pc);
  886. if (tb) {
  887. /* the PC is inside the translated code. It means that we have
  888. a virtual CPU fault */
  889. cpu_restore_state(tb, env, pc, puc);
  890. }
  891. /* we restore the process signal mask as the sigreturn should
  892. do it (XXX: use sigsetjmp) */
  893. sigprocmask(SIG_SETMASK, old_set, NULL);
  894. cpu_loop_exit();
  895. /* never comes here */
  896. return 1;
  897. }
  898. #elif defined (TARGET_MIPS)
  899. static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
  900. int is_write, sigset_t *old_set,
  901. void *puc)
  902. {
  903. TranslationBlock *tb;
  904. int ret;
  905. if (cpu_single_env)
  906. env = cpu_single_env; /* XXX: find a correct solution for multithread */
  907. #if defined(DEBUG_SIGNAL)
  908. printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
  909. pc, address, is_write, *(unsigned long *)old_set);
  910. #endif
  911. /* XXX: locking issue */
  912. if (is_write && page_unprotect(h2g(address), pc, puc)) {
  913. return 1;
  914. }
  915. /* see if it is an MMU fault */
  916. ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
  917. if (ret < 0)
  918. return 0; /* not an MMU fault */
  919. if (ret == 0)
  920. return 1; /* the MMU fault was handled without causing real CPU fault */
  921. /* now we have a real cpu fault */
  922. tb = tb_find_pc(pc);
  923. if (tb) {
  924. /* the PC is inside the translated code. It means that we have
  925. a virtual CPU fault */
  926. cpu_restore_state(tb, env, pc, puc);
  927. }
  928. if (ret == 1) {
  929. #if 0
  930. printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
  931. env->PC, env->error_code, tb);
  932. #endif
  933. /* we restore the process signal mask as the sigreturn should
  934. do it (XXX: use sigsetjmp) */
  935. sigprocmask(SIG_SETMASK, old_set, NULL);
  936. cpu_loop_exit();
  937. } else {
  938. /* activate soft MMU for this block */
  939. cpu_resume_from_signal(env, puc);
  940. }
  941. /* never comes here */
  942. return 1;
  943. }
  944. #elif defined (TARGET_SH4)
  945. static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
  946. int is_write, sigset_t *old_set,
  947. void *puc)
  948. {
  949. TranslationBlock *tb;
  950. int ret;
  951. if (cpu_single_env)
  952. env = cpu_single_env; /* XXX: find a correct solution for multithread */
  953. #if defined(DEBUG_SIGNAL)
  954. printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
  955. pc, address, is_write, *(unsigned long *)old_set);
  956. #endif
  957. /* XXX: locking issue */
  958. if (is_write && page_unprotect(h2g(address), pc, puc)) {
  959. return 1;
  960. }
  961. /* see if it is an MMU fault */
  962. ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
  963. if (ret < 0)
  964. return 0; /* not an MMU fault */
  965. if (ret == 0)
  966. return 1; /* the MMU fault was handled without causing real CPU fault */
  967. /* now we have a real cpu fault */
  968. tb = tb_find_pc(pc);
  969. if (tb) {
  970. /* the PC is inside the translated code. It means that we have
  971. a virtual CPU fault */
  972. cpu_restore_state(tb, env, pc, puc);
  973. }
  974. #if 0
  975. printf("PF exception: NIP=0x%08x error=0x%x %p\n",
  976. env->nip, env->error_code, tb);
  977. #endif
  978. /* we restore the process signal mask as the sigreturn should
  979. do it (XXX: use sigsetjmp) */
  980. sigprocmask(SIG_SETMASK, old_set, NULL);
  981. cpu_loop_exit();
  982. /* never comes here */
  983. return 1;
  984. }
  985. #elif defined (TARGET_ALPHA)
  986. static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
  987. int is_write, sigset_t *old_set,
  988. void *puc)
  989. {
  990. TranslationBlock *tb;
  991. int ret;
  992. if (cpu_single_env)
  993. env = cpu_single_env; /* XXX: find a correct solution for multithread */
  994. #if defined(DEBUG_SIGNAL)
  995. printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
  996. pc, address, is_write, *(unsigned long *)old_set);
  997. #endif
  998. /* XXX: locking issue */
  999. if (is_write && page_unprotect(h2g(address), pc, puc)) {
  1000. return 1;
  1001. }
  1002. /* see if it is an MMU fault */
  1003. ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
  1004. if (ret < 0)
  1005. return 0; /* not an MMU fault */
  1006. if (ret == 0)
  1007. return 1; /* the MMU fault was handled without causing real CPU fault */
  1008. /* now we have a real cpu fault */
  1009. tb = tb_find_pc(pc);
  1010. if (tb) {
  1011. /* the PC is inside the translated code. It means that we have
  1012. a virtual CPU fault */
  1013. cpu_restore_state(tb, env, pc, puc);
  1014. }
  1015. #if 0
  1016. printf("PF exception: NIP=0x%08x error=0x%x %p\n",
  1017. env->nip, env->error_code, tb);
  1018. #endif
  1019. /* we restore the process signal mask as the sigreturn should
  1020. do it (XXX: use sigsetjmp) */
  1021. sigprocmask(SIG_SETMASK, old_set, NULL);
  1022. cpu_loop_exit();
  1023. /* never comes here */
  1024. return 1;
  1025. }
  1026. #elif defined (TARGET_CRIS)
  1027. static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
  1028. int is_write, sigset_t *old_set,
  1029. void *puc)
  1030. {
  1031. TranslationBlock *tb;
  1032. int ret;
  1033. if (cpu_single_env)
  1034. env = cpu_single_env; /* XXX: find a correct solution for multithread */
  1035. #if defined(DEBUG_SIGNAL)
  1036. printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
  1037. pc, address, is_write, *(unsigned long *)old_set);
  1038. #endif
  1039. /* XXX: locking issue */
  1040. if (is_write && page_unprotect(h2g(address), pc, puc)) {
  1041. return 1;
  1042. }
  1043. /* see if it is an MMU fault */
  1044. ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
  1045. if (ret < 0)
  1046. return 0; /* not an MMU fault */
  1047. if (ret == 0)
  1048. return 1; /* the MMU fault was handled without causing real CPU fault */
  1049. /* now we have a real cpu fault */
  1050. tb = tb_find_pc(pc);
  1051. if (tb) {
  1052. /* the PC is inside the translated code. It means that we have
  1053. a virtual CPU fault */
  1054. cpu_restore_state(tb, env, pc, puc);
  1055. }
  1056. /* we restore the process signal mask as the sigreturn should
  1057. do it (XXX: use sigsetjmp) */
  1058. sigprocmask(SIG_SETMASK, old_set, NULL);
  1059. cpu_loop_exit();
  1060. /* never comes here */
  1061. return 1;
  1062. }
  1063. #else
  1064. #error unsupported target CPU
  1065. #endif
  1066. #if defined(__i386__)
  1067. #if defined(__APPLE__)
  1068. # include <sys/ucontext.h>
  1069. # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
  1070. # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
  1071. # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
  1072. #else
  1073. # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
  1074. # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
  1075. # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
  1076. #endif
  1077. int cpu_signal_handler(int host_signum, void *pinfo,
  1078. void *puc)
  1079. {
  1080. siginfo_t *info = pinfo;
  1081. struct ucontext *uc = puc;
  1082. unsigned long pc;
  1083. int trapno;
  1084. #ifndef REG_EIP
  1085. /* for glibc 2.1 */
  1086. #define REG_EIP EIP
  1087. #define REG_ERR ERR
  1088. #define REG_TRAPNO TRAPNO
  1089. #endif
  1090. pc = EIP_sig(uc);
  1091. trapno = TRAP_sig(uc);
  1092. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  1093. trapno == 0xe ?
  1094. (ERROR_sig(uc) >> 1) & 1 : 0,
  1095. &uc->uc_sigmask, puc);
  1096. }
  1097. #elif defined(__x86_64__)
  1098. #ifdef __NetBSD__
  1099. #define REG_ERR _REG_ERR
  1100. #define REG_TRAPNO _REG_TRAPNO
  1101. #define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
  1102. #define QEMU_UC_MACHINE_PC(uc) _UC_MACHINE_PC(uc)
  1103. #else
  1104. #define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
  1105. #define QEMU_UC_MACHINE_PC(uc) QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
  1106. #endif
  1107. int cpu_signal_handler(int host_signum, void *pinfo,
  1108. void *puc)
  1109. {
  1110. siginfo_t *info = pinfo;
  1111. unsigned long pc;
  1112. #ifdef __NetBSD__
  1113. ucontext_t *uc = puc;
  1114. #else
  1115. struct ucontext *uc = puc;
  1116. #endif
  1117. pc = QEMU_UC_MACHINE_PC(uc);
  1118. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  1119. QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
  1120. (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
  1121. &uc->uc_sigmask, puc);
  1122. }
  1123. #elif defined(_ARCH_PPC)
  1124. /***********************************************************************
  1125. * signal context platform-specific definitions
  1126. * From Wine
  1127. */
  1128. #ifdef linux
  1129. /* All Registers access - only for local access */
  1130. # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
  1131. /* Gpr Registers access */
  1132. # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
  1133. # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
  1134. # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
  1135. # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
  1136. # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
  1137. # define LR_sig(context) REG_sig(link, context) /* Link register */
  1138. # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
  1139. /* Float Registers access */
  1140. # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
  1141. # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
  1142. /* Exception Registers access */
  1143. # define DAR_sig(context) REG_sig(dar, context)
  1144. # define DSISR_sig(context) REG_sig(dsisr, context)
  1145. # define TRAP_sig(context) REG_sig(trap, context)
  1146. #endif /* linux */
  1147. #ifdef __APPLE__
  1148. # include <sys/ucontext.h>
  1149. typedef struct ucontext SIGCONTEXT;
  1150. /* All Registers access - only for local access */
  1151. # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
  1152. # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
  1153. # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
  1154. # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
  1155. /* Gpr Registers access */
  1156. # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
  1157. # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
  1158. # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
  1159. # define CTR_sig(context) REG_sig(ctr, context)
  1160. # define XER_sig(context) REG_sig(xer, context) /* Link register */
  1161. # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
  1162. # define CR_sig(context) REG_sig(cr, context) /* Condition register */
  1163. /* Float Registers access */
  1164. # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
  1165. # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
  1166. /* Exception Registers access */
  1167. # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
  1168. # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
  1169. # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
  1170. #endif /* __APPLE__ */
  1171. int cpu_signal_handler(int host_signum, void *pinfo,
  1172. void *puc)
  1173. {
  1174. siginfo_t *info = pinfo;
  1175. struct ucontext *uc = puc;
  1176. unsigned long pc;
  1177. int is_write;
  1178. pc = IAR_sig(uc);
  1179. is_write = 0;
  1180. #if 0
  1181. /* ppc 4xx case */
  1182. if (DSISR_sig(uc) & 0x00800000)
  1183. is_write = 1;
  1184. #else
  1185. if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
  1186. is_write = 1;
  1187. #endif
  1188. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  1189. is_write, &uc->uc_sigmask, puc);
  1190. }
  1191. #elif defined(__alpha__)
  1192. int cpu_signal_handler(int host_signum, void *pinfo,
  1193. void *puc)
  1194. {
  1195. siginfo_t *info = pinfo;
  1196. struct ucontext *uc = puc;
  1197. uint32_t *pc = uc->uc_mcontext.sc_pc;
  1198. uint32_t insn = *pc;
  1199. int is_write = 0;
  1200. /* XXX: need kernel patch to get write flag faster */
  1201. switch (insn >> 26) {
  1202. case 0x0d: // stw
  1203. case 0x0e: // stb
  1204. case 0x0f: // stq_u
  1205. case 0x24: // stf
  1206. case 0x25: // stg
  1207. case 0x26: // sts
  1208. case 0x27: // stt
  1209. case 0x2c: // stl
  1210. case 0x2d: // stq
  1211. case 0x2e: // stl_c
  1212. case 0x2f: // stq_c
  1213. is_write = 1;
  1214. }
  1215. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  1216. is_write, &uc->uc_sigmask, puc);
  1217. }
  1218. #elif defined(__sparc__)
  1219. int cpu_signal_handler(int host_signum, void *pinfo,
  1220. void *puc)
  1221. {
  1222. siginfo_t *info = pinfo;
  1223. int is_write;
  1224. uint32_t insn;
  1225. #if !defined(__arch64__) || defined(HOST_SOLARIS)
  1226. uint32_t *regs = (uint32_t *)(info + 1);
  1227. void *sigmask = (regs + 20);
  1228. /* XXX: is there a standard glibc define ? */
  1229. unsigned long pc = regs[1];
  1230. #else
  1231. #ifdef __linux__
  1232. struct sigcontext *sc = puc;
  1233. unsigned long pc = sc->sigc_regs.tpc;
  1234. void *sigmask = (void *)sc->sigc_mask;
  1235. #elif defined(__OpenBSD__)
  1236. struct sigcontext *uc = puc;
  1237. unsigned long pc = uc->sc_pc;
  1238. void *sigmask = (void *)(long)uc->sc_mask;
  1239. #endif
  1240. #endif
  1241. /* XXX: need kernel patch to get write flag faster */
  1242. is_write = 0;
  1243. insn = *(uint32_t *)pc;
  1244. if ((insn >> 30) == 3) {
  1245. switch((insn >> 19) & 0x3f) {
  1246. case 0x05: // stb
  1247. case 0x06: // sth
  1248. case 0x04: // st
  1249. case 0x07: // std
  1250. case 0x24: // stf
  1251. case 0x27: // stdf
  1252. case 0x25: // stfsr
  1253. is_write = 1;
  1254. break;
  1255. }
  1256. }
  1257. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  1258. is_write, sigmask, NULL);
  1259. }
  1260. #elif defined(__arm__)
  1261. int cpu_signal_handler(int host_signum, void *pinfo,
  1262. void *puc)
  1263. {
  1264. siginfo_t *info = pinfo;
  1265. struct ucontext *uc = puc;
  1266. unsigned long pc;
  1267. int is_write;
  1268. #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
  1269. pc = uc->uc_mcontext.gregs[R15];
  1270. #else
  1271. pc = uc->uc_mcontext.arm_pc;
  1272. #endif
  1273. /* XXX: compute is_write */
  1274. is_write = 0;
  1275. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  1276. is_write,
  1277. &uc->uc_sigmask, puc);
  1278. }
  1279. #elif defined(__mc68000)
  1280. int cpu_signal_handler(int host_signum, void *pinfo,
  1281. void *puc)
  1282. {
  1283. siginfo_t *info = pinfo;
  1284. struct ucontext *uc = puc;
  1285. unsigned long pc;
  1286. int is_write;
  1287. pc = uc->uc_mcontext.gregs[16];
  1288. /* XXX: compute is_write */
  1289. is_write = 0;
  1290. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  1291. is_write,
  1292. &uc->uc_sigmask, puc);
  1293. }
  1294. #elif defined(__ia64)
  1295. #ifndef __ISR_VALID
  1296. /* This ought to be in <bits/siginfo.h>... */
  1297. # define __ISR_VALID 1
  1298. #endif
  1299. int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
  1300. {
  1301. siginfo_t *info = pinfo;
  1302. struct ucontext *uc = puc;
  1303. unsigned long ip;
  1304. int is_write = 0;
  1305. ip = uc->uc_mcontext.sc_ip;
  1306. switch (host_signum) {
  1307. case SIGILL:
  1308. case SIGFPE:
  1309. case SIGSEGV:
  1310. case SIGBUS:
  1311. case SIGTRAP:
  1312. if (info->si_code && (info->si_segvflags & __ISR_VALID))
  1313. /* ISR.W (write-access) is bit 33: */
  1314. is_write = (info->si_isr >> 33) & 1;
  1315. break;
  1316. default:
  1317. break;
  1318. }
  1319. return handle_cpu_signal(ip, (unsigned long)info->si_addr,
  1320. is_write,
  1321. &uc->uc_sigmask, puc);
  1322. }
  1323. #elif defined(__s390__)
  1324. int cpu_signal_handler(int host_signum, void *pinfo,
  1325. void *puc)
  1326. {
  1327. siginfo_t *info = pinfo;
  1328. struct ucontext *uc = puc;
  1329. unsigned long pc;
  1330. int is_write;
  1331. pc = uc->uc_mcontext.psw.addr;
  1332. /* XXX: compute is_write */
  1333. is_write = 0;
  1334. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  1335. is_write, &uc->uc_sigmask, puc);
  1336. }
  1337. #elif defined(__mips__)
  1338. int cpu_signal_handler(int host_signum, void *pinfo,
  1339. void *puc)
  1340. {
  1341. siginfo_t *info = pinfo;
  1342. struct ucontext *uc = puc;
  1343. greg_t pc = uc->uc_mcontext.pc;
  1344. int is_write;
  1345. /* XXX: compute is_write */
  1346. is_write = 0;
  1347. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  1348. is_write, &uc->uc_sigmask, puc);
  1349. }
  1350. #elif defined(__hppa__)
  1351. int cpu_signal_handler(int host_signum, void *pinfo,
  1352. void *puc)
  1353. {
  1354. struct siginfo *info = pinfo;
  1355. struct ucontext *uc = puc;
  1356. unsigned long pc;
  1357. int is_write;
  1358. pc = uc->uc_mcontext.sc_iaoq[0];
  1359. /* FIXME: compute is_write */
  1360. is_write = 0;
  1361. return handle_cpu_signal(pc, (unsigned long)info->si_addr,
  1362. is_write,
  1363. &uc->uc_sigmask, puc);
  1364. }
  1365. #else
  1366. #error host CPU specific signal handler needed
  1367. #endif
  1368. #endif /* !defined(CONFIG_SOFTMMU) */