/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "translate-all.h"
#include "qemu/timer.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
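
/* Editorial sketch (not part of the original file): how a page index is
   decomposed into radix-tree indices by page_find_alloc() below, given the
   macro values defined above.  The helper name is hypothetical and the
   block is never compiled. */
#if 0
static void example_decompose(tb_page_addr_t index)
{
    /* the top V_L1_BITS bits select a slot in the static l1_map[] */
    size_t l1 = (index >> V_L1_SHIFT) & (V_L1_SIZE - 1);
    int i;

    /* each further level consumes V_L2_BITS bits, down to level 0 */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i >= 0; i--) {
        /* lx indexes a V_L2_SIZE array of pointers
           (or of PageDesc at the final level, i == 0) */
        size_t lx = (index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1);
    }
}
#endif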

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Return non-zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}

/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    CPUArchState *env = cpu->env_ptr;
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr)
        return -1;

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0)
        return -1;
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    cpu->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        return true;
    }
    return false;
}

#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
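
/* Worked example (editorial note, not in the original file): with a
   4096-byte host page, map_exec(addr=(void *)0x40001234, size=0x100)
   rounds start down to 0x40001000 and end (0x40001334 + 0xfff, masked)
   up to 0x40002000, so mprotect() covers the whole page(s) containing
   the requested range. */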

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
}

static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * V_L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is somewhat arbitrary,
   but not so small that we can't have a fair number of TBs live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf) {
        map_exec(buf, tcg_ctx.code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TBs to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}
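
/* Editorial sketch (not part of the original file): the buffer layout that
   code_gen_alloc() produces, given the fields set above.

   code_gen_buffer                                     code_gen_prologue
   |<------------ code_gen_buffer_size --------------->|<--- 1024 B --->|
   | translated TBs, handed out upward via             | prologue /     |
   | code_gen_ptr; tb_alloc() stops once code_gen_ptr  | epilogue code  |
   | passes code_gen_buffer_max_size (the last         |                |
   | TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes are slack    |                |
   | for a worst-case final TB)                        |                |
*/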

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
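
/* Illustrative usage sketch (editorial addition, not in the original file):
   a minimal bring-up sequence as seen from a caller. */
#if 0
tcg_exec_init(0);       /* 0 => size_code_gen_buffer() picks the default */
assert(tcg_enabled());  /* true once code_gen_buffer has been allocated */
#endif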

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUState *cpu = ENV_GET_CPU(env1);

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
                tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
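
/* Editorial sketch (not part of the original file): the jump lists above
   store (TranslationBlock *) values whose low two bits are a tag.  Tag 0
   or 1 names which jmp_next[] slot of the pointed-to TB continues the
   circular list; tag 2 marks the list head stored in jmp_first.  A
   hypothetical decode step looks like this. */
#if 0
uintptr_t word = (uintptr_t)tb->jmp_first;
unsigned int n1 = word & 3;                               /* 0, 1, or 2 */
TranslationBlock *tb1 = (TranslationBlock *)(word & ~3);  /* real pointer */
if (n1 == 2) {
    /* back at the head: tb1 is the TB that owns jmp_first */
} else {
    /* follow tb1->jmp_next[n1] to reach the next element */
}
#endif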

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
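
/* Worked example (editorial note, not in the original file):
   set_bits(tab, 5, 10) marks bits 5..14.  start=5 and end=15 fall in
   different bytes, so the first byte gets mask 0xff << 5 = 0xe0 (bits
   5-7), no full 0xff bytes are written (end1 = 8 equals the rounded-up
   start), and the second byte gets ~(0xff << 7) = 0x7f (bits 8-14). */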

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tc_ptr = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
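
/* Worked example (editorial note, not in the original file): with 4 KiB
   target pages, a TB translated at pc=0x1ffc whose guest code turns out
   to be 16 bytes long ends at 0x200b, so virt_page2 = 0x2000 differs from
   pc & TARGET_PAGE_MASK = 0x1000 and the TB is linked into both pages'
   TB lists via tb_link_page(). */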

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do this to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(cpu, start, cpu->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(cpu, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
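
/* Worked example (editorial note, not in the original file): for a 4-byte
   write at page offset 0x123, offset >> 3 = 36 selects the bitmap byte
   and offset & 7 = 3 the bit within it; if any of bits 3..6 of byte 36
   are set, translated code overlaps the write and the slow invalidate
   path runs. */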

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(cpu, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
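
/* Editorial sketch (not part of the original file): the binary search is
   valid because tbs[] is filled in code_gen_ptr order, so tc_ptr values
   increase monotonically with the index.  A caller-side sanity check
   might look like this. */
#if 0
TranslationBlock *tb = tb_find_pc(retaddr);
if (tb) {
    assert((uintptr_t)tb->tc_ptr <= retaddr);  /* retaddr is inside tb */
}
#endif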

#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */

void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (!tb) {
        cpu_abort(cpu, "check_watchpoint: could not find TB for pc=%p",
                  (void *)cpu->mem_io_pc);
    }
    cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    int old_mask;

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        cpu->icount_decr.u16.high = 0xffff;
        if (!cpu_can_do_io(cpu)
            && (mask & ~old_mask) != 0) {
            cpu_abort(cpu, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu->tcg_exit_req = 1;
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(cpu, NULL);
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
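
/* Editorial note (not in the original file): two ranges are cleared above
   because a TB that starts on the preceding page can extend into the
   flushed page (TBs may span two pages, as handled in tb_gen_code()), and
   the jump cache is indexed by the TB's *starting* pc. */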

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
            tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
            tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                    tcg_ctx.tb_ctx.nb_tbs : 0,
            max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
            tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                     tcg_ctx.code_gen_buffer) /
                                     tcg_ctx.tb_ctx.nb_tbs : 0,
            target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                         target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                    tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
            tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);

        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);

        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(abi_ulong) * 2;

    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */