translate-all.c

/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#define NO_CPU_IO_DEFS
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
#include "exec/log.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif
/* Access to the various translation structures needs to be serialised
 * via locks for consistency.
 * In user-mode emulation, access to the memory-related structures is
 * protected with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned long *code_bitmap;
    unsigned int code_write_count;
#else
    unsigned long flags;
    void *target_data;
#endif
#ifndef CONFIG_USER_ONLY
    QemuSpin lock;
#endif
} PageDesc;

/**
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
struct page_entry {
    PageDesc *pd;
    tb_page_addr_t index;
    bool locked;
};

/**
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a set of pages, we need to keep track of them so that
 * we can lock them in order and also unlock them later. For this we collect
 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 * @tree implementation we use does not provide an O(1) operation to obtain the
 * highest-ranked element, we use @max to keep track of the inserted page
 * with the highest index. This is valuable because if a page is not in
 * the tree and its index is higher than @max's, then we can lock it
 * without breaking the locking order rule.
 *
 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 *
 * See also: page_collection_lock().
 */
struct page_collection {
    GTree *tree;
    struct page_entry *max;
};
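
/*
 * A worked example of the locking-order rule above (page indexes are
 * illustrative, not from the source): suppose the collection already
 * holds pages {3, 7}, so max->index == 7.
 *
 *   - Adding page 9: 9 > 7, so it can be locked immediately and become
 *     the new @max; no held lock has a higher index.
 *   - Adding page 5: 5 < 7, so locking it outright could deadlock with
 *     a thread locking in ascending order. page_trylock_add() below
 *     therefore only trylocks it, and on failure the caller drops every
 *     lock and reacquires the whole set in ascending order.
 */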
/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                             \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);           \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#define TB_FOR_EACH_JMP(head_tb, tb, n) \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
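
/*
 * Sketch of the tagged-pointer lists these iterators walk (pointer
 * values are made up): a TB may sit on two pages, so each link stores
 * "pointer | n", where the low bit n selects which of the TB's two
 * per-page link slots continues this page's list:
 *
 *     pd->first_tb      = (uintptr_t)tb0 | 1;    // tb0, via page slot 1
 *     tb0->page_next[1] = (uintptr_t)tb1 | 0;    // tb1, via page slot 0
 *     tb1->page_next[0] = (uintptr_t)NULL;       // end of list
 *
 * TB_FOR_EACH_TAGGED() recovers the pointer with "& ~1" and keeps the
 * tag in @n to select field[n] for the next hop.
 */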
/*
 * In system mode we want L1_MAP to be based on ram offsets,
 * while in user mode we want it to be based on virtual addresses.
 *
 * TODO: For user mode, see the caveat re host vs guest virtual
 * address spaces near GUEST_ADDR_MAX.
 */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
#endif

/* Size of the L2 (and L3, etc) page tables. */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
                  * BITS_PER_BYTE);

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

TBContext tb_ctx;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables. */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
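
/*
 * Worked example (illustrative numbers): with TARGET_PAGE_BITS == 12
 * and L1_MAP_ADDR_SPACE_BITS == 32 there are 20 index bits to split.
 * (32 - 12) % 10 == 0, which is below V_L1_MIN_BITS, so v_l1_bits
 * becomes 10: v_l1_size == 1024, v_l1_shift == 10 and v_l2_levels == 0,
 * i.e. a single L1 table whose entries point directly at PageDesc
 * arrays.
 */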
/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value. */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value. Return the decoded value. */
static target_long decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
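
/*
 * Worked sleb128 round trip (illustrative): encoding val == -2.
 * encode_sleb128() computes byte = -2 & 0x7f == 0x7e and val >>= 7,
 * which gives -1; since val == -1 and the sign bit (0x40) is set in
 * byte, more == 0 and the single byte 0x7e is emitted.
 * decode_sleb128() reads 0x7e, stops (no 0x80 continuation), sees
 * bit 0x40 set with shift == 7, and sign-extends: 0x7e | (-1 << 7)
 * yields -2 again.
 */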
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line. The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros. */
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow. The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely. Thus we can test for overflow after
           encoding a row without having to check during encoding. */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
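
/*
 * Illustrative table (made-up addresses, TARGET_INSN_START_WORDS == 1):
 * a TB at guest pc 0x1000 with two insns whose generated code ends at
 * host offsets 0x20 and 0x34 is encoded as the sleb128 deltas
 *
 *     row 0:  0x1000 - 0x1000 == 0       0x20 - 0    == 0x20
 *     row 1:  0x1004 - 0x1000 == 4       0x34 - 0x20 == 0x14
 *
 * which cpu_restore_state_from_tb() below accumulates back into
 * absolute guest/host pcs.
 */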
/* The cpu state corresponding to 'searched_pc' is restored.
 * When reset_icount is true, current TB will be interrupted and
 * icount should be recalculated.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc, bool reset_icount)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    const uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc. */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
        assert(icount_enabled());
        /* Reset the cycle counter to the start of the block
           and shift it by the number of actually executed instructions */
        cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
    }

    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->restore_time,
                prof->restore_time + profile_getclock() - ti);
    qatomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}
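
/*
 * Example of the icount adjustment above (numbers illustrative): for a
 * TB with num_insns == 5 interrupted at search index i == 2, only two
 * insns have completed, so the 5 - 2 == 3 unexecuted insns are credited
 * back to icount_decr.u16.low.
 */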
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
    /*
     * The host_pc has to be in the rx region of the code buffer.
     * If it is not we will not be able to resolve it here.
     * The two cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     */
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
            return true;
        }
    }
    return false;
}

void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1. Always allocated. */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1. */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = qatomic_rcu_read(lp);

        if (p == NULL) {
            void *existing;

            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            existing = qatomic_cmpxchg(lp, NULL, p);
            if (unlikely(existing)) {
                g_free(p);
                p = existing;
            }
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = qatomic_rcu_read(lp);
    if (pd == NULL) {
        void *existing;

        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
#ifndef CONFIG_USER_ONLY
        {
            int i;

            for (i = 0; i < V_L2_SIZE; i++) {
                qemu_spin_init(&pd[i].lock);
            }
        }
#endif
        existing = qatomic_cmpxchg(lp, NULL, pd);
        if (unlikely(existing)) {
#ifndef CONFIG_USER_ONLY
            {
                int i;

                for (i = 0; i < V_L2_SIZE; i++) {
                    qemu_spin_destroy(&pd[i].lock);
                }
            }
#endif
            g_free(pd);
            pd = existing;
        }
    }

    return pd + (index & (V_L2_SIZE - 1));
}
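
/*
 * Illustrative walk (made-up configuration): with v_l1_shift == 20,
 * v_l2_levels == 1 and V_L2_BITS == 10, page index 0x12345 is resolved
 * as l1_map[(0x12345 >> 20) & (v_l1_size - 1)], then the L2 slot
 * (0x12345 >> 10) & 0x3ff, then entry 0x12345 & 0x3ff of the final
 * PageDesc array. Missing interior tables are allocated with g_new0()
 * and published with qatomic_cmpxchg(), so concurrent allocators agree
 * on a single winner and the losers free their copy.
 */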
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);

/* In user-mode page locks aren't used; mmap_lock is enough */
#ifdef CONFIG_USER_ONLY

#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())

static inline void page_lock(PageDesc *pd)
{ }

static inline void page_unlock(PageDesc *pd)
{ }

static inline void page_lock_tb(const TranslationBlock *tb)
{ }

static inline void page_unlock_tb(const TranslationBlock *tb)
{ }

struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    return NULL;
}

void page_collection_unlock(struct page_collection *set)
{ }
#else /* !CONFIG_USER_ONLY */

#ifdef CONFIG_DEBUG_TCG

static __thread GHashTable *ht_pages_locked_debug;

static void ht_pages_locked_debug_init(void)
{
    if (ht_pages_locked_debug) {
        return;
    }
    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
}

static bool page_is_locked(const PageDesc *pd)
{
    PageDesc *found;

    ht_pages_locked_debug_init();
    found = g_hash_table_lookup(ht_pages_locked_debug, pd);
    return !!found;
}

static void page_lock__debug(PageDesc *pd)
{
    ht_pages_locked_debug_init();
    g_assert(!page_is_locked(pd));
    g_hash_table_insert(ht_pages_locked_debug, pd, pd);
}

static void page_unlock__debug(const PageDesc *pd)
{
    bool removed;

    ht_pages_locked_debug_init();
    g_assert(page_is_locked(pd));
    removed = g_hash_table_remove(ht_pages_locked_debug, pd);
    g_assert(removed);
}

static void
do_assert_page_locked(const PageDesc *pd, const char *file, int line)
{
    if (unlikely(!page_is_locked(pd))) {
        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
                     pd, file, line);
        abort();
    }
}

#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)

void assert_no_pages_locked(void)
{
    ht_pages_locked_debug_init();
    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
}

#else /* !CONFIG_DEBUG_TCG */

#define assert_page_locked(pd)

static inline void page_lock__debug(const PageDesc *pd)
{
}

static inline void page_unlock__debug(const PageDesc *pd)
{
}

#endif /* CONFIG_DEBUG_TCG */

static inline void page_lock(PageDesc *pd)
{
    page_lock__debug(pd);
    qemu_spin_lock(&pd->lock);
}

static inline void page_unlock(PageDesc *pd)
{
    qemu_spin_unlock(&pd->lock);
    page_unlock__debug(pd);
}

/* lock the page(s) of a TB in the correct acquisition order */
static inline void page_lock_tb(const TranslationBlock *tb)
{
    page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
}

static inline void page_unlock_tb(const TranslationBlock *tb)
{
    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);

    page_unlock(p1);
    if (unlikely(tb->page_addr[1] != -1)) {
        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);

        if (p2 != p1) {
            page_unlock(p2);
        }
    }
}

static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index)
{
    struct page_entry *pe = g_malloc(sizeof(*pe));

    pe->index = index;
    pe->pd = pd;
    pe->locked = false;
    return pe;
}

static void page_entry_destroy(gpointer p)
{
    struct page_entry *pe = p;

    g_assert(pe->locked);
    page_unlock(pe->pd);
    g_free(pe);
}

/* returns false on success */
static bool page_entry_trylock(struct page_entry *pe)
{
    bool busy;

    busy = qemu_spin_trylock(&pe->pd->lock);
    if (!busy) {
        g_assert(!pe->locked);
        pe->locked = true;
        page_lock__debug(pe->pd);
    }
    return busy;
}

static void do_page_entry_lock(struct page_entry *pe)
{
    page_lock(pe->pd);
    g_assert(!pe->locked);
    pe->locked = true;
}

static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    do_page_entry_lock(pe);
    return FALSE;
}

static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    if (pe->locked) {
        pe->locked = false;
        page_unlock(pe->pd);
    }
    return FALSE;
}

/*
 * Trylock a page, and if successful, add the page to a collection.
 * Returns true ("busy") if the page could not be locked; false otherwise.
 */
static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
{
    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
    struct page_entry *pe;
    PageDesc *pd;

    pe = g_tree_lookup(set->tree, &index);
    if (pe) {
        return false;
    }

    pd = page_find(index);
    if (pd == NULL) {
        return false;
    }

    pe = page_entry_new(pd, index);
    g_tree_insert(set->tree, &pe->index, pe);

    /*
     * If this is either (1) the first insertion or (2) a page whose index
     * is higher than any other so far, just lock the page and move on.
     */
    if (set->max == NULL || pe->index > set->max->index) {
        set->max = pe;
        do_page_entry_lock(pe);
        return false;
    }
    /*
     * Try to acquire out-of-order lock; if busy, return busy so that we acquire
     * locks in order.
     */
    return page_entry_trylock(pe);
}

static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
{
    tb_page_addr_t a = *(const tb_page_addr_t *)ap;
    tb_page_addr_t b = *(const tb_page_addr_t *)bp;

    if (a == b) {
        return 0;
    } else if (a < b) {
        return -1;
    }
    return 1;
}

/*
 * Lock a range of pages ([@start,@end[) as well as the pages of all
 * intersecting TBs.
 * Locking order: acquire locks in ascending order of page index.
 */
struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *set = g_malloc(sizeof(*set));
    tb_page_addr_t index;
    PageDesc *pd;

    start >>= TARGET_PAGE_BITS;
    end >>= TARGET_PAGE_BITS;
    g_assert(start <= end);

    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                page_entry_destroy);
    set->max = NULL;
    assert_no_pages_locked();

 retry:
    g_tree_foreach(set->tree, page_entry_lock, NULL);

    for (index = start; index <= end; index++) {
        TranslationBlock *tb;
        int n;

        pd = page_find(index);
        if (pd == NULL) {
            continue;
        }
        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
            g_tree_foreach(set->tree, page_entry_unlock, NULL);
            goto retry;
        }
        assert_page_locked(pd);
        PAGE_FOR_EACH_TB(pd, tb, n) {
            if (page_trylock_add(set, tb->page_addr[0]) ||
                (tb->page_addr[1] != -1 &&
                 page_trylock_add(set, tb->page_addr[1]))) {
                /* drop all locks, and reacquire in order */
                g_tree_foreach(set->tree, page_entry_unlock, NULL);
                goto retry;
            }
        }
    }
    return set;
}

void page_collection_unlock(struct page_collection *set)
{
    /* entries are unlocked and freed via page_entry_destroy */
    g_tree_destroy(set->tree);
    g_free(set);
}
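
/*
 * Typical usage sketch (hypothetical caller, not from this file):
 *
 *     struct page_collection *pages = page_collection_lock(start, end);
 *     // ... walk PAGE_FOR_EACH_TB() over the range and invalidate ...
 *     page_collection_unlock(pages);
 *
 * This is the pattern behind helpers such as
 * tb_invalidate_phys_page_range__locked() further below, which assume
 * all relevant pages are already locked.
 */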
#endif /* !CONFIG_USER_ONLY */

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
{
    PageDesc *p1, *p2;
    tb_page_addr_t page1;
    tb_page_addr_t page2;

    assert_memory_lock();
    g_assert(phys1 != -1);

    page1 = phys1 >> TARGET_PAGE_BITS;
    page2 = phys2 >> TARGET_PAGE_BITS;

    p1 = page_find_alloc(page1, alloc);
    if (ret_p1) {
        *ret_p1 = p1;
    }
    if (likely(phys2 == -1)) {
        page_lock(p1);
        return;
    } else if (page1 == page2) {
        page_lock(p1);
        if (ret_p2) {
            *ret_p2 = p1;
        }
        return;
    }
    p2 = page_find_alloc(page2, alloc);
    if (ret_p2) {
        *ret_p2 = p2;
    }
    if (page1 < page2) {
        page_lock(p1);
        page_lock(p2);
    } else {
        page_lock(p2);
        page_lock(p1);
    }
}

static bool tb_cmp(const void *ap, const void *bp)
{
    const TranslationBlock *a = ap;
    const TranslationBlock *b = bp;

    return a->pc == b->pc &&
        a->cs_base == b->cs_base &&
        a->flags == b->flags &&
        (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
        a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
        a->page_addr[0] == b->page_addr[0] &&
        a->page_addr[1] == b->page_addr[1];
}

void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}

/* call with @p->lock held */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    assert_page_locked(p);
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_lock(&pd[i]);
            pd[i].first_tb = (uintptr_t)NULL;
            invalidate_page_bitmap(pd + i);
            page_unlock(&pd[i]);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    bool did_flush = false;

    mmap_lock();
    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }
    did_flush = true;

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = tcg_nb_tbs();
        size_t host_size = 0;

        tcg_tb_foreach(tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    mmap_unlock();
    if (did_flush) {
        qemu_plugin_flush_cb();
    }
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);

        if (cpu_in_exclusive_context(cpu)) {
            do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
        } else {
            async_safe_run_on_cpu(cpu, do_tb_flush,
                                  RUN_ON_CPU_HOST_INT(tb_flush_count));
        }
    }
}

/*
 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
 * so in order to prevent bit rot we compile them unconditionally in user-mode,
 * and let the optimizer get rid of them by wrapping their user-only callers
 * with if (DEBUG_TB_CHECK_GATE).
 */
#ifdef CONFIG_USER_ONLY

static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with mmap_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void do_tb_page_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
}

#endif /* CONFIG_USER_ONLY */

/*
 * user-mode: call with mmap_lock held
 * !user-mode: call with @pd->lock held
 */
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *pprev;
    unsigned int n1;

    assert_page_locked(pd);
    pprev = &pd->first_tb;
    PAGE_FOR_EACH_TB(pd, tb1, n1) {
        if (tb1 == tb) {
            *pprev = tb1->page_next[n1];
            return;
        }
        pprev = &tb1->page_next[n1];
    }
    g_assert_not_reached();
}

/* remove @orig from its @n_orig-th jump list */
static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
{
    uintptr_t ptr, ptr_locked;
    TranslationBlock *dest;
    TranslationBlock *tb;
    uintptr_t *pprev;
    int n;

    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
    ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
    dest = (TranslationBlock *)(ptr & ~1);
    if (dest == NULL) {
        return;
    }

    qemu_spin_lock(&dest->jmp_lock);
    /*
     * While acquiring the lock, the jump might have been removed if the
     * destination TB was invalidated; check again.
     */
    ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
    if (ptr_locked != ptr) {
        qemu_spin_unlock(&dest->jmp_lock);
        /*
         * The only possibility is that the jump was unlinked via
         * tb_jump_unlink(dest). Seeing here another destination would be a bug,
         * because we set the LSB above.
         */
        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
        return;
    }
    /*
     * We first acquired the lock, and since the destination pointer matches,
     * we know for sure that @orig is in the jmp list.
     */
    pprev = &dest->jmp_list_head;
    TB_FOR_EACH_JMP(dest, tb, n) {
        if (tb == orig && n == n_orig) {
            *pprev = tb->jmp_list_next[n];
            /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
            qemu_spin_unlock(&dest->jmp_lock);
            return;
        }
        pprev = &tb->jmp_list_next[n];
    }
    g_assert_not_reached();
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *dest)
{
    TranslationBlock *tb;
    int n;

    qemu_spin_lock(&dest->jmp_lock);

    TB_FOR_EACH_JMP(dest, tb, n) {
        tb_reset_jump(tb, n);
        qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
        /* No need to clear the list entry; setting the dest ptr is enough */
    }
    dest->jmp_list_head = (uintptr_t)NULL;

    qemu_spin_unlock(&dest->jmp_lock);
}
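
/*
 * Illustrative jmp_dest[] states (addresses made up): a live outgoing
 * jump stores an aligned pointer with a clear low bit, e.g. 0x7f00.
 * tb_remove_from_jmp_list() first sets the low bit (0x7f01) so that no
 * new jump can be chained, then unlinks under dest->jmp_lock.
 * tb_jmp_unlink() instead ANDs the word with 1, dropping the pointer
 * while preserving a concurrently-set low bit; the resulting value 1
 * is exactly the ptr_locked == 1 case asserted in
 * tb_remove_from_jmp_list().
 */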
/*
 * In user-mode, call with mmap_lock held.
 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
 * locks held.
 */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;
    uint32_t orig_cflags = tb_cflags(tb);

    assert_memory_lock();

    /* make sure no further incoming jumps will be chained to this TB */
    qemu_spin_lock(&tb->jmp_lock);
    qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
    qemu_spin_unlock(&tb->jmp_lock);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
                     tb->trace_vcpu_dstate);
    if (!qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    /* remove the TB from the page list */
    if (rm_from_page_list) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(p, tb);
        invalidate_page_bitmap(p);
        if (tb->page_addr[1] != -1) {
            p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
            tb_page_remove(p, tb);
            invalidate_page_bitmap(p);
        }
    }
    /* remove the TB from the per-CPU tb_jmp_cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            qatomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    qatomic_set(&tb_ctx.tb_phys_invalidate_count,
                tb_ctx.tb_phys_invalidate_count + 1);
}

static void tb_phys_invalidate__locked(TranslationBlock *tb)
{
    qemu_thread_jit_write();
    do_tb_phys_invalidate(tb, true);
    qemu_thread_jit_execute();
}

/* invalidate one TB
 *
 * Called with mmap_lock held in user-mode.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    if (page_addr == -1 && tb->page_addr[0] != -1) {
        page_lock_tb(tb);
        do_tb_phys_invalidate(tb, true);
        page_unlock_tb(tb);
    } else {
        do_tb_phys_invalidate(tb, false);
    }
}

#ifdef CONFIG_SOFTMMU
/* call with @p->lock held */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    assert_page_locked(p);
    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    PAGE_FOR_EACH_TB(p, tb, n) {
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
    }
}
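
/*
 * Example of the two-page case above (illustrative, 4 KiB pages): a TB
 * at pc 0xffc with size 0x10 covers [0xffc, 0x1000) on its first page
 * (n == 0, tb_end clamped to TARGET_PAGE_SIZE) and [0x0, 0xc) on its
 * second page (n == 1, tb_start forced to 0), so each page's
 * code_bitmap marks only the bytes that actually live on that page.
 */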
#endif

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 * Called with @p->lock held in !user-mode.
 */
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
                               unsigned int n, tb_page_addr_t page_addr)
{
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_page_locked(p);

    tb->page_addr[n] = page_addr;
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != (uintptr_t)NULL;
#endif
    p->first_tb = (uintptr_t)tb | n;
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    /* translator_loop() must have made all TB pages non-writable */
    assert(!(p->flags & PAGE_WRITE));
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/*
 * Add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 *
 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
 * Note that in !user-mode, another thread might have already added a TB
 * for the same block of guest code that @tb corresponds to. In that case,
 * the caller should discard the original @tb, and use instead the returned TB.
 */
static TranslationBlock *
tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
             tb_page_addr_t phys_page2)
{
    PageDesc *p;
    PageDesc *p2 = NULL;
    void *existing_tb = NULL;
    uint32_t h;

    assert_memory_lock();
    tcg_debug_assert(!(tb->cflags & CF_INVALID));
    /*
     * Add the TB to the page list, acquiring first the pages' locks.
     * We keep the locks held until after inserting the TB in the hash table,
     * so that if the insertion fails we know for sure that the TBs are still
     * in the page descriptors.
     * Note that inserting into the hash table first isn't an option, since
     * we can only insert TBs that are fully initialized.
     */
  1148. page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
  1149. tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
  1150. if (p2) {
  1151. tb_page_add(p2, tb, 1, phys_page2);
  1152. } else {
  1153. tb->page_addr[1] = -1;
  1154. }
  1155. /* add in the hash table */
  1156. h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags,
  1157. tb->trace_vcpu_dstate);
  1158. qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
  1159. /* remove TB from the page(s) if we couldn't insert it */
  1160. if (unlikely(existing_tb)) {
  1161. tb_page_remove(p, tb);
  1162. invalidate_page_bitmap(p);
  1163. if (p2) {
  1164. tb_page_remove(p2, tb);
  1165. invalidate_page_bitmap(p2);
  1166. }
  1167. tb = existing_tb;
  1168. }
  1169. if (p2 && p2 != p) {
  1170. page_unlock(p2);
  1171. }
  1172. page_unlock(p);
  1173. #ifdef CONFIG_USER_ONLY
  1174. if (DEBUG_TB_CHECK_GATE) {
  1175. tb_page_check();
  1176. }
  1177. #endif
  1178. return tb;
  1179. }
  1180. /* Called with mmap_lock held for user mode emulation. */
  1181. TranslationBlock *tb_gen_code(CPUState *cpu,
  1182. target_ulong pc, target_ulong cs_base,
  1183. uint32_t flags, int cflags)
  1184. {
  1185. CPUArchState *env = cpu->env_ptr;
  1186. TranslationBlock *tb, *existing_tb;
  1187. tb_page_addr_t phys_pc, phys_page2;
  1188. target_ulong virt_page2;
  1189. tcg_insn_unit *gen_code_buf;
  1190. int gen_code_size, search_size, max_insns;
  1191. #ifdef CONFIG_PROFILER
  1192. TCGProfile *prof = &tcg_ctx->prof;
  1193. int64_t ti;
  1194. #endif
  1195. assert_memory_lock();
  1196. qemu_thread_jit_write();
  1197. phys_pc = get_page_addr_code(env, pc);
  1198. if (phys_pc == -1) {
  1199. /* Generate a one-shot TB with 1 insn in it */
  1200. cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1;
  1201. }
  1202. max_insns = cflags & CF_COUNT_MASK;
  1203. if (max_insns == 0) {
  1204. max_insns = TCG_MAX_INSNS;
  1205. }
  1206. QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
  1207. buffer_overflow:
  1208. tb = tcg_tb_alloc(tcg_ctx);
  1209. if (unlikely(!tb)) {
  1210. /* flush must be done */
  1211. tb_flush(cpu);
  1212. mmap_unlock();
  1213. /* Make the execution loop process the flush as soon as possible. */
  1214. cpu->exception_index = EXCP_INTERRUPT;
  1215. cpu_loop_exit(cpu);
  1216. }
  1217. gen_code_buf = tcg_ctx->code_gen_ptr;
  1218. tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
  1219. tb->pc = pc;
  1220. tb->cs_base = cs_base;
  1221. tb->flags = flags;
  1222. tb->cflags = cflags;
  1223. tb->trace_vcpu_dstate = *cpu->trace_dstate;
  1224. tcg_ctx->tb_cflags = cflags;
  1225. tb_overflow:
  1226. #ifdef CONFIG_PROFILER
  1227. /* includes aborted translations because of exceptions */
  1228. qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
  1229. ti = profile_getclock();
  1230. #endif
  1231. gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0);
  1232. if (unlikely(gen_code_size != 0)) {
  1233. goto error_return;
  1234. }
  1235. tcg_func_start(tcg_ctx);
  1236. tcg_ctx->cpu = env_cpu(env);
  1237. gen_intermediate_code(cpu, tb, max_insns);
  1238. assert(tb->size != 0);
  1239. tcg_ctx->cpu = NULL;
  1240. max_insns = tb->icount;
  1241. trace_translate_block(tb, tb->pc, tb->tc.ptr);
  1242. /* generate machine code */
  1243. tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
  1244. tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
  1245. tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
  1246. if (TCG_TARGET_HAS_direct_jump) {
  1247. tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
  1248. tcg_ctx->tb_jmp_target_addr = NULL;
  1249. } else {
  1250. tcg_ctx->tb_jmp_insn_offset = NULL;
  1251. tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
  1252. }
  1253. #ifdef CONFIG_PROFILER
  1254. qatomic_set(&prof->tb_count, prof->tb_count + 1);
  1255. qatomic_set(&prof->interm_time,
  1256. prof->interm_time + profile_getclock() - ti);
  1257. ti = profile_getclock();
  1258. #endif
  1259. gen_code_size = tcg_gen_code(tcg_ctx, tb);
  1260. if (unlikely(gen_code_size < 0)) {
  1261. error_return:
  1262. switch (gen_code_size) {
  1263. case -1:
  1264. /*
  1265. * Overflow of code_gen_buffer, or the current slice of it.
  1266. *
  1267. * TODO: We don't need to re-do gen_intermediate_code, nor
  1268. * should we re-do the tcg optimization currently hidden
  1269. * inside tcg_gen_code. All that should be required is to
  1270. * flush the TBs, allocate a new TB, re-initialize it per
  1271. * above, and re-do the actual code generation.
  1272. */
  1273. qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
  1274. "Restarting code generation for "
  1275. "code_gen_buffer overflow\n");
  1276. goto buffer_overflow;
  1277. case -2:
  1278. /*
  1279. * The code generated for the TranslationBlock is too large.
  1280. * The maximum size allowed by the unwind info is 64k.
  1281. * There may be stricter constraints from relocations
  1282. * in the tcg backend.
  1283. *
  1284. * Try again with half as many insns as we attempted this time.
  1285. * If a single insn overflows, there's a bug somewhere...
  1286. */
  1287. assert(max_insns > 1);
  1288. max_insns /= 2;
  1289. qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
  1290. "Restarting code generation with "
  1291. "smaller translation block (max %d insns)\n",
  1292. max_insns);
  1293. goto tb_overflow;
  1294. default:
  1295. g_assert_not_reached();
  1296. }
  1297. }
  1298. search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
  1299. if (unlikely(search_size < 0)) {
  1300. goto buffer_overflow;
  1301. }
  1302. tb->tc.size = gen_code_size;
  1303. #ifdef CONFIG_PROFILER
  1304. qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
  1305. qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
  1306. qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
  1307. qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
  1308. #endif
  1309. #ifdef DEBUG_DISAS
  1310. if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
  1311. qemu_log_in_addr_range(tb->pc)) {
  1312. FILE *logfile = qemu_log_lock();
  1313. int code_size, data_size;
  1314. const tcg_target_ulong *rx_data_gen_ptr;
  1315. size_t chunk_start;
  1316. int insn = 0;
  1317. if (tcg_ctx->data_gen_ptr) {
  1318. rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
  1319. code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
  1320. data_size = gen_code_size - code_size;
  1321. } else {
  1322. rx_data_gen_ptr = 0;
  1323. code_size = gen_code_size;
  1324. data_size = 0;
  1325. }
  1326. /* Dump header and the first instruction */
  1327. qemu_log("OUT: [size=%d]\n", gen_code_size);
  1328. qemu_log(" -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
  1329. tcg_ctx->gen_insn_data[insn][0]);
  1330. chunk_start = tcg_ctx->gen_insn_end_off[insn];
  1331. log_disas(tb->tc.ptr, chunk_start);
  1332. /*
  1333. * Dump each instruction chunk, wrapping up empty chunks into
  1334. * the next instruction. The whole array is offset so the
  1335. * first entry is the beginning of the 2nd instruction.
  1336. */
        while (insn < tb->icount) {
            size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
            if (chunk_end > chunk_start) {
                qemu_log("  -- guest addr 0x" TARGET_FMT_lx "\n",
                         tcg_ctx->gen_insn_data[insn][0]);
                log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start);
                chunk_start = chunk_end;
            }
            insn++;
        }

        if (chunk_start < code_size) {
            qemu_log("  -- tb slow paths + alignment\n");
            log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start);
        }

        /* Finally dump any data we may have after the block */
        if (data_size) {
            int i;
            qemu_log("  data: [size=%d]\n", data_size);
            for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" TCG_PRIlx "\n",
                             (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                } else if (sizeof(tcg_target_ulong) == 4) {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08" TCG_PRIlx "\n",
                             (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                } else {
                    qemu_build_not_reached();
                }
            }
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock(logfile);
    }
#endif

    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /*
     * If the TB is not associated with a physical RAM page then
     * it must be a temporary one-insn TB, and we have nothing to do
     * except fill in the page_addr[] fields. Return early before
     * attempting to link to other TBs or add to the lookup table.
     */
    if (phys_pc == -1) {
        tb->page_addr[0] = tb->page_addr[1] = -1;
        return tb;
    }

    /*
     * Insert TB into the corresponding region tree before publishing it
     * through QHT. Otherwise, rewinding happening within the TB might
     * fail to look the TB up using the host PC.
     */
    tcg_tb_insert(tb);

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb, phys_pc, phys_page2);
    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        tcg_tb_remove(tb);
        return existing_tb;
    }
    return tb;
}

/*
 * @p must be non-NULL.
 * user-mode: call with mmap_lock held.
 * !user-mode: call with all @pages locked.
 */
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t end,
                                      uintptr_t retaddr)
{
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    bool current_tb_not_found = retaddr != 0;
    bool current_tb_modified = false;
    TranslationBlock *current_tb = NULL;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_page_locked(p);

#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    PAGE_FOR_EACH_TB(p, tb, n) {
        assert_page_locked(p);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = false;
                /* now we have a real cpu fault */
                current_tb = tcg_tb_lookup(retaddr);
            }
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /*
                 * If we are modifying the current TB, we must stop
                 * its execution. We could be more precise by checking
                 * that the modification is after the current PC, but it
                 * would require a specialized function to partially
                 * restore the CPU state.
                 */
                current_tb_modified = true;
                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate__locked(tb);
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of one insn next time. */
        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
        mmap_unlock();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *pages;
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (p == NULL) {
        return;
    }
    pages = page_collection_lock(start, end);
    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
    page_collection_unlock(pages);
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
#else
void tb_invalidate_phys_range(target_ulong start, target_ulong end)
#endif
{
    struct page_collection *pages;
    tb_page_addr_t next;

    assert_memory_lock();

    pages = page_collection_lock(start, end);
    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
         start < end;
         start = next, next += TARGET_PAGE_SIZE) {
        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
        tb_page_addr_t bound = MIN(next, end);

        if (pd == NULL) {
            continue;
        }
        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
    }
    page_collection_unlock(pages);
}

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 *
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
void tb_invalidate_phys_page_fast(struct page_collection *pages,
                                  tb_page_addr_t start, int len,
                                  uintptr_t retaddr)
{
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }

    assert_page_locked(p);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
                                              retaddr);
    }
}
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

#ifdef TARGET_HAS_PRECISE_SMC
    if (p->first_tb && pc != 0) {
        current_tb = tcg_tb_lookup(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    assert_page_locked(p);
    PAGE_FOR_EACH_TB(p, tb, n) {
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc, true);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
    }
    p->first_tb = (uintptr_t)NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time. */
        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
        return true;
    }
#endif
    return false;
}
#endif

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC. */
        cpu_restore_state_from_tb(cpu, tb, retaddr, true);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper. The CPU state should
           have been saved before calling it. Fetch the PC from there. */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(addr, addr + 1);
        }
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * In deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUClass *cc;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr, true);

    /*
     * Some guests must re-execute the branch when re-executing a delay
     * slot instruction. When this is the case, adjust icount and N
     * to account for the re-execution of the branch.
     */
    n = 1;
    cc = CPU_GET_CLASS(cpu);
    if (cc->tcg_ops->io_recompile_replay_branch &&
        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
        cpu_neg(cpu)->icount_decr.u16.low++;
        n = 2;
    }

    /*
     * Exit the loop and potentially generate a new TB executing just
     * the I/O insns. We also limit instrumentation to memory
     * operations only (which execute after completion) so we don't
     * double instrument the instruction.
     */
    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "cpu_io_recompile: rewound execution of TB to "
                           TARGET_FMT_lx "\n", tb->pc);

    cpu_loop_exit_noexc(cpu);
}
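
/*
 * Append a human-readable summary of the QHT statistics @hst to @buf:
 * head-bucket usage, an occupancy histogram, and the average chain
 * length. Does nothing if the hash table is empty.
 */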
static void print_qht_statistics(struct qht_stats hst, GString *buf)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    g_string_append_printf(buf, "TB hash buckets     %zu/%zu "
                           "(%0.2f%% head buckets used)\n",
                           hst.used_head_buckets, hst.head_buckets,
                           (double)hst.used_head_buckets /
                           hst.head_buckets * 100);

    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    g_string_append_printf(buf, "TB hash occupancy   %0.2f%% avg chain occ. "
                           "Histogram: %s\n",
                           qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    g_string_append_printf(buf, "TB hash avg chain   %0.3f buckets. "
                           "Histogram: %s\n",
                           qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}
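
/*
 * Per-TB statistics accumulated over the whole TCG region tree.
 * tb_tree_stats_iter() below is invoked once per TB via tcg_tb_foreach()
 * and returns false so that the traversal visits every block.
 */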
struct tb_tree_stats {
    size_t nb_tbs;
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};

static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->nb_tbs++;
    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}
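
/*
 * Append translation-buffer statistics (code size, TB counts, hash table
 * and TLB flush statistics) to @buf, e.g. for the "info jit" monitor
 * command.
 */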
void dump_exec_info(GString *buf)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs, flush_full, flush_part, flush_elide;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;
    /* XXX: avoid using doubles ? */
    g_string_append_printf(buf, "Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-accel tcg,tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    g_string_append_printf(buf, "gen code size       %zu/%zu\n",
                           tcg_code_size(), tcg_code_capacity());
    g_string_append_printf(buf, "TB count            %zu\n", nb_tbs);
    g_string_append_printf(buf, "TB avg target size  %zu max=%zu bytes\n",
                           nb_tbs ? tst.target_size / nb_tbs : 0,
                           tst.max_target_size);
    g_string_append_printf(buf, "TB avg host size    %zu bytes "
                           "(expansion ratio: %0.1f)\n",
                           nb_tbs ? tst.host_size / nb_tbs : 0,
                           tst.target_size ?
                           (double)tst.host_size / tst.target_size : 0);
    g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n",
                           tst.cross_page,
                           nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    g_string_append_printf(buf, "direct jump count   %zu (%zu%%) "
                           "(2 jumps=%zu %zu%%)\n",
                           tst.direct_jmp_count,
                           nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                           tst.direct_jmp2_count,
                           nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(hst, buf);
    qht_statistics_destroy(&hst);

    g_string_append_printf(buf, "\nStatistics:\n");
    g_string_append_printf(buf, "TB flush count      %u\n",
                           qatomic_read(&tb_ctx.tb_flush_count));
    g_string_append_printf(buf, "TB invalidate count %u\n",
                           qatomic_read(&tb_ctx.tb_phys_invalidate_count));

    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
    g_string_append_printf(buf, "TLB full flushes    %zu\n", flush_full);
    g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
    g_string_append_printf(buf, "TLB elided flushes  %zu\n", flush_elide);
    tcg_dump_info(buf);
}

void dump_opcount_info(GString *buf)
{
    tcg_dump_op_count(buf);
}

#else /* CONFIG_USER_ONLY */
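
/*
 * user-mode: mark @mask pending in cpu->interrupt_request and force
 * icount_decr negative so the execution loop leaves translated code at
 * the next TB boundary. Caller must hold the iothread lock (asserted
 * below).
 */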
void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};
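
/*
 * Close the region currently being accumulated in @data (if any) by
 * reporting it to the callback, then start a new region at @end with
 * protection @new_prot, or no region at all if @new_prot is 0.
 * Returns the callback's non-zero result to abort the walk, else 0.
 */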
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}
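
/*
 * Recursively walk one node of the l1_map radix tree. @lp points at the
 * table entry covering guest addresses starting at @base; at level 0 it
 * is a PageDesc array whose per-page flags are compared against the
 * region currently being accumulated.
 */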
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
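
/*
 * Report each maximal run of guest pages with identical protection to
 * @fn, walking the whole l1_map. The final walk_memory_regions_end()
 * call flushes the last pending region; dump_region() below is a
 * typical callback.
 */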
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
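
/* walk_memory_regions() callback used by page_dump():
   print one region as "start-end size prot". */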
static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
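
/* Return the PAGE_* flags for the page containing @address,
   or 0 if the page has never been mapped. */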
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is set automatically depending
   on PAGE_WRITE. The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;
    bool reset_target_data;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
    assert(end - 1 <= GUEST_ADDR_MAX);
    assert(start < end);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }
    reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
    flags &= ~PAGE_RESET;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        if (reset_target_data) {
            g_free(p->target_data);
            p->target_data = NULL;
            p->flags = flags;
        } else {
            /* Using mprotect on a page does not change MAP_ANON. */
            p->flags = (p->flags & PAGE_ANON) | flags;
        }
    }
}
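
/* Return the per-page target-specific data for @address,
   or NULL if none has been allocated. */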
void *page_get_target_data(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
    return p ? p->target_data : NULL;
}
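
/* Return the target data for the valid page at @address, allocating
   @size zeroed bytes on first use; return NULL if the page is not
   valid. */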
void *page_alloc_target_data(target_ulong address, size_t size)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
    void *ret = NULL;

    if (p->flags & PAGE_VALID) {
        ret = p->target_data;
        if (!ret) {
            p->target_data = ret = g_malloc0(size);
        }
    }
    return ret;
}
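
/* Check that every page in [@start, @start + @len) is mapped with at
 * least the protections requested in @flags, unprotecting any page that
 * was made read-only because it contains translated code. Returns 0 on
 * success, -1 on failure.
 */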
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
    if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) {
        assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
    }

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
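
/* Write-protect the host page containing @page_addr whenever any guest
 * page within it is still writable, so that writes to translated code
 * trap and are handled by page_unprotect() below.
 */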
void page_protect(tb_page_addr_t page_addr)
{
    target_ulong addr;
    PageDesc *p;
    int prot;

    p = page_find(page_addr >> TARGET_PAGE_BITS);
    if (p && (p->flags & PAGE_WRITE)) {
        /*
         * Force the host page as non writable (writes will have a page fault +
         * mprotect overhead).
         */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p = page_find(addr >> TARGET_PAGE_BITS);
            if (!p) {
                continue;
            }
            prot |= p->flags;
            p->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h_untagged(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
}

/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if (p->flags & PAGE_WRITE_ORG) {
        current_tb_invalidated = false;
        if (p->flags & PAGE_WRITE) {
            /* If the page is actually marked WRITE then assume this is because
             * this thread raced with another one which got here first and
             * set the page to PAGE_WRITE and did the TB invalidate for us.
             */
#ifdef TARGET_HAS_PRECISE_SMC
            TranslationBlock *current_tb = tcg_tb_lookup(pc);
            if (current_tb) {
                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
            }
#endif
        } else {
            host_start = address & qemu_host_page_mask;
            host_end = host_start + qemu_host_page_size;

            prot = 0;
            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
                p = page_find(addr >> TARGET_PAGE_BITS);
                p->flags |= PAGE_WRITE;
                prot |= p->flags;

                /* and since the content will be modified, we must invalidate
                   the corresponding translated code. */
                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
                if (DEBUG_TB_CHECK_GATE) {
                    tb_invalidate_check(addr);
                }
#endif
            }
            mprotect((void *)g2h_untagged(host_start), qemu_host_page_size,
                     prot & PAGE_BITS);
        }
        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */

/* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}