memory.c

  1. /*
  2. * Physical memory management
  3. *
  4. * Copyright 2011 Red Hat, Inc. and/or its affiliates
  5. *
  6. * Authors:
  7. * Avi Kivity <avi@redhat.com>
  8. *
  9. * This work is licensed under the terms of the GNU GPL, version 2. See
  10. * the COPYING file in the top-level directory.
  11. *
  12. * Contributions after 2012-01-13 are licensed under the terms of the
  13. * GNU GPL, version 2 or (at your option) any later version.
  14. */
  15. #include "exec/memory.h"
  16. #include "exec/address-spaces.h"
  17. #include "exec/ioport.h"
  18. #include "qapi/visitor.h"
  19. #include "qemu/bitops.h"
  20. #include "qom/object.h"
  21. #include "trace.h"
  22. #include <assert.h>
  23. #include "exec/memory-internal.h"
  24. #include "exec/ram_addr.h"
  25. #include "sysemu/sysemu.h"
  26. //#define DEBUG_UNASSIGNED
  27. static unsigned memory_region_transaction_depth;
  28. static bool memory_region_update_pending;
  29. static bool ioeventfd_update_pending;
  30. static bool global_dirty_log = false;
  31. /* flat_view_mutex is taken around reading as->current_map; the critical
  32. * section is extremely short, so I'm using a single mutex for every AS.
  33. * We could also use RCU for the read-side.
  34. *
  35. * The BQL is taken around transaction commits, hence both locks are taken
  36. * while writing to as->current_map (with the BQL taken outside).
  37. */
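/* Concretely: the read side below is address_space_get_flatview(), which
 * holds flat_view_mutex only long enough to load as->current_map and take a
 * reference; the writer is address_space_update_topology(), which swaps in
 * the newly generated map under the same mutex, with the BQL held around the
 * enclosing transaction commit as noted above.
 */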
  38. static QemuMutex flat_view_mutex;
  39. static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
  40. = QTAILQ_HEAD_INITIALIZER(memory_listeners);
  41. static QTAILQ_HEAD(, AddressSpace) address_spaces
  42. = QTAILQ_HEAD_INITIALIZER(address_spaces);
  43. static void memory_init(void)
  44. {
  45. qemu_mutex_init(&flat_view_mutex);
  46. }
  47. typedef struct AddrRange AddrRange;
  48. /*
  49. * Note that using signed integers limits us to physical addresses at most
  50. * 63 bits wide. They are needed for negative offsetting in aliases
  51. * (large MemoryRegion::alias_offset).
  52. */
  53. struct AddrRange {
  54. Int128 start;
  55. Int128 size;
  56. };
  57. static AddrRange addrrange_make(Int128 start, Int128 size)
  58. {
  59. return (AddrRange) { start, size };
  60. }
  61. static bool addrrange_equal(AddrRange r1, AddrRange r2)
  62. {
  63. return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
  64. }
  65. static Int128 addrrange_end(AddrRange r)
  66. {
  67. return int128_add(r.start, r.size);
  68. }
  69. static AddrRange addrrange_shift(AddrRange range, Int128 delta)
  70. {
  71. int128_addto(&range.start, delta);
  72. return range;
  73. }
  74. static bool addrrange_contains(AddrRange range, Int128 addr)
  75. {
  76. return int128_ge(addr, range.start)
  77. && int128_lt(addr, addrrange_end(range));
  78. }
  79. static bool addrrange_intersects(AddrRange r1, AddrRange r2)
  80. {
  81. return addrrange_contains(r1, r2.start)
  82. || addrrange_contains(r2, r1.start);
  83. }
  84. static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
  85. {
  86. Int128 start = int128_max(r1.start, r2.start);
  87. Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
  88. return addrrange_make(start, int128_sub(end, start));
  89. }
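/* Worked example: intersecting [0x1000, +0x3000) with [0x2000, +0x4000)
 * yields start = max(0x1000, 0x2000) = 0x2000 and
 * end = min(0x4000, 0x6000) = 0x4000, i.e. the range [0x2000, +0x2000).
 */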
  90. enum ListenerDirection { Forward, Reverse };
  91. static bool memory_listener_match(MemoryListener *listener,
  92. MemoryRegionSection *section)
  93. {
  94. return !listener->address_space_filter
  95. || listener->address_space_filter == section->address_space;
  96. }
  97. #define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) \
  98. do { \
  99. MemoryListener *_listener; \
  100. \
  101. switch (_direction) { \
  102. case Forward: \
  103. QTAILQ_FOREACH(_listener, &memory_listeners, link) { \
  104. if (_listener->_callback) { \
  105. _listener->_callback(_listener, ##_args); \
  106. } \
  107. } \
  108. break; \
  109. case Reverse: \
  110. QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, \
  111. memory_listeners, link) { \
  112. if (_listener->_callback) { \
  113. _listener->_callback(_listener, ##_args); \
  114. } \
  115. } \
  116. break; \
  117. default: \
  118. abort(); \
  119. } \
  120. } while (0)
  121. #define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
  122. do { \
  123. MemoryListener *_listener; \
  124. \
  125. switch (_direction) { \
  126. case Forward: \
  127. QTAILQ_FOREACH(_listener, &memory_listeners, link) { \
  128. if (_listener->_callback \
  129. && memory_listener_match(_listener, _section)) { \
  130. _listener->_callback(_listener, _section, ##_args); \
  131. } \
  132. } \
  133. break; \
  134. case Reverse: \
  135. QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, \
  136. memory_listeners, link) { \
  137. if (_listener->_callback \
  138. && memory_listener_match(_listener, _section)) { \
  139. _listener->_callback(_listener, _section, ##_args); \
  140. } \
  141. } \
  142. break; \
  143. default: \
  144. abort(); \
  145. } \
  146. } while (0)
  147. /* No need to ref/unref .mr; the FlatRange keeps it alive. */
  148. #define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback) \
  149. MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) { \
  150. .mr = (fr)->mr, \
  151. .address_space = (as), \
  152. .offset_within_region = (fr)->offset_in_region, \
  153. .size = (fr)->addr.size, \
  154. .offset_within_address_space = int128_get64((fr)->addr.start), \
  155. .readonly = (fr)->readonly, \
  156. }))
  157. struct CoalescedMemoryRange {
  158. AddrRange addr;
  159. QTAILQ_ENTRY(CoalescedMemoryRange) link;
  160. };
  161. struct MemoryRegionIoeventfd {
  162. AddrRange addr;
  163. bool match_data;
  164. uint64_t data;
  165. EventNotifier *e;
  166. };
  167. static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
  168. MemoryRegionIoeventfd b)
  169. {
  170. if (int128_lt(a.addr.start, b.addr.start)) {
  171. return true;
  172. } else if (int128_gt(a.addr.start, b.addr.start)) {
  173. return false;
  174. } else if (int128_lt(a.addr.size, b.addr.size)) {
  175. return true;
  176. } else if (int128_gt(a.addr.size, b.addr.size)) {
  177. return false;
  178. } else if (a.match_data < b.match_data) {
  179. return true;
  180. } else if (a.match_data > b.match_data) {
  181. return false;
  182. } else if (a.match_data) {
  183. if (a.data < b.data) {
  184. return true;
  185. } else if (a.data > b.data) {
  186. return false;
  187. }
  188. }
  189. if (a.e < b.e) {
  190. return true;
  191. } else if (a.e > b.e) {
  192. return false;
  193. }
  194. return false;
  195. }
  196. static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
  197. MemoryRegionIoeventfd b)
  198. {
  199. return !memory_region_ioeventfd_before(a, b)
  200. && !memory_region_ioeventfd_before(b, a);
  201. }
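/* memory_region_ioeventfd_before() is a lexicographic strict ordering over
 * (addr.start, addr.size, match_data, data, e), so two ioeventfds compare
 * equal exactly when neither orders before the other.
 */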
  202. typedef struct FlatRange FlatRange;
  203. typedef struct FlatView FlatView;
  204. /* Range of memory in the global map. Addresses are absolute. */
  205. struct FlatRange {
  206. MemoryRegion *mr;
  207. hwaddr offset_in_region;
  208. AddrRange addr;
  209. uint8_t dirty_log_mask;
  210. bool romd_mode;
  211. bool readonly;
  212. };
  213. /* Flattened global view of current active memory hierarchy. Kept in sorted
  214. * order.
  215. */
  216. struct FlatView {
  217. unsigned ref;
  218. FlatRange *ranges;
  219. unsigned nr;
  220. unsigned nr_allocated;
  221. };
  222. typedef struct AddressSpaceOps AddressSpaceOps;
  223. #define FOR_EACH_FLAT_RANGE(var, view) \
  224. for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
  225. static bool flatrange_equal(FlatRange *a, FlatRange *b)
  226. {
  227. return a->mr == b->mr
  228. && addrrange_equal(a->addr, b->addr)
  229. && a->offset_in_region == b->offset_in_region
  230. && a->romd_mode == b->romd_mode
  231. && a->readonly == b->readonly;
  232. }
  233. static void flatview_init(FlatView *view)
  234. {
  235. view->ref = 1;
  236. view->ranges = NULL;
  237. view->nr = 0;
  238. view->nr_allocated = 0;
  239. }
  240. /* Insert a range into a given position. Caller is responsible for maintaining
  241. * sorting order.
  242. */
  243. static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
  244. {
  245. if (view->nr == view->nr_allocated) {
  246. view->nr_allocated = MAX(2 * view->nr, 10);
  247. view->ranges = g_realloc(view->ranges,
  248. view->nr_allocated * sizeof(*view->ranges));
  249. }
  250. memmove(view->ranges + pos + 1, view->ranges + pos,
  251. (view->nr - pos) * sizeof(FlatRange));
  252. view->ranges[pos] = *range;
  253. memory_region_ref(range->mr);
  254. ++view->nr;
  255. }
  256. static void flatview_destroy(FlatView *view)
  257. {
  258. int i;
  259. for (i = 0; i < view->nr; i++) {
  260. memory_region_unref(view->ranges[i].mr);
  261. }
  262. g_free(view->ranges);
  263. g_free(view);
  264. }
  265. static void flatview_ref(FlatView *view)
  266. {
  267. atomic_inc(&view->ref);
  268. }
  269. static void flatview_unref(FlatView *view)
  270. {
  271. if (atomic_fetch_dec(&view->ref) == 1) {
  272. flatview_destroy(view);
  273. }
  274. }
  275. static bool can_merge(FlatRange *r1, FlatRange *r2)
  276. {
  277. return int128_eq(addrrange_end(r1->addr), r2->addr.start)
  278. && r1->mr == r2->mr
  279. && int128_eq(int128_add(int128_make64(r1->offset_in_region),
  280. r1->addr.size),
  281. int128_make64(r2->offset_in_region))
  282. && r1->dirty_log_mask == r2->dirty_log_mask
  283. && r1->romd_mode == r2->romd_mode
  284. && r1->readonly == r2->readonly;
  285. }
  286. /* Attempt to simplify a view by merging adjacent ranges */
  287. static void flatview_simplify(FlatView *view)
  288. {
  289. unsigned i, j;
  290. i = 0;
  291. while (i < view->nr) {
  292. j = i + 1;
  293. while (j < view->nr
  294. && can_merge(&view->ranges[j-1], &view->ranges[j])) {
  295. int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
  296. ++j;
  297. }
  298. ++i;
  299. memmove(&view->ranges[i], &view->ranges[j],
  300. (view->nr - j) * sizeof(view->ranges[j]));
  301. view->nr -= j - i;
  302. }
  303. }
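/* Example: two FlatRanges backed by the same RAM MemoryRegion that sit back
 * to back in guest physical address space (say [0x0, +0x1000) at region
 * offset 0x0 and [0x1000, +0x1000) at region offset 0x1000) satisfy
 * can_merge() and collapse into a single [0x0, +0x2000) range here.
 */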
  304. static bool memory_region_big_endian(MemoryRegion *mr)
  305. {
  306. #ifdef TARGET_WORDS_BIGENDIAN
  307. return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
  308. #else
  309. return mr->ops->endianness == DEVICE_BIG_ENDIAN;
  310. #endif
  311. }
  312. static bool memory_region_wrong_endianness(MemoryRegion *mr)
  313. {
  314. #ifdef TARGET_WORDS_BIGENDIAN
  315. return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
  316. #else
  317. return mr->ops->endianness == DEVICE_BIG_ENDIAN;
  318. #endif
  319. }
  320. static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
  321. {
  322. if (memory_region_wrong_endianness(mr)) {
  323. switch (size) {
  324. case 1:
  325. break;
  326. case 2:
  327. *data = bswap16(*data);
  328. break;
  329. case 4:
  330. *data = bswap32(*data);
  331. break;
  332. case 8:
  333. *data = bswap64(*data);
  334. break;
  335. default:
  336. abort();
  337. }
  338. }
  339. }
  340. static void memory_region_oldmmio_read_accessor(MemoryRegion *mr,
  341. hwaddr addr,
  342. uint64_t *value,
  343. unsigned size,
  344. unsigned shift,
  345. uint64_t mask)
  346. {
  347. uint64_t tmp;
  348. tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
  349. trace_memory_region_ops_read(mr, addr, tmp, size);
  350. *value |= (tmp & mask) << shift;
  351. }
  352. static void memory_region_read_accessor(MemoryRegion *mr,
  353. hwaddr addr,
  354. uint64_t *value,
  355. unsigned size,
  356. unsigned shift,
  357. uint64_t mask)
  358. {
  359. uint64_t tmp;
  360. if (mr->flush_coalesced_mmio) {
  361. qemu_flush_coalesced_mmio_buffer();
  362. }
  363. tmp = mr->ops->read(mr->opaque, addr, size);
  364. trace_memory_region_ops_read(mr, addr, tmp, size);
  365. *value |= (tmp & mask) << shift;
  366. }
  367. static void memory_region_oldmmio_write_accessor(MemoryRegion *mr,
  368. hwaddr addr,
  369. uint64_t *value,
  370. unsigned size,
  371. unsigned shift,
  372. uint64_t mask)
  373. {
  374. uint64_t tmp;
  375. tmp = (*value >> shift) & mask;
  376. trace_memory_region_ops_write(mr, addr, tmp, size);
  377. mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
  378. }
  379. static void memory_region_write_accessor(MemoryRegion *mr,
  380. hwaddr addr,
  381. uint64_t *value,
  382. unsigned size,
  383. unsigned shift,
  384. uint64_t mask)
  385. {
  386. uint64_t tmp;
  387. if (mr->flush_coalesced_mmio) {
  388. qemu_flush_coalesced_mmio_buffer();
  389. }
  390. tmp = (*value >> shift) & mask;
  391. trace_memory_region_ops_write(mr, addr, tmp, size);
  392. mr->ops->write(mr->opaque, addr, tmp, size);
  393. }
  394. static void access_with_adjusted_size(hwaddr addr,
  395. uint64_t *value,
  396. unsigned size,
  397. unsigned access_size_min,
  398. unsigned access_size_max,
  399. void (*access)(MemoryRegion *mr,
  400. hwaddr addr,
  401. uint64_t *value,
  402. unsigned size,
  403. unsigned shift,
  404. uint64_t mask),
  405. MemoryRegion *mr)
  406. {
  407. uint64_t access_mask;
  408. unsigned access_size;
  409. unsigned i;
  410. if (!access_size_min) {
  411. access_size_min = 1;
  412. }
  413. if (!access_size_max) {
  414. access_size_max = 4;
  415. }
  416. /* FIXME: support unaligned access? */
  417. access_size = MAX(MIN(size, access_size_max), access_size_min);
  418. access_mask = -1ULL >> (64 - access_size * 8);
  419. if (memory_region_big_endian(mr)) {
  420. for (i = 0; i < size; i += access_size) {
  421. access(mr, addr + i, value, access_size,
  422. (size - access_size - i) * 8, access_mask);
  423. }
  424. } else {
  425. for (i = 0; i < size; i += access_size) {
  426. access(mr, addr + i, value, access_size, i * 8, access_mask);
  427. }
  428. }
  429. }
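/* Example: a 4-byte access to a region whose ops declare
 * impl.max_access_size = 1 is broken into four 1-byte accesses.  On a
 * little-endian region they are applied at shifts 0, 8, 16, 24; on a
 * big-endian region at shifts 24, 16, 8, 0, so the byte at the lowest
 * address always lands in the position the region's endianness dictates.
 */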
  430. static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
  431. {
  432. AddressSpace *as;
  433. while (mr->container) {
  434. mr = mr->container;
  435. }
  436. QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
  437. if (mr == as->root) {
  438. return as;
  439. }
  440. }
  441. return NULL;
  442. }
  443. /* Render a memory region into the global view. Ranges in @view obscure
  444. * ranges in @mr.
  445. */
  446. static void render_memory_region(FlatView *view,
  447. MemoryRegion *mr,
  448. Int128 base,
  449. AddrRange clip,
  450. bool readonly)
  451. {
  452. MemoryRegion *subregion;
  453. unsigned i;
  454. hwaddr offset_in_region;
  455. Int128 remain;
  456. Int128 now;
  457. FlatRange fr;
  458. AddrRange tmp;
  459. if (!mr->enabled) {
  460. return;
  461. }
  462. int128_addto(&base, int128_make64(mr->addr));
  463. readonly |= mr->readonly;
  464. tmp = addrrange_make(base, mr->size);
  465. if (!addrrange_intersects(tmp, clip)) {
  466. return;
  467. }
  468. clip = addrrange_intersection(tmp, clip);
  469. if (mr->alias) {
  470. int128_subfrom(&base, int128_make64(mr->alias->addr));
  471. int128_subfrom(&base, int128_make64(mr->alias_offset));
  472. render_memory_region(view, mr->alias, base, clip, readonly);
  473. return;
  474. }
  475. /* Render subregions in priority order. */
  476. QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
  477. render_memory_region(view, subregion, base, clip, readonly);
  478. }
  479. if (!mr->terminates) {
  480. return;
  481. }
  482. offset_in_region = int128_get64(int128_sub(clip.start, base));
  483. base = clip.start;
  484. remain = clip.size;
  485. fr.mr = mr;
  486. fr.dirty_log_mask = mr->dirty_log_mask;
  487. fr.romd_mode = mr->romd_mode;
  488. fr.readonly = readonly;
  489. /* Render the region itself into any gaps left by the current view. */
  490. for (i = 0; i < view->nr && int128_nz(remain); ++i) {
  491. if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
  492. continue;
  493. }
  494. if (int128_lt(base, view->ranges[i].addr.start)) {
  495. now = int128_min(remain,
  496. int128_sub(view->ranges[i].addr.start, base));
  497. fr.offset_in_region = offset_in_region;
  498. fr.addr = addrrange_make(base, now);
  499. flatview_insert(view, i, &fr);
  500. ++i;
  501. int128_addto(&base, now);
  502. offset_in_region += int128_get64(now);
  503. int128_subfrom(&remain, now);
  504. }
  505. now = int128_sub(int128_min(int128_add(base, remain),
  506. addrrange_end(view->ranges[i].addr)),
  507. base);
  508. int128_addto(&base, now);
  509. offset_in_region += int128_get64(now);
  510. int128_subfrom(&remain, now);
  511. }
  512. if (int128_nz(remain)) {
  513. fr.offset_in_region = offset_in_region;
  514. fr.addr = addrrange_make(base, remain);
  515. flatview_insert(view, i, &fr);
  516. }
  517. }
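/* Illustration: a 0x1000-byte MMIO region placed at offset 0x1000 inside an
 * enabled container that is itself mapped at 0x10000000 renders as a single
 * FlatRange covering [0x10001000, +0x1000), unless a higher-priority sibling
 * already occupies part of that window, in which case only the remaining
 * gaps are filled.
 */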
  518. /* Render a memory topology into a list of disjoint absolute ranges. */
  519. static FlatView *generate_memory_topology(MemoryRegion *mr)
  520. {
  521. FlatView *view;
  522. view = g_new(FlatView, 1);
  523. flatview_init(view);
  524. if (mr) {
  525. render_memory_region(view, mr, int128_zero(),
  526. addrrange_make(int128_zero(), int128_2_64()), false);
  527. }
  528. flatview_simplify(view);
  529. return view;
  530. }
  531. static void address_space_add_del_ioeventfds(AddressSpace *as,
  532. MemoryRegionIoeventfd *fds_new,
  533. unsigned fds_new_nb,
  534. MemoryRegionIoeventfd *fds_old,
  535. unsigned fds_old_nb)
  536. {
  537. unsigned iold, inew;
  538. MemoryRegionIoeventfd *fd;
  539. MemoryRegionSection section;
  540. /* Generate a symmetric difference of the old and new fd sets, adding
  541. * and deleting as necessary.
  542. */
  543. iold = inew = 0;
  544. while (iold < fds_old_nb || inew < fds_new_nb) {
  545. if (iold < fds_old_nb
  546. && (inew == fds_new_nb
  547. || memory_region_ioeventfd_before(fds_old[iold],
  548. fds_new[inew]))) {
  549. fd = &fds_old[iold];
  550. section = (MemoryRegionSection) {
  551. .address_space = as,
  552. .offset_within_address_space = int128_get64(fd->addr.start),
  553. .size = fd->addr.size,
  554. };
  555. MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
  556. fd->match_data, fd->data, fd->e);
  557. ++iold;
  558. } else if (inew < fds_new_nb
  559. && (iold == fds_old_nb
  560. || memory_region_ioeventfd_before(fds_new[inew],
  561. fds_old[iold]))) {
  562. fd = &fds_new[inew];
  563. section = (MemoryRegionSection) {
  564. .address_space = as,
  565. .offset_within_address_space = int128_get64(fd->addr.start),
  566. .size = fd->addr.size,
  567. };
  568. MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
  569. fd->match_data, fd->data, fd->e);
  570. ++inew;
  571. } else {
  572. ++iold;
  573. ++inew;
  574. }
  575. }
  576. }
  577. static FlatView *address_space_get_flatview(AddressSpace *as)
  578. {
  579. FlatView *view;
  580. qemu_mutex_lock(&flat_view_mutex);
  581. view = as->current_map;
  582. flatview_ref(view);
  583. qemu_mutex_unlock(&flat_view_mutex);
  584. return view;
  585. }
  586. static void address_space_update_ioeventfds(AddressSpace *as)
  587. {
  588. FlatView *view;
  589. FlatRange *fr;
  590. unsigned ioeventfd_nb = 0;
  591. MemoryRegionIoeventfd *ioeventfds = NULL;
  592. AddrRange tmp;
  593. unsigned i;
  594. view = address_space_get_flatview(as);
  595. FOR_EACH_FLAT_RANGE(fr, view) {
  596. for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
  597. tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
  598. int128_sub(fr->addr.start,
  599. int128_make64(fr->offset_in_region)));
  600. if (addrrange_intersects(fr->addr, tmp)) {
  601. ++ioeventfd_nb;
  602. ioeventfds = g_realloc(ioeventfds,
  603. ioeventfd_nb * sizeof(*ioeventfds));
  604. ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
  605. ioeventfds[ioeventfd_nb-1].addr = tmp;
  606. }
  607. }
  608. }
  609. address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
  610. as->ioeventfds, as->ioeventfd_nb);
  611. g_free(as->ioeventfds);
  612. as->ioeventfds = ioeventfds;
  613. as->ioeventfd_nb = ioeventfd_nb;
  614. flatview_unref(view);
  615. }
  616. static void address_space_update_topology_pass(AddressSpace *as,
  617. const FlatView *old_view,
  618. const FlatView *new_view,
  619. bool adding)
  620. {
  621. unsigned iold, inew;
  622. FlatRange *frold, *frnew;
  623. /* Generate a symmetric difference of the old and new memory maps.
  624. * Kill ranges in the old map, and instantiate ranges in the new map.
  625. */
  626. iold = inew = 0;
  627. while (iold < old_view->nr || inew < new_view->nr) {
  628. if (iold < old_view->nr) {
  629. frold = &old_view->ranges[iold];
  630. } else {
  631. frold = NULL;
  632. }
  633. if (inew < new_view->nr) {
  634. frnew = &new_view->ranges[inew];
  635. } else {
  636. frnew = NULL;
  637. }
  638. if (frold
  639. && (!frnew
  640. || int128_lt(frold->addr.start, frnew->addr.start)
  641. || (int128_eq(frold->addr.start, frnew->addr.start)
  642. && !flatrange_equal(frold, frnew)))) {
  643. /* In old but not in new, or in both but attributes changed. */
  644. if (!adding) {
  645. MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
  646. }
  647. ++iold;
  648. } else if (frold && frnew && flatrange_equal(frold, frnew)) {
  649. /* In both and unchanged (except logging may have changed) */
  650. if (adding) {
  651. MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
  652. if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
  653. MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop);
  654. } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
  655. MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start);
  656. }
  657. }
  658. ++iold;
  659. ++inew;
  660. } else {
  661. /* In new */
  662. if (adding) {
  663. MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
  664. }
  665. ++inew;
  666. }
  667. }
  668. }
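/* This runs twice per topology update (see address_space_update_topology()
 * below): a first pass with adding = false emits region_del for ranges that
 * leave the map or change attributes, and a second pass with adding = true
 * emits region_add for new ranges and region_nop/log_start/log_stop for
 * ranges that stay.
 */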
  669. static void address_space_update_topology(AddressSpace *as)
  670. {
  671. FlatView *old_view = address_space_get_flatview(as);
  672. FlatView *new_view = generate_memory_topology(as->root);
  673. address_space_update_topology_pass(as, old_view, new_view, false);
  674. address_space_update_topology_pass(as, old_view, new_view, true);
  675. qemu_mutex_lock(&flat_view_mutex);
  676. flatview_unref(as->current_map);
  677. as->current_map = new_view;
  678. qemu_mutex_unlock(&flat_view_mutex);
  679. /* Note that all the old MemoryRegions are still alive up to this
  680. * point. This relieves most MemoryListeners from the need to
  681. * ref/unref the MemoryRegions they get---unless they use them
  682. * outside the iothread mutex, in which case precise reference
  683. * counting is necessary.
  684. */
  685. flatview_unref(old_view);
  686. address_space_update_ioeventfds(as);
  687. }
  688. void memory_region_transaction_begin(void)
  689. {
  690. qemu_flush_coalesced_mmio_buffer();
  691. ++memory_region_transaction_depth;
  692. }
  693. static void memory_region_clear_pending(void)
  694. {
  695. memory_region_update_pending = false;
  696. ioeventfd_update_pending = false;
  697. }
  698. void memory_region_transaction_commit(void)
  699. {
  700. AddressSpace *as;
  701. assert(memory_region_transaction_depth);
  702. --memory_region_transaction_depth;
  703. if (!memory_region_transaction_depth) {
  704. if (memory_region_update_pending) {
  705. MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
  706. QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
  707. address_space_update_topology(as);
  708. }
  709. MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
  710. } else if (ioeventfd_update_pending) {
  711. QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
  712. address_space_update_ioeventfds(as);
  713. }
  714. }
  715. memory_region_clear_pending();
  716. }
  717. }
  718. static void memory_region_destructor_none(MemoryRegion *mr)
  719. {
  720. }
  721. static void memory_region_destructor_ram(MemoryRegion *mr)
  722. {
  723. qemu_ram_free(mr->ram_addr);
  724. }
  725. static void memory_region_destructor_alias(MemoryRegion *mr)
  726. {
  727. memory_region_unref(mr->alias);
  728. }
  729. static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
  730. {
  731. qemu_ram_free_from_ptr(mr->ram_addr);
  732. }
  733. static void memory_region_destructor_rom_device(MemoryRegion *mr)
  734. {
  735. qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
  736. }
  737. static bool memory_region_need_escape(char c)
  738. {
  739. return c == '/' || c == '[' || c == '\\' || c == ']';
  740. }
  741. static char *memory_region_escape_name(const char *name)
  742. {
  743. const char *p;
  744. char *escaped, *q;
  745. uint8_t c;
  746. size_t bytes = 0;
  747. for (p = name; *p; p++) {
  748. bytes += memory_region_need_escape(*p) ? 4 : 1;
  749. }
  750. if (bytes == p - name) {
  751. return g_memdup(name, bytes + 1);
  752. }
  753. escaped = g_malloc(bytes + 1);
  754. for (p = name, q = escaped; *p; p++) {
  755. c = *p;
  756. if (unlikely(memory_region_need_escape(c))) {
  757. *q++ = '\\';
  758. *q++ = 'x';
  759. *q++ = "0123456789abcdef"[c >> 4];
  760. c = "0123456789abcdef"[c & 15];
  761. }
  762. *q++ = c;
  763. }
  764. *q = 0;
  765. return escaped;
  766. }
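/* Example: a region named "pci/mem" escapes to "pci\x2fmem"; a name that
 * contains none of '/', '[', '\\', ']' is returned unchanged (as a fresh
 * g_memdup() copy).
 */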
  767. static void object_property_add_child_array(Object *owner,
  768. const char *name,
  769. Object *child)
  770. {
  771. int i;
  772. char *base_name = memory_region_escape_name(name);
  773. for (i = 0; ; i++) {
  774. char *full_name = g_strdup_printf("%s[%d]", base_name, i);
  775. Error *local_err = NULL;
  776. object_property_add_child(owner, full_name, child, &local_err);
  777. g_free(full_name);
  778. if (!local_err) {
  779. break;
  780. }
  781. error_free(local_err);
  782. }
  783. g_free(base_name);
  784. }
  785. void memory_region_init(MemoryRegion *mr,
  786. Object *owner,
  787. const char *name,
  788. uint64_t size)
  789. {
  790. if (!owner) {
  791. owner = qdev_get_machine();
  792. }
  793. object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
  794. mr->size = int128_make64(size);
  795. if (size == UINT64_MAX) {
  796. mr->size = int128_2_64();
  797. }
  798. mr->name = g_strdup(name);
  799. if (name) {
  800. object_property_add_child_array(owner, name, OBJECT(mr));
  801. object_unref(OBJECT(mr));
  802. }
  803. }
  804. static void memory_region_get_addr(Object *obj, Visitor *v, void *opaque,
  805. const char *name, Error **errp)
  806. {
  807. MemoryRegion *mr = MEMORY_REGION(obj);
  808. uint64_t value = mr->addr;
  809. visit_type_uint64(v, &value, name, errp);
  810. }
  811. static void memory_region_get_container(Object *obj, Visitor *v, void *opaque,
  812. const char *name, Error **errp)
  813. {
  814. MemoryRegion *mr = MEMORY_REGION(obj);
  815. gchar *path = (gchar *)"";
  816. if (mr->container) {
  817. path = object_get_canonical_path(OBJECT(mr->container));
  818. }
  819. visit_type_str(v, &path, name, errp);
  820. if (mr->container) {
  821. g_free(path);
  822. }
  823. }
  824. static Object *memory_region_resolve_container(Object *obj, void *opaque,
  825. const char *part)
  826. {
  827. MemoryRegion *mr = MEMORY_REGION(obj);
  828. return OBJECT(mr->container);
  829. }
  830. static void memory_region_get_priority(Object *obj, Visitor *v, void *opaque,
  831. const char *name, Error **errp)
  832. {
  833. MemoryRegion *mr = MEMORY_REGION(obj);
  834. int32_t value = mr->priority;
  835. visit_type_int32(v, &value, name, errp);
  836. }
  837. static bool memory_region_get_may_overlap(Object *obj, Error **errp)
  838. {
  839. MemoryRegion *mr = MEMORY_REGION(obj);
  840. return mr->may_overlap;
  841. }
  842. static void memory_region_get_size(Object *obj, Visitor *v, void *opaque,
  843. const char *name, Error **errp)
  844. {
  845. MemoryRegion *mr = MEMORY_REGION(obj);
  846. uint64_t value = memory_region_size(mr);
  847. visit_type_uint64(v, &value, name, errp);
  848. }
  849. static void memory_region_initfn(Object *obj)
  850. {
  851. MemoryRegion *mr = MEMORY_REGION(obj);
  852. ObjectProperty *op;
  853. mr->ops = &unassigned_mem_ops;
  854. mr->enabled = true;
  855. mr->romd_mode = true;
  856. mr->destructor = memory_region_destructor_none;
  857. QTAILQ_INIT(&mr->subregions);
  858. QTAILQ_INIT(&mr->coalesced);
  859. op = object_property_add(OBJECT(mr), "container",
  860. "link<" TYPE_MEMORY_REGION ">",
  861. memory_region_get_container,
  862. NULL, /* memory_region_set_container */
  863. NULL, NULL, &error_abort);
  864. op->resolve = memory_region_resolve_container;
  865. object_property_add(OBJECT(mr), "addr", "uint64",
  866. memory_region_get_addr,
  867. NULL, /* memory_region_set_addr */
  868. NULL, NULL, &error_abort);
  869. object_property_add(OBJECT(mr), "priority", "uint32",
  870. memory_region_get_priority,
  871. NULL, /* memory_region_set_priority */
  872. NULL, NULL, &error_abort);
  873. object_property_add_bool(OBJECT(mr), "may-overlap",
  874. memory_region_get_may_overlap,
  875. NULL, /* memory_region_set_may_overlap */
  876. &error_abort);
  877. object_property_add(OBJECT(mr), "size", "uint64",
  878. memory_region_get_size,
  879. NULL, /* memory_region_set_size, */
  880. NULL, NULL, &error_abort);
  881. }
  882. static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
  883. unsigned size)
  884. {
  885. #ifdef DEBUG_UNASSIGNED
  886. printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
  887. #endif
  888. if (current_cpu != NULL) {
  889. cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
  890. }
  891. return 0;
  892. }
  893. static void unassigned_mem_write(void *opaque, hwaddr addr,
  894. uint64_t val, unsigned size)
  895. {
  896. #ifdef DEBUG_UNASSIGNED
  897. printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
  898. #endif
  899. if (current_cpu != NULL) {
  900. cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
  901. }
  902. }
  903. static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
  904. unsigned size, bool is_write)
  905. {
  906. return false;
  907. }
  908. const MemoryRegionOps unassigned_mem_ops = {
  909. .valid.accepts = unassigned_mem_accepts,
  910. .endianness = DEVICE_NATIVE_ENDIAN,
  911. };
  912. bool memory_region_access_valid(MemoryRegion *mr,
  913. hwaddr addr,
  914. unsigned size,
  915. bool is_write)
  916. {
  917. int access_size_min, access_size_max;
  918. int access_size, i;
  919. if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
  920. return false;
  921. }
  922. if (!mr->ops->valid.accepts) {
  923. return true;
  924. }
  925. access_size_min = mr->ops->valid.min_access_size;
  926. if (!mr->ops->valid.min_access_size) {
  927. access_size_min = 1;
  928. }
  929. access_size_max = mr->ops->valid.max_access_size;
  930. if (!mr->ops->valid.max_access_size) {
  931. access_size_max = 4;
  932. }
  933. access_size = MAX(MIN(size, access_size_max), access_size_min);
  934. for (i = 0; i < size; i += access_size) {
  935. if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
  936. is_write)) {
  937. return false;
  938. }
  939. }
  940. return true;
  941. }
  942. static uint64_t memory_region_dispatch_read1(MemoryRegion *mr,
  943. hwaddr addr,
  944. unsigned size)
  945. {
  946. uint64_t data = 0;
  947. if (mr->ops->read) {
  948. access_with_adjusted_size(addr, &data, size,
  949. mr->ops->impl.min_access_size,
  950. mr->ops->impl.max_access_size,
  951. memory_region_read_accessor, mr);
  952. } else {
  953. access_with_adjusted_size(addr, &data, size, 1, 4,
  954. memory_region_oldmmio_read_accessor, mr);
  955. }
  956. return data;
  957. }
  958. static bool memory_region_dispatch_read(MemoryRegion *mr,
  959. hwaddr addr,
  960. uint64_t *pval,
  961. unsigned size)
  962. {
  963. if (!memory_region_access_valid(mr, addr, size, false)) {
  964. *pval = unassigned_mem_read(mr, addr, size);
  965. return true;
  966. }
  967. *pval = memory_region_dispatch_read1(mr, addr, size);
  968. adjust_endianness(mr, pval, size);
  969. return false;
  970. }
  971. static bool memory_region_dispatch_write(MemoryRegion *mr,
  972. hwaddr addr,
  973. uint64_t data,
  974. unsigned size)
  975. {
  976. if (!memory_region_access_valid(mr, addr, size, true)) {
  977. unassigned_mem_write(mr, addr, data, size);
  978. return true;
  979. }
  980. adjust_endianness(mr, &data, size);
  981. if (mr->ops->write) {
  982. access_with_adjusted_size(addr, &data, size,
  983. mr->ops->impl.min_access_size,
  984. mr->ops->impl.max_access_size,
  985. memory_region_write_accessor, mr);
  986. } else {
  987. access_with_adjusted_size(addr, &data, size, 1, 4,
  988. memory_region_oldmmio_write_accessor, mr);
  989. }
  990. return false;
  991. }
  992. void memory_region_init_io(MemoryRegion *mr,
  993. Object *owner,
  994. const MemoryRegionOps *ops,
  995. void *opaque,
  996. const char *name,
  997. uint64_t size)
  998. {
  999. memory_region_init(mr, owner, name, size);
  1000. mr->ops = ops;
  1001. mr->opaque = opaque;
  1002. mr->terminates = true;
  1003. mr->ram_addr = ~(ram_addr_t)0;
  1004. }
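/* Typical use (sketch; MyDeviceState, my_ops, s->mmio, system_memory and the
 * guest address are illustrative, not part of this file):
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &my_ops, s, "mydev-mmio", 0x1000);
 *     memory_region_add_subregion(system_memory, 0xfee00000, &s->mmio);
 *
 * Reads and writes that land in the region are dispatched to my_ops with 's'
 * passed back as the opaque pointer.
 */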
  1005. void memory_region_init_ram(MemoryRegion *mr,
  1006. Object *owner,
  1007. const char *name,
  1008. uint64_t size)
  1009. {
  1010. memory_region_init(mr, owner, name, size);
  1011. mr->ram = true;
  1012. mr->terminates = true;
  1013. mr->destructor = memory_region_destructor_ram;
  1014. mr->ram_addr = qemu_ram_alloc(size, mr);
  1015. }
  1016. #ifdef __linux__
  1017. void memory_region_init_ram_from_file(MemoryRegion *mr,
  1018. struct Object *owner,
  1019. const char *name,
  1020. uint64_t size,
  1021. bool share,
  1022. const char *path,
  1023. Error **errp)
  1024. {
  1025. memory_region_init(mr, owner, name, size);
  1026. mr->ram = true;
  1027. mr->terminates = true;
  1028. mr->destructor = memory_region_destructor_ram;
  1029. mr->ram_addr = qemu_ram_alloc_from_file(size, mr, share, path, errp);
  1030. }
  1031. #endif
  1032. void memory_region_init_ram_ptr(MemoryRegion *mr,
  1033. Object *owner,
  1034. const char *name,
  1035. uint64_t size,
  1036. void *ptr)
  1037. {
  1038. memory_region_init(mr, owner, name, size);
  1039. mr->ram = true;
  1040. mr->terminates = true;
  1041. mr->destructor = memory_region_destructor_ram_from_ptr;
  1042. mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr);
  1043. }
  1044. void memory_region_init_alias(MemoryRegion *mr,
  1045. Object *owner,
  1046. const char *name,
  1047. MemoryRegion *orig,
  1048. hwaddr offset,
  1049. uint64_t size)
  1050. {
  1051. memory_region_init(mr, owner, name, size);
  1052. memory_region_ref(orig);
  1053. mr->destructor = memory_region_destructor_alias;
  1054. mr->alias = orig;
  1055. mr->alias_offset = offset;
  1056. }
  1057. void memory_region_init_rom_device(MemoryRegion *mr,
  1058. Object *owner,
  1059. const MemoryRegionOps *ops,
  1060. void *opaque,
  1061. const char *name,
  1062. uint64_t size)
  1063. {
  1064. memory_region_init(mr, owner, name, size);
  1065. mr->ops = ops;
  1066. mr->opaque = opaque;
  1067. mr->terminates = true;
  1068. mr->rom_device = true;
  1069. mr->destructor = memory_region_destructor_rom_device;
  1070. mr->ram_addr = qemu_ram_alloc(size, mr);
  1071. }
  1072. void memory_region_init_iommu(MemoryRegion *mr,
  1073. Object *owner,
  1074. const MemoryRegionIOMMUOps *ops,
  1075. const char *name,
  1076. uint64_t size)
  1077. {
  1078. memory_region_init(mr, owner, name, size);
  1079. mr->iommu_ops = ops;
  1080. mr->terminates = true; /* then re-forwards */
  1081. notifier_list_init(&mr->iommu_notify);
  1082. }
  1083. void memory_region_init_reservation(MemoryRegion *mr,
  1084. Object *owner,
  1085. const char *name,
  1086. uint64_t size)
  1087. {
  1088. memory_region_init_io(mr, owner, &unassigned_mem_ops, mr, name, size);
  1089. }
  1090. static void memory_region_finalize(Object *obj)
  1091. {
  1092. MemoryRegion *mr = MEMORY_REGION(obj);
  1093. assert(QTAILQ_EMPTY(&mr->subregions));
  1094. assert(memory_region_transaction_depth == 0);
  1095. mr->destructor(mr);
  1096. memory_region_clear_coalescing(mr);
  1097. g_free((char *)mr->name);
  1098. g_free(mr->ioeventfds);
  1099. }
  1100. void memory_region_destroy(MemoryRegion *mr)
  1101. {
  1102. object_unparent(OBJECT(mr));
  1103. }
  1104. Object *memory_region_owner(MemoryRegion *mr)
  1105. {
  1106. Object *obj = OBJECT(mr);
  1107. return obj->parent;
  1108. }
  1109. void memory_region_ref(MemoryRegion *mr)
  1110. {
  1111. /* MMIO callbacks most likely will access data that belongs
  1112. * to the owner, hence the need to ref/unref the owner whenever
  1113. * the memory region is in use.
  1114. *
  1115. * The memory region is a child of its owner. As long as the
  1116. * owner doesn't call unparent itself on the memory region,
  1117. * ref-ing the owner will also keep the memory region alive.
  1118. * Memory regions without an owner are supposed to never go away,
  1119. * but we still ref/unref them for debugging purposes.
  1120. */
  1121. Object *obj = OBJECT(mr);
  1122. if (obj && obj->parent) {
  1123. object_ref(obj->parent);
  1124. } else {
  1125. object_ref(obj);
  1126. }
  1127. }
  1128. void memory_region_unref(MemoryRegion *mr)
  1129. {
  1130. Object *obj = OBJECT(mr);
  1131. if (obj && obj->parent) {
  1132. object_unref(obj->parent);
  1133. } else {
  1134. object_unref(obj);
  1135. }
  1136. }
  1137. uint64_t memory_region_size(MemoryRegion *mr)
  1138. {
  1139. if (int128_eq(mr->size, int128_2_64())) {
  1140. return UINT64_MAX;
  1141. }
  1142. return int128_get64(mr->size);
  1143. }
  1144. const char *memory_region_name(MemoryRegion *mr)
  1145. {
  1146. return mr->name;
  1147. }
  1148. bool memory_region_is_ram(MemoryRegion *mr)
  1149. {
  1150. return mr->ram;
  1151. }
  1152. bool memory_region_is_logging(MemoryRegion *mr)
  1153. {
  1154. return mr->dirty_log_mask;
  1155. }
  1156. bool memory_region_is_rom(MemoryRegion *mr)
  1157. {
  1158. return mr->ram && mr->readonly;
  1159. }
  1160. bool memory_region_is_iommu(MemoryRegion *mr)
  1161. {
  1162. return mr->iommu_ops;
  1163. }
  1164. void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
  1165. {
  1166. notifier_list_add(&mr->iommu_notify, n);
  1167. }
  1168. void memory_region_unregister_iommu_notifier(Notifier *n)
  1169. {
  1170. notifier_remove(n);
  1171. }
  1172. void memory_region_notify_iommu(MemoryRegion *mr,
  1173. IOMMUTLBEntry entry)
  1174. {
  1175. assert(memory_region_is_iommu(mr));
  1176. notifier_list_notify(&mr->iommu_notify, &entry);
  1177. }
  1178. void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
  1179. {
  1180. uint8_t mask = 1 << client;
  1181. memory_region_transaction_begin();
  1182. mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
  1183. memory_region_update_pending |= mr->enabled;
  1184. memory_region_transaction_commit();
  1185. }
  1186. bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
  1187. hwaddr size, unsigned client)
  1188. {
  1189. assert(mr->terminates);
  1190. return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client);
  1191. }
  1192. void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
  1193. hwaddr size)
  1194. {
  1195. assert(mr->terminates);
  1196. cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size);
  1197. }
  1198. bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
  1199. hwaddr size, unsigned client)
  1200. {
  1201. bool ret;
  1202. assert(mr->terminates);
  1203. ret = cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client);
  1204. if (ret) {
  1205. cpu_physical_memory_reset_dirty(mr->ram_addr + addr, size, client);
  1206. }
  1207. return ret;
  1208. }
  1209. void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
  1210. {
  1211. AddressSpace *as;
  1212. FlatRange *fr;
  1213. QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
  1214. FlatView *view = address_space_get_flatview(as);
  1215. FOR_EACH_FLAT_RANGE(fr, view) {
  1216. if (fr->mr == mr) {
  1217. MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
  1218. }
  1219. }
  1220. flatview_unref(view);
  1221. }
  1222. }
  1223. void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
  1224. {
  1225. if (mr->readonly != readonly) {
  1226. memory_region_transaction_begin();
  1227. mr->readonly = readonly;
  1228. memory_region_update_pending |= mr->enabled;
  1229. memory_region_transaction_commit();
  1230. }
  1231. }
  1232. void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
  1233. {
  1234. if (mr->romd_mode != romd_mode) {
  1235. memory_region_transaction_begin();
  1236. mr->romd_mode = romd_mode;
  1237. memory_region_update_pending |= mr->enabled;
  1238. memory_region_transaction_commit();
  1239. }
  1240. }
  1241. void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
  1242. hwaddr size, unsigned client)
  1243. {
  1244. assert(mr->terminates);
  1245. cpu_physical_memory_reset_dirty(mr->ram_addr + addr, size, client);
  1246. }
  1247. int memory_region_get_fd(MemoryRegion *mr)
  1248. {
  1249. if (mr->alias) {
  1250. return memory_region_get_fd(mr->alias);
  1251. }
  1252. assert(mr->terminates);
  1253. return qemu_get_ram_fd(mr->ram_addr & TARGET_PAGE_MASK);
  1254. }
  1255. void *memory_region_get_ram_ptr(MemoryRegion *mr)
  1256. {
  1257. if (mr->alias) {
  1258. return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
  1259. }
  1260. assert(mr->terminates);
  1261. return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
  1262. }
  1263. static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
  1264. {
  1265. FlatView *view;
  1266. FlatRange *fr;
  1267. CoalescedMemoryRange *cmr;
  1268. AddrRange tmp;
  1269. MemoryRegionSection section;
  1270. view = address_space_get_flatview(as);
  1271. FOR_EACH_FLAT_RANGE(fr, view) {
  1272. if (fr->mr == mr) {
  1273. section = (MemoryRegionSection) {
  1274. .address_space = as,
  1275. .offset_within_address_space = int128_get64(fr->addr.start),
  1276. .size = fr->addr.size,
  1277. };
  1278. MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
  1279. int128_get64(fr->addr.start),
  1280. int128_get64(fr->addr.size));
  1281. QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
  1282. tmp = addrrange_shift(cmr->addr,
  1283. int128_sub(fr->addr.start,
  1284. int128_make64(fr->offset_in_region)));
  1285. if (!addrrange_intersects(tmp, fr->addr)) {
  1286. continue;
  1287. }
  1288. tmp = addrrange_intersection(tmp, fr->addr);
  1289. MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
  1290. int128_get64(tmp.start),
  1291. int128_get64(tmp.size));
  1292. }
  1293. }
  1294. }
  1295. flatview_unref(view);
  1296. }
  1297. static void memory_region_update_coalesced_range(MemoryRegion *mr)
  1298. {
  1299. AddressSpace *as;
  1300. QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
  1301. memory_region_update_coalesced_range_as(mr, as);
  1302. }
  1303. }
  1304. void memory_region_set_coalescing(MemoryRegion *mr)
  1305. {
  1306. memory_region_clear_coalescing(mr);
  1307. memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
  1308. }
  1309. void memory_region_add_coalescing(MemoryRegion *mr,
  1310. hwaddr offset,
  1311. uint64_t size)
  1312. {
  1313. CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
  1314. cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
  1315. QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
  1316. memory_region_update_coalesced_range(mr);
  1317. memory_region_set_flush_coalesced(mr);
  1318. }
  1319. void memory_region_clear_coalescing(MemoryRegion *mr)
  1320. {
  1321. CoalescedMemoryRange *cmr;
  1322. bool updated = false;
  1323. qemu_flush_coalesced_mmio_buffer();
  1324. mr->flush_coalesced_mmio = false;
  1325. while (!QTAILQ_EMPTY(&mr->coalesced)) {
  1326. cmr = QTAILQ_FIRST(&mr->coalesced);
  1327. QTAILQ_REMOVE(&mr->coalesced, cmr, link);
  1328. g_free(cmr);
  1329. updated = true;
  1330. }
  1331. if (updated) {
  1332. memory_region_update_coalesced_range(mr);
  1333. }
  1334. }
  1335. void memory_region_set_flush_coalesced(MemoryRegion *mr)
  1336. {
  1337. mr->flush_coalesced_mmio = true;
  1338. }
  1339. void memory_region_clear_flush_coalesced(MemoryRegion *mr)
  1340. {
  1341. qemu_flush_coalesced_mmio_buffer();
  1342. if (QTAILQ_EMPTY(&mr->coalesced)) {
  1343. mr->flush_coalesced_mmio = false;
  1344. }
  1345. }
  1346. void memory_region_add_eventfd(MemoryRegion *mr,
  1347. hwaddr addr,
  1348. unsigned size,
  1349. bool match_data,
  1350. uint64_t data,
  1351. EventNotifier *e)
  1352. {
  1353. MemoryRegionIoeventfd mrfd = {
  1354. .addr.start = int128_make64(addr),
  1355. .addr.size = int128_make64(size),
  1356. .match_data = match_data,
  1357. .data = data,
  1358. .e = e,
  1359. };
  1360. unsigned i;
  1361. adjust_endianness(mr, &mrfd.data, size);
  1362. memory_region_transaction_begin();
  1363. for (i = 0; i < mr->ioeventfd_nb; ++i) {
  1364. if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
  1365. break;
  1366. }
  1367. }
  1368. ++mr->ioeventfd_nb;
  1369. mr->ioeventfds = g_realloc(mr->ioeventfds,
  1370. sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
  1371. memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
  1372. sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
  1373. mr->ioeventfds[i] = mrfd;
  1374. ioeventfd_update_pending |= mr->enabled;
  1375. memory_region_transaction_commit();
  1376. }
  1377. void memory_region_del_eventfd(MemoryRegion *mr,
  1378. hwaddr addr,
  1379. unsigned size,
  1380. bool match_data,
  1381. uint64_t data,
  1382. EventNotifier *e)
  1383. {
  1384. MemoryRegionIoeventfd mrfd = {
  1385. .addr.start = int128_make64(addr),
  1386. .addr.size = int128_make64(size),
  1387. .match_data = match_data,
  1388. .data = data,
  1389. .e = e,
  1390. };
  1391. unsigned i;
  1392. adjust_endianness(mr, &mrfd.data, size);
  1393. memory_region_transaction_begin();
  1394. for (i = 0; i < mr->ioeventfd_nb; ++i) {
  1395. if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
  1396. break;
  1397. }
  1398. }
  1399. assert(i != mr->ioeventfd_nb);
  1400. memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
  1401. sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
  1402. --mr->ioeventfd_nb;
  1403. mr->ioeventfds = g_realloc(mr->ioeventfds,
  1404. sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
  1405. ioeventfd_update_pending |= mr->enabled;
  1406. memory_region_transaction_commit();
  1407. }
  1408. static void memory_region_update_container_subregions(MemoryRegion *subregion)
  1409. {
  1410. hwaddr offset = subregion->addr;
  1411. MemoryRegion *mr = subregion->container;
  1412. MemoryRegion *other;
  1413. memory_region_transaction_begin();
  1414. memory_region_ref(subregion);
  1415. QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
  1416. if (subregion->may_overlap || other->may_overlap) {
  1417. continue;
  1418. }
  1419. if (int128_ge(int128_make64(offset),
  1420. int128_add(int128_make64(other->addr), other->size))
  1421. || int128_le(int128_add(int128_make64(offset), subregion->size),
  1422. int128_make64(other->addr))) {
  1423. continue;
  1424. }
  1425. #if 0
  1426. printf("warning: subregion collision %llx/%llx (%s) "
  1427. "vs %llx/%llx (%s)\n",
  1428. (unsigned long long)offset,
  1429. (unsigned long long)int128_get64(subregion->size),
  1430. subregion->name,
  1431. (unsigned long long)other->addr,
  1432. (unsigned long long)int128_get64(other->size),
  1433. other->name);
  1434. #endif
  1435. }
  1436. QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
  1437. if (subregion->priority >= other->priority) {
  1438. QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
  1439. goto done;
  1440. }
  1441. }
  1442. QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
  1443. done:
  1444. memory_region_update_pending |= mr->enabled && subregion->enabled;
  1445. memory_region_transaction_commit();
  1446. }
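/* Subregions are kept sorted by descending priority: a new subregion is
 * inserted in front of the first existing one whose priority it meets or
 * exceeds.  render_memory_region() walks this list in order, so
 * higher-priority subregions are rendered first and obscure overlapping,
 * lower-priority ones.
 */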
  1447. static void memory_region_add_subregion_common(MemoryRegion *mr,
  1448. hwaddr offset,
  1449. MemoryRegion *subregion)
  1450. {
  1451. assert(!subregion->container);
  1452. subregion->container = mr;
  1453. subregion->addr = offset;
  1454. memory_region_update_container_subregions(subregion);
  1455. }
  1456. void memory_region_add_subregion(MemoryRegion *mr,
  1457. hwaddr offset,
  1458. MemoryRegion *subregion)
  1459. {
  1460. subregion->may_overlap = false;
  1461. subregion->priority = 0;
  1462. memory_region_add_subregion_common(mr, offset, subregion);
  1463. }
  1464. void memory_region_add_subregion_overlap(MemoryRegion *mr,
  1465. hwaddr offset,
  1466. MemoryRegion *subregion,
  1467. int priority)
  1468. {
  1469. subregion->may_overlap = true;
  1470. subregion->priority = priority;
  1471. memory_region_add_subregion_common(mr, offset, subregion);
  1472. }

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
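
/*
 * Re-insert @mr into its container after a property change (such as its
 * address): unlink it, restore the container pointer and link it back in,
 * all inside one outer transaction so the flat views are only rebuilt
 * once at the final commit.
 */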
static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_addr;
}
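
/*
 * bsearch() comparator for FlatView lookups: an AddrRange compares equal
 * to any FlatRange it intersects, so flatview_lookup() returns *some*
 * intersecting range and callers walk backwards to find the first one.
 */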
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}
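
/*
 * Return true if something other than @container itself is mapped at
 * @addr within it (a single-byte probe via memory_region_find()).
 */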
bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr = memory_region_find(container, addr, 1).mr;

    if (!mr || (mr == container)) {
        return false;
    }
    memory_region_unref(mr);
    return true;
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}
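
/*
 * Translate a range that is relative to @mr into the flat view of the
 * address space @mr ultimately lives in, and return the first FlatRange
 * that intersects it as a MemoryRegionSection.  The returned region, if
 * any, is referenced; the caller must drop that reference when done.
 */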
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_get_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        flatview_unref(view);
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.address_space = as;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    memory_region_ref(ret.mr);

    flatview_unref(view);
    return ret;
}
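
/* Push a log_sync callback to every listener for every range in @as's
 * flat view. */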
void address_space_sync_dirty_bitmap(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
    }
    flatview_unref(view);
}

void memory_global_dirty_log_start(void)
{
    global_dirty_log = true;
    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
}

void memory_global_dirty_log_stop(void)
{
    global_dirty_log = false;
    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}
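
/*
 * Replay the current state of @as to a newly registered listener: a
 * log_global_start notification if dirty logging is active, followed by
 * one region_add() call per range in the flat view.
 */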
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->address_space_filter
        && listener->address_space_filter != as) {
        return;
    }

    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as,
            .offset_within_region = fr->offset_in_region,
            .size = fr->addr.size,
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
    flatview_unref(view);
}
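
/*
 * Register @listener, keeping the global listener list sorted by
 * ascending priority, then replay every existing address space to it
 * (subject to the optional @filter).
 */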
void memory_listener_register(MemoryListener *listener, AddressSpace *filter)
{
    MemoryListener *other = NULL;
    AddressSpace *as;

    listener->address_space_filter = filter;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        listener_add_address_space(listener, as);
    }
}

void memory_listener_unregister(MemoryListener *listener)
{
    QTAILQ_REMOVE(&memory_listeners, listener, link);
}
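
/*
 * Initialize @as with @root as its root region.  The first address space
 * to be created also triggers the one-time memory_init() call.
 */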
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    if (QTAILQ_EMPTY(&address_spaces)) {
        memory_init();
    }

    memory_region_transaction_begin();
    as->root = root;
    as->current_map = g_new(FlatView, 1);
    flatview_init(as->current_map);
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_init_dispatch(as);
    memory_region_update_pending |= root->enabled;
    memory_region_transaction_commit();
}
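
/*
 * Tear down @as: drop the root (so the committed flat view becomes empty
 * and listeners are notified), then release the dispatch structures, the
 * flat view, and the per-address-space ioeventfd state.
 */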
void address_space_destroy(AddressSpace *as)
{
    MemoryListener *listener;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
    address_space_destroy_dispatch(as);

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        assert(listener->address_space_filter != as);
    }

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
}
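
/* Thin wrappers around the MemoryRegion read/write dispatch helpers. */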
bool io_mem_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, unsigned size)
{
    return memory_region_dispatch_read(mr, addr, pval, size);
}

bool io_mem_write(MemoryRegion *mr, hwaddr addr,
                  uint64_t val, unsigned size)
{
    return memory_region_dispatch_write(mr, addr, val, size);
}
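
/*
 * Support for the "info mtree" monitor command: mtree_print_mr() prints a
 * region and, recursively, its subregions; aliased regions are queued up
 * and printed once at the end by mtree_info().
 */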
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    bool printed;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr || !mr->enabled) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, " ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias && !ml->printed) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            ml->printed = false;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                       : '-',
                   mr->name,
                   mr->alias->name,
                   mr->alias_offset,
                   mr->alias_offset
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0));
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                       : '-',
                   mr->name);
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}

void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "%s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 0, 0, &ml_head);
    }

    mon_printf(f, "aliases\n");
    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        if (!ml->printed) {
            mon_printf(f, "%s\n", ml->mr->name);
            mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
        }
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }
}
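
/* QOM boilerplate: register TYPE_MEMORY_REGION with the type system. */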
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
}

type_init(memory_register_types)