memory.c

/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include "bitops.h"
#include "kvm.h"
#include <assert.h>

unsigned memory_region_transaction_depth = 0;

typedef struct AddrRange AddrRange;

/*
 * Note using signed integers limits us to physical addresses at most
 * 63 bits wide. They are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    int fd;
};
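/* Total order on ioeventfds: by start address, then size, then match_data,
 * then data (only compared when match_data is set), and finally fd.  Keeping
 * ioeventfd arrays sorted by this order lets the update code diff the old
 * and new sets below.
 */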
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.fd < b.fd) {
        return true;
    } else if (a.fd > b.fd) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool readable;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy. Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops;
    MemoryRegion *root;
    FlatView current_map;
    int ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};
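/* Per-address-space callbacks, invoked as flat ranges and ioeventfds appear
 * in or disappear from the flattened view of that address space.
 */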
struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
    void (*ioeventfd_add)(AddressSpace *as, MemoryRegionIoeventfd *fd);
    void (*ioeventfd_del)(AddressSpace *as, MemoryRegionIoeventfd *fd);
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->readable == b->readable
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    g_free(view->ranges);
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->readable == r2->readable
        && r1->readonly == r2->readonly;
}
/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
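/* Accessors used by access_with_adjusted_size(): assemble a full read result
 * from partial device reads, or slice a full value into partial device
 * writes, using the shift and mask supplied by the caller.
 */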
static void memory_region_read_accessor(void *opaque,
                                        target_phys_addr_t addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_write_accessor(void *opaque,
                                         target_phys_addr_t addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    mr->ops->write(mr->opaque, addr, tmp, size);
}
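/* Split an access of @size bytes into one or more device accesses whose
 * width lies between @access_size_min and @access_size_max, invoking @access
 * for each piece with the appropriate shift and mask.  A zero min/max means
 * "unspecified" and defaults to 1 and 4 bytes respectively.
 */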
static void access_with_adjusted_size(target_phys_addr_t addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(void *opaque,
                                                     target_phys_addr_t addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      void *opaque)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        access(opaque, addr + i, value, access_size, i * 8, access_mask);
    }
}

static void memory_region_prepare_ram_addr(MemoryRegion *mr);
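/* Memory address space callbacks: bridge the flattened view to the legacy
 * cpu_register_physical_memory*() and dirty-logging interfaces.
 */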
static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    ram_addr_t phys_offset, region_offset;

    memory_region_prepare_ram_addr(fr->mr);

    phys_offset = fr->mr->ram_addr;
    region_offset = fr->offset_in_region;
    /* cpu_register_physical_memory_log() wants region_offset for
     * mmio, but prefers offsetting phys_offset for RAM. Humour it.
     */
    if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    if (!fr->readable) {
        phys_offset &= ~TARGET_PAGE_MASK & ~IO_MEM_ROMD;
    }

    if (fr->readonly) {
        phys_offset |= IO_MEM_ROM;
    }

    cpu_register_physical_memory_log(int128_get64(fr->addr.start),
                                     int128_get64(fr->addr.size),
                                     phys_offset,
                                     region_offset,
                                     fr->dirty_log_mask);
}
static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    if (fr->dirty_log_mask) {
        Int128 end = addrrange_end(fr->addr);
        cpu_physical_sync_dirty_bitmap(int128_get64(fr->addr.start),
                                       int128_get64(end));
    }
    cpu_register_physical_memory(int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size),
                                 IO_MEM_UNASSIGNED);
}

static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_start(int128_get64(fr->addr.start),
                           int128_get64(fr->addr.size));
}

static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_stop(int128_get64(fr->addr.start),
                          int128_get64(fr->addr.size));
}

static void as_memory_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && int128_get64(fd->addr.size) == 4);

    r = kvm_set_ioeventfd_mmio_long(fd->fd, int128_get64(fd->addr.start),
                                    fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_memory_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_mmio_long(fd->fd, int128_get64(fd->addr.start),
                                    fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
    .ioeventfd_add = as_memory_ioeventfd_add,
    .ioeventfd_del = as_memory_ioeventfd_del,
};

static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};
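/* Compatibility path for devices still using MemoryRegionOps::old_portio:
 * find the portio entry covering @offset with the requested width and
 * direction, if any.
 */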
static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}

static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset + mr->offset);
        } else if (width == 2) {
            mrp = find_portio(mr, offset, 1, false);
            assert(mrp);
            *data = mrp->read(mr->opaque, offset + mr->offset) |
                    (mrp->read(mr->opaque, offset + mr->offset + 1) << 8);
        }
        return;
    }
    *data = 0;
    access_with_adjusted_size(offset + mr->offset, data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset + mr->offset, data);
        } else if (width == 2) {
            mrp = find_portio(mr, offset, 1, false);
            assert(mrp);
            mrp->write(mr->opaque, offset + mr->offset, data & 0xff);
            mrp->write(mr->opaque, offset + mr->offset + 1, data >> 8);
        }
        return;
    }
    access_with_adjusted_size(offset + mr->offset, &data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

static const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
};
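/* I/O address space callbacks: register and unregister flat ranges with the
 * legacy ioport layer.
 */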
static void as_io_range_add(AddressSpace *as, FlatRange *fr)
{
    iorange_init(&fr->mr->iorange, &memory_region_iorange_ops,
                 int128_get64(fr->addr.start), int128_get64(fr->addr.size));
    ioport_register(&fr->mr->iorange);
}

static void as_io_range_del(AddressSpace *as, FlatRange *fr)
{
    isa_unassign_ioport(int128_get64(fr->addr.start),
                        int128_get64(fr->addr.size));
}

static void as_io_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && int128_get64(fd->addr.size) == 2);

    r = kvm_set_ioeventfd_pio_word(fd->fd, int128_get64(fd->addr.start),
                                   fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_io_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_pio_word(fd->fd, int128_get64(fd->addr.start),
                                   fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_io = {
    .range_add = as_io_range_add,
    .range_del = as_io_range_del,
    .ioeventfd_add = as_io_ioeventfd_add,
    .ioeventfd_del = as_io_ioeventfd_del,
};

static AddressSpace address_space_io = {
    .ops = &address_space_ops_io,
};

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
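/* The region tree is walked depth-first: aliases are followed by adjusting
 * @base, subregions (kept sorted by descending priority) are rendered first,
 * and finally the region itself fills whatever gaps remain within @clip.
 */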
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            fr.readable = mr->readable;
            fr.readonly = readonly;
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        if (int128_eq(base, view->ranges[i].addr.start)) {
            now = int128_min(remain, view->ranges[i].addr.size);
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
    }
    if (int128_nz(remain)) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        fr.readable = mr->readable;
        fr.readonly = readonly;
        flatview_insert(view, i, &fr);
    }
}
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, int128_zero(),
                         addrrange_make(int128_zero(), int128_2_64()), false);
    flatview_simplify(&view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            as->ops->ioeventfd_del(as, &fds_old[iold]);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            as->ops->ioeventfd_add(as, &fds_new[inew]);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
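/* Recompute the ioeventfds visible in @as: shift each region-relative
 * ioeventfd address into the absolute flat range it lands in, then diff the
 * result against the previously registered set.
 */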
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               FlatView old_view,
                                               FlatView new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            if (!adding) {
                as->ops->range_del(as, frold);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (adding) {
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    as->ops->log_stop(as, frnew);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    as->ops->log_start(as, frnew);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                as->ops->range_add(as, frnew);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    as->current_map = new_view;
    flatview_destroy(&old_view);
    address_space_update_ioeventfds(as);
}
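/* Rebuild the flat views of both address spaces.  This is a no-op while a
 * transaction is open; the final memory_region_transaction_commit() triggers
 * the rebuild.
 */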
static void memory_region_update_topology(void)
{
    if (memory_region_transaction_depth) {
        return;
    }

    if (address_space_memory.root) {
        address_space_update_topology(&address_space_memory);
    }

    if (address_space_io.root) {
        address_space_update_topology(&address_space_io);
    }
}

void memory_region_transaction_begin(void)
{
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;

    memory_region_update_topology();
}
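/* Destructors, chosen at init time, release whatever backend (RAM block,
 * registered I/O slot, or both) the region acquired.
 */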
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_iomem(MemoryRegion *mr)
{
    cpu_unregister_io_memory(mr->ram_addr);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
    cpu_unregister_io_memory(mr->ram_addr & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->addr = 0;
    mr->offset = 0;
    mr->terminates = false;
    mr->readable = true;
    mr->readonly = false;
    mr->destructor = memory_region_destructor_none;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = g_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
}
static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat a zero max_access_size as "all access sizes valid", for
     * compatibility with ops that do not fill in .valid.
     */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}
static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    uint64_t data = 0;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    if (!mr->ops->read) {
        return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr);
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr + mr->offset, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);

    return data;
}

static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    if (!mr->ops->write) {
        mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
        return;
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr + mr->offset, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};
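/* Lazily register an I/O region with the legacy cpu_register_io_memory()
 * backend the first time it is rendered into the memory map.
 */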
static void memory_region_prepare_ram_addr(MemoryRegion *mr)
{
    if (mr->backend_registered) {
        return;
    }

    mr->destructor = memory_region_destructor_iomem;
    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
    mr->backend_registered = true;
}

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->backend_registered = false;
}

void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->backend_registered = true;
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
    mr->backend_registered = true;
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   DeviceState *dev,
                                   const char *name,
                                   uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->ram_addr |= cpu_register_io_memory(memory_region_read_thunk,
                                           memory_region_write_thunk,
                                           mr,
                                           mr->ops->endianness);
    mr->ram_addr |= IO_MEM_ROMD;
    mr->backend_registered = true;
}
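/* Typical usage (illustrative only; the device state, ops table, name and
 * 0x1000 offset below are hypothetical, not defined in this file):
 *
 *   memory_region_init_io(&s->mmio, &my_device_ops, s, "mydev-mmio", 0x100);
 *   memory_region_add_subregion(system_memory, 0x1000, &s->mmio);
 *
 * where system_memory is the root region passed to set_system_memory_map().
 */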
void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->terminates);
    return cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(int128_get64(fr->addr.start),
                                           int128_get64(addrrange_end(fr->addr)));
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        mr->readonly = readonly;
        memory_region_update_topology();
    }
}

void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable)
{
    if (mr->readable != readable) {
        mr->readable = readable;
        memory_region_update_topology();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(int128_get64(fr->addr.start),
                                           int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(int128_get64(tmp.start),
                                             int128_get64(tmp.size));
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}
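/* Insert the new ioeventfd keeping mr->ioeventfds sorted by
 * memory_region_ioeventfd_before(), so the per-address-space update code can
 * diff the sorted sets.
 */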
void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    memory_region_update_topology();
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb + 1);
    memory_region_update_topology();
}
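/* Link @subregion into @mr, keeping the subregion list sorted by descending
 * priority.  The overlap check is informational only; its printf is compiled
 * out below.
 */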
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (int128_gt(int128_make64(offset),
                      int128_add(int128_make64(other->addr), other->size))
            || int128_le(int128_add(int128_make64(offset), subregion->size),
                         int128_make64(other->addr))) {
            continue;
        }
#if 0
        printf("warning: subregion collision %llx/%llx (%s) "
               "vs %llx/%llx (%s)\n",
               (unsigned long long)offset,
               (unsigned long long)int128_get64(subregion->size),
               subregion->name,
               (unsigned long long)other->addr,
               (unsigned long long)int128_get64(other->size),
               other->name);
#endif
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}

void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology();
}

void set_system_io_map(MemoryRegion *mr)
{
    address_space_io.root = mr;
    memory_region_update_topology();
}
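/* The remainder of this file implements the monitor's 'info mtree' output:
 * print the region tree of the memory and I/O address spaces, plus any
 * aliased regions encountered along the way.
 */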
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    bool printed;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           target_phys_addr_t base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, " ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias && !ml->printed) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            ml->printed = false;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): alias %s @%s "
                   TARGET_FMT_plx "-" TARGET_FMT_plx "\n",
                   base + mr->addr,
                   base + mr->addr
                   + (target_phys_addr_t)int128_get64(mr->size) - 1,
                   mr->priority,
                   mr->name,
                   mr->alias->name,
                   mr->alias_offset,
                   mr->alias_offset
                   + (target_phys_addr_t)int128_get64(mr->size) - 1);
    } else {
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): %s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (target_phys_addr_t)int128_get64(mr->size) - 1,
                   mr->priority,
                   mr->name);
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}

void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;

    QTAILQ_INIT(&ml_head);

    mon_printf(f, "memory\n");
    mtree_print_mr(mon_printf, f, address_space_memory.root, 0, 0, &ml_head);

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        if (!ml->printed) {
            mon_printf(f, "%s\n", ml->mr->name);
            mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
        }
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }

    if (address_space_io.root &&
        !QTAILQ_EMPTY(&address_space_io.root->subregions)) {
        QTAILQ_INIT(&ml_head);
        mon_printf(f, "I/O\n");
        mtree_print_mr(mon_printf, f, address_space_io.root, 0, 0, &ml_head);
    }
}
  1243. }