/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include "bitops.h"
#include "kvm.h"
#include <assert.h>

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

unsigned memory_region_transaction_depth = 0;
static bool memory_region_update_pending = false;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

typedef struct AddrRange AddrRange;

/*
 * Note using signed integers limits us to physical addresses at most
 * 63 bits wide. They are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

static bool memory_listener_match(MemoryListener *listener,
                                  MemoryRegionSection *section)
{
    return !listener->address_space_filter
        || listener->address_space_filter == section->address_space;
}
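
/* Invoke a callback on all registered memory listeners.  The listener list
 * is kept sorted by ascending priority (see memory_listener_register());
 * Forward walks it lowest-priority first, Reverse highest-priority first.
 */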
#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                _listener->_callback(_listener, ##_args);               \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                _listener->_callback(_listener, ##_args);               \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (memory_listener_match(_listener, _section)) {       \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (memory_listener_match(_listener, _section)) {       \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback)            \
    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {       \
        .mr = (fr)->mr,                                                 \
        .address_space = (as)->root,                                    \
        .offset_within_region = (fr)->offset_in_region,                 \
        .size = int128_get64((fr)->addr.size),                          \
        .offset_within_address_space = int128_get64((fr)->addr.start),  \
        .readonly = (fr)->readonly,                                     \
    }))

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    int fd;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.fd < b.fd) {
        return true;
    } else if (a.fd > b.fd) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool readable;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy. Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    MemoryRegion *root;
    FlatView current_map;
    int ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->readable == b->readable
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    g_free(view->ranges);
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->readable == r2->readable
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static void memory_region_read_accessor(void *opaque,
                                        target_phys_addr_t addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_write_accessor(void *opaque,
                                         target_phys_addr_t addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    mr->ops->write(mr->opaque, addr, tmp, size);
}
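
/* Perform a @size-byte access at @addr by splitting it into one or more
 * accesses of a size the device implementation supports (clamped between
 * access_size_min and access_size_max), applying @access to each chunk.
 */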
static void access_with_adjusted_size(target_phys_addr_t addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(void *opaque,
                                                     target_phys_addr_t addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      void *opaque)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        access(opaque, addr + i, value, access_size, i * 8, access_mask);
    }
}

static AddressSpace address_space_memory;

static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}

static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegionIORange *mrio
        = container_of(iorange, MemoryRegionIORange, iorange);
    MemoryRegion *mr = mrio->mr;

    offset += mrio->offset;
    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset - mrio->offset,
                                                    width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset);
        } else if (width == 2) {
            mrp = find_portio(mr, offset - mrio->offset, 1, false);
            assert(mrp);
            *data = mrp->read(mr->opaque, offset) |
                    (mrp->read(mr->opaque, offset + 1) << 8);
        }
        return;
    }
    *data = 0;
    access_with_adjusted_size(offset, data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegionIORange *mrio
        = container_of(iorange, MemoryRegionIORange, iorange);
    MemoryRegion *mr = mrio->mr;

    offset += mrio->offset;
    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset - mrio->offset,
                                                    width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset, data);
        } else if (width == 2) {
            mrp = find_portio(mr, offset - mrio->offset, 1, true);
            assert(mrp);
            mrp->write(mr->opaque, offset, data & 0xff);
            mrp->write(mr->opaque, offset + 1, data >> 8);
        }
        return;
    }
    access_with_adjusted_size(offset, &data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

static void memory_region_iorange_destructor(IORange *iorange)
{
    g_free(container_of(iorange, MemoryRegionIORange, iorange));
}

const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
    .destructor = memory_region_iorange_destructor,
};

static AddressSpace address_space_io;

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    while (mr->parent) {
        mr = mr->parent;
    }
    if (mr == address_space_memory.root) {
        return &address_space_memory;
    }
    if (mr == address_space_io.root) {
        return &address_space_io;
    }
    abort();
}

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            fr.readable = mr->readable;
            fr.readonly = readonly;
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        if (int128_eq(base, view->ranges[i].addr.start)) {
            now = int128_min(remain, view->ranges[i].addr.size);
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
    }
    if (int128_nz(remain)) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        fr.readable = mr->readable;
        fr.readonly = readonly;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, int128_zero(),
                         addrrange_make(int128_zero(), int128_2_64()), false);
    flatview_simplify(&view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as->root,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = int128_get64(fd->addr.size),
            };
            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->fd);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as->root,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = int128_get64(fd->addr.size),
            };
            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->fd);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
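
/* Rebuild the address space's ioeventfd list from the current flat view and
 * notify listeners of any additions or removals relative to the old list.
 */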
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               FlatView old_view,
                                               FlatView new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    as->current_map = new_view;
    flatview_destroy(&old_view);
    address_space_update_ioeventfds(as);
}
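
/* Recompute the flat views of both address spaces and notify listeners.
 * While a transaction is open (memory_region_transaction_depth != 0) the
 * update is only recorded as pending and is performed at commit time.
 */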
static void memory_region_update_topology(MemoryRegion *mr)
{
    if (memory_region_transaction_depth) {
        memory_region_update_pending |= !mr || mr->enabled;
        return;
    }

    if (mr && !mr->enabled) {
        return;
    }

    MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

    if (address_space_memory.root) {
        address_space_update_topology(&address_space_memory);
    }
    if (address_space_io.root) {
        address_space_update_topology(&address_space_io);
    }

    MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);

    memory_region_update_pending = false;
}

void memory_region_transaction_begin(void)
{
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth && memory_region_update_pending) {
        memory_region_update_topology(NULL);
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_iomem(MemoryRegion *mr)
{
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}
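
/* Initialize @mr as a pure container region of @size bytes with no backing
 * I/O or RAM; a size of UINT64_MAX is treated as the full 2^64-byte space.
 */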
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->addr = 0;
    mr->subpage = false;
    mr->enabled = true;
    mr->terminates = false;
    mr->ram = false;
    mr->readable = true;
    mr->readonly = false;
    mr->rom_device = false;
    mr->destructor = memory_region_destructor_none;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = g_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size,
                                       bool is_write)
{
    if (mr->ops->valid.accepts
        && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write)) {
        return false;
    }

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* For compatibility, treat a max_access_size of zero as "all sizes valid" */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

static uint64_t memory_region_dispatch_read1(MemoryRegion *mr,
                                             target_phys_addr_t addr,
                                             unsigned size)
{
    uint64_t data = 0;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        return -1U; /* FIXME: better signalling */
    }

    if (!mr->ops->read) {
        return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr);
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);

    return data;
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        default:
            abort();
        }
    }
}

static uint64_t memory_region_dispatch_read(MemoryRegion *mr,
                                            target_phys_addr_t addr,
                                            unsigned size)
{
    uint64_t ret;

    ret = memory_region_dispatch_read1(mr, addr, size);
    adjust_endianness(mr, &ret, size);
    return ret;
}

static void memory_region_dispatch_write(MemoryRegion *mr,
                                         target_phys_addr_t addr,
                                         uint64_t data,
                                         unsigned size)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        return; /* FIXME: better signalling */
    }

    adjust_endianness(mr, &data, size);

    if (!mr->ops->write) {
        mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
        return;
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_iomem;
    mr->ram_addr = ~(ram_addr_t)0;
}

void memory_region_init_ram(MemoryRegion *mr,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
    mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}

static uint64_t invalid_read(void *opaque, target_phys_addr_t addr,
                             unsigned size)
{
    MemoryRegion *mr = opaque;

    if (!mr->warning_printed) {
        fprintf(stderr, "Invalid read from memory region %s\n", mr->name);
        mr->warning_printed = true;
    }
    return -1U;
}

static void invalid_write(void *opaque, target_phys_addr_t addr, uint64_t data,
                          unsigned size)
{
    MemoryRegion *mr = opaque;

    if (!mr->warning_printed) {
        fprintf(stderr, "Invalid write to memory region %s\n", mr->name);
        mr->warning_printed = true;
    }
}

static const MemoryRegionOps reservation_ops = {
    .read = invalid_read,
    .write = invalid_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

void memory_region_init_reservation(MemoryRegion *mr,
                                    const char *name,
                                    uint64_t size)
{
    memory_region_init_io(mr, &reservation_ops, mr, name, size);
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(MemoryRegion *mr)
{
    return mr->name;
}

bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

bool memory_region_is_logging(MemoryRegion *mr)
{
    return mr->dirty_log_mask;
}

bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology(mr);
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size,
                                         1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             target_phys_addr_t size)
{
    assert(mr->terminates);
    return cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size, -1);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            MEMORY_LISTENER_UPDATE_REGION(fr, &address_space_memory,
                                          Forward, log_sync);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        mr->readonly = readonly;
        memory_region_update_topology(mr);
    }
}

void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable)
{
    if (mr->readable != readable) {
        mr->readable = readable;
        memory_region_update_topology(mr);
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(int128_get64(fr->addr.start),
                                           int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(int128_get64(tmp.start),
                                             int128_get64(tmp.size));
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    memory_region_update_topology(mr);
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    memory_region_update_topology(mr);
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (int128_gt(int128_make64(offset),
                      int128_add(int128_make64(other->addr), other->size))
            || int128_le(int128_add(int128_make64(offset), subregion->size),
                         int128_make64(other->addr))) {
            continue;
        }
#if 0
        printf("warning: subregion collision %llx/%llx (%s) "
               "vs %llx/%llx (%s)\n",
               (unsigned long long)offset,
               (unsigned long long)int128_get64(subregion->size),
               subregion->name,
               (unsigned long long)other->addr,
               (unsigned long long)int128_get64(other->size),
               other->name);
#endif
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology(mr);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology(mr);
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    mr->enabled = enabled;
    memory_region_update_topology(NULL);
}

void memory_region_set_address(MemoryRegion *mr, target_phys_addr_t addr)
{
    MemoryRegion *parent = mr->parent;
    unsigned priority = mr->priority;
    bool may_overlap = mr->may_overlap;

    if (addr == mr->addr || !parent) {
        mr->addr = addr;
        return;
    }

    memory_region_transaction_begin();
    memory_region_del_subregion(parent, mr);
    if (may_overlap) {
        memory_region_add_subregion_overlap(parent, addr, mr, priority);
    } else {
        memory_region_add_subregion(parent, addr, mr);
    }
    memory_region_transaction_commit();
}

void memory_region_set_alias_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    target_phys_addr_t old_offset = mr->alias_offset;

    assert(mr->alias);
    mr->alias_offset = offset;

    if (offset == old_offset || !mr->parent) {
        return;
    }

    memory_region_update_topology(mr);
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_addr;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *address_space_lookup(AddressSpace *as, AddrRange addr)
{
    return bsearch(&addr, as->current_map.ranges, as->current_map.nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}
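
/* Locate the lowest flat range that intersects [addr, addr+size) in the
 * address space rooted at @address_space and describe it as a
 * MemoryRegionSection; a NULL .mr in the result means nothing was found.
 */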
MemoryRegionSection memory_region_find(MemoryRegion *address_space,
                                       target_phys_addr_t addr, uint64_t size)
{
    AddressSpace *as = memory_region_to_address_space(address_space);
    AddrRange range = addrrange_make(int128_make64(addr),
                                     int128_make64(size));
    FlatRange *fr = address_space_lookup(as, range);
    MemoryRegionSection ret = { .mr = NULL, .size = 0 };

    if (!fr) {
        return ret;
    }

    while (fr > as->current_map.ranges
           && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = int128_get64(range.size);
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}

void memory_global_sync_dirty_bitmap(MemoryRegion *address_space)
{
    AddressSpace *as = memory_region_to_address_space(address_space);
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
    }
}

void memory_global_dirty_log_start(void)
{
    global_dirty_log = true;
    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
}

void memory_global_dirty_log_stop(void)
{
    global_dirty_log = false;
    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatRange *fr;

    if (listener->address_space_filter
        && listener->address_space_filter != as->root) {
        return;
    }

    if (global_dirty_log) {
        listener->log_global_start(listener);
    }
    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as->root,
            .offset_within_region = fr->offset_in_region,
            .size = int128_get64(fr->addr.size),
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        listener->region_add(listener, &section);
    }
}

void memory_listener_register(MemoryListener *listener, MemoryRegion *filter)
{
    MemoryListener *other = NULL;

    listener->address_space_filter = filter;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }
    listener_add_address_space(listener, &address_space_memory);
    listener_add_address_space(listener, &address_space_io);
}

void memory_listener_unregister(MemoryListener *listener)
{
    QTAILQ_REMOVE(&memory_listeners, listener, link);
}

void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology(NULL);
}

void set_system_io_map(MemoryRegion *mr)
{
    address_space_io.root = mr;
    memory_region_update_topology(NULL);
}

uint64_t io_mem_read(MemoryRegion *mr, target_phys_addr_t addr, unsigned size)
{
    return memory_region_dispatch_read(mr, addr, size);
}

void io_mem_write(MemoryRegion *mr, target_phys_addr_t addr,
                  uint64_t val, unsigned size)
{
    memory_region_dispatch_write(mr, addr, val, size);
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    bool printed;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           target_phys_addr_t base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, " ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias && !ml->printed) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            ml->printed = false;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "\n",
                   base + mr->addr,
                   base + mr->addr
                   + (target_phys_addr_t)int128_get64(mr->size) - 1,
                   mr->priority,
                   mr->readable ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->readable) ? 'W'
                                                                      : '-',
                   mr->name,
                   mr->alias->name,
                   mr->alias_offset,
                   mr->alias_offset
                   + (target_phys_addr_t)int128_get64(mr->size) - 1);
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (target_phys_addr_t)int128_get64(mr->size) - 1,
                   mr->priority,
                   mr->readable ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->readable) ? 'W'
                                                                      : '-',
                   mr->name);
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}

void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;

    QTAILQ_INIT(&ml_head);

    mon_printf(f, "memory\n");
    mtree_print_mr(mon_printf, f, address_space_memory.root, 0, 0, &ml_head);

    if (address_space_io.root &&
        !QTAILQ_EMPTY(&address_space_io.root->subregions)) {
        mon_printf(f, "I/O\n");
        mtree_print_mr(mon_printf, f, address_space_io.root, 0, 0, &ml_head);
    }

    mon_printf(f, "aliases\n");
    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        if (!ml->printed) {
            mon_printf(f, "%s\n", ml->mr->name);
            mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
        }
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }
}