memory.c

/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "exec/memory.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "system/kvm.h"
#include "system/runstate.h"
#include "system/tcg.h"
#include "qemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"
#include "exec/address-spaces.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
unsigned int global_dirty_tracking;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };
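
/*
 * Walk all registered MemoryListeners (kept sorted by priority) and invoke
 * the given callback on each one, either in priority order or in reverse.
 */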
#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) \
    do { \
        MemoryListener *_listener; \
        \
        switch (_direction) { \
        case Forward: \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, ##_args); \
                } \
            } \
            break; \
        case Reverse: \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, ##_args); \
                } \
            } \
            break; \
        default: \
            abort(); \
        } \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do { \
        MemoryListener *_listener; \
        \
        switch (_direction) { \
        case Forward: \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, _section, ##_args); \
                } \
            } \
            break; \
        case Reverse: \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, _section, ##_args); \
                } \
            } \
            break; \
        default: \
            abort(); \
        } \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
    do { \
        MemoryRegionSection mrs = section_from_flat_range(fr, \
                address_space_to_flatview(as)); \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args); \
    } while(0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};
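
/*
 * Total order on ioeventfds: by address, then size, then match_data/data,
 * then notifier.  Used to keep the per-AddressSpace ioeventfd arrays sorted
 * so that old and new sets can be diffed in a single merge pass.
 */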
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    if (int128_eq(a->addr.start, b->addr.start) &&
        (!int128_nz(a->addr.size) || !int128_nz(b->addr.size) ||
         (int128_eq(a->addr.size, b->addr.size) &&
          (a->match_data == b->match_data) &&
          ((a->match_data && (a->data == b->data)) || !a->match_data) &&
          (a->e == b->e))))
        return true;

    return false;
}

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
    bool unmergeable;
};

#define FOR_EACH_FLAT_RANGE(var, view) \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
        .unmergeable = fr->unmergeable,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile
        && a->unmergeable == b->unmergeable;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return qatomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (qatomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile
        && !r1->unmergeable && !r2->unmergeable;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#if TARGET_BIG_ENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}
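
/*
 * Byte-swap the value if the endianness requested by the access (MemOp)
 * differs from the endianness declared by the device's MemoryRegionOps.
 */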
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
        switch (op & MO_SIZE) {
        case MO_8:
            break;
        case MO_16:
            *data = bswap16(*data);
            break;
        case MO_32:
            *data = bswap32(*data);
            break;
        case MO_64:
            *data = bswap64(*data);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
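
/*
 * Merge a partial result (tmp, masked) into *value at bit offset |shift|;
 * a negative shift moves the masked bits right instead of left.  The
 * write-side helper below extracts the corresponding bits from *value.
 */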
static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
                                     memory_region_name(mr));
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
                                     memory_region_name(mr));
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
                                      memory_region_name(mr));
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
                                      memory_region_name(mr));
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
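
/*
 * Split an access that the device cannot handle in one piece into a series
 * of accesses between access_size_min and access_size_max bytes, and let
 * access_fn merge the partial results into *value at the right bit offsets.
 */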
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                 (MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t *value,
                                                  unsigned size,
                                                  signed shift,
                                                  uint64_t mask,
                                                  MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;
    bool reentrancy_guard_applied = false;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* Do not allow more than one simultaneous access to a device's IO Regions */
    if (mr->dev && !mr->disable_reentrancy_guard &&
        !mr->ram_device && !mr->ram && !mr->rom_device && !mr->readonly) {
        if (mr->dev->mem_reentrancy_guard.engaged_in_io) {
            warn_report_once("Blocked re-entrant IO on MemoryRegion: "
                             "%s at addr: 0x%" HWADDR_PRIX,
                             memory_region_name(mr), addr);
            return MEMTX_ACCESS_ERROR;
        }
        mr->dev->mem_reentrancy_guard.engaged_in_io = true;
        reentrancy_guard_applied = true;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    if (mr->dev && reentrancy_guard_applied) {
        mr->dev->mem_reentrancy_guard.engaged_in_io = false;
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile,
                                 bool unmergeable)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;
    unmergeable |= mr->unmergeable;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile, unmergeable);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile, unmergeable);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;
    fr.unmergeable = unmergeable;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque)
{
    FlatRange *fr;

    assert(fv);
    assert(cb);

    FOR_EACH_FLAT_RANGE(fr, fv) {
        if (cb(fr->addr.start, fr->addr.size, fr->mr,
               fr->offset_in_region, opaque)) {
            break;
        }
    }
}
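
/*
 * Walk down from an address space root, skipping aliases and single enabled
 * children that cover the whole region, so that address spaces with
 * equivalent topologies can share one FlatView.
 */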
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;

            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way.  This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    RCU_READ_LOCK_GUARD();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));

    return view;
}
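
/*
 * Rebuild the address space's ioeventfd array from the current FlatView and
 * notify listeners about entries that were added or removed.
 */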
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    unsigned ioeventfd_max;
    MemoryRegionIoeventfd *ioeventfds;
    AddrRange tmp;
    unsigned i;

    if (!as->ioeventfd_notifiers) {
        return;
    }

    /*
     * It is likely that the number of ioeventfds hasn't changed much, so use
     * the previous size as the starting value, with some headroom to avoid
     * gratuitous reallocations.
     */
    ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
    ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                if (ioeventfd_nb > ioeventfd_max) {
                    ioeventfd_max = MAX(ioeventfd_max * 2, 4);
                    ioeventfds = g_realloc(ioeventfds,
                                           ioeventfd_max * sizeof(*ioeventfds));
                }
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

/*
 * Notify the memory listeners about the coalesced IO change events of
 * range `cmr'.  Only the part that has intersection of the specified
 * FlatRange will be sent.
 */
static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
                                           CoalescedMemoryRange *cmr, bool add)
{
    AddrRange tmp;

    tmp = addrrange_shift(cmr->addr,
                          int128_sub(fr->addr.start,
                                     int128_make64(fr->offset_in_region)));
    if (!addrrange_intersects(tmp, fr->addr)) {
        return;
    }
    tmp = addrrange_intersection(tmp, fr->addr);

    if (add) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    } else {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}

static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    CoalescedMemoryRange *cmr;

    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, false);
    }
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, true);
    }
}

static void
flat_range_coalesced_io_notify_listener_add_del(FlatRange *fr,
                                                MemoryRegionSection *mrs,
                                                MemoryListener *listener,
                                                AddressSpace *as, bool add)
{
    CoalescedMemoryRange *cmr;
    MemoryRegion *mr = fr->mr;
    AddrRange tmp;

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        tmp = addrrange_shift(cmr->addr,
                              int128_sub(fr->addr.start,
                                         int128_make64(fr->offset_in_region)));
        if (!addrrange_intersects(tmp, fr->addr)) {
            return;
        }
        tmp = addrrange_intersection(tmp, fr->addr);

        if (add && listener->coalesced_io_add) {
            listener->coalesced_io_add(listener, mrs,
                                       int128_get64(tmp.start),
                                       int128_get64(tmp.size));
        } else if (!add && listener->coalesced_io_del) {
            listener->coalesced_io_del(listener, mrs,
                                       int128_get64(tmp.start),
                                       int128_get64(tmp.size));
        }
    }
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}
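
/*
 * flat_views caches one FlatView per distinct flatview root region; a NULL
 * key maps to a shared, permanently referenced empty view.
 */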
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL.  */
    qatomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}
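
/*
 * Transactions nest; flat views, listeners and ioeventfds are only brought
 * up to date when the outermost transaction commits.
 */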
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(bql_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}
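
/*
 * Escape characters that are special in QOM path names ('/', '[', ']', '\')
 * as a four-character \xNN hex sequence.
 */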
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->dev = (DeviceState *) object_dynamic_cast(mr->owner, TYPE_DEVICE);
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = machine_get_container("unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr));
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    char *path = (char *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL);
    op->resolve = memory_region_resolve_container;

    object_property_add_uint64_ptr(OBJECT(mr), "addr",
                                   &mr->addr, OBJ_PROP_FLAG_READ);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " HWADDR_FMT_plx "\n", addr);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " HWADDR_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
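
/*
 * ram_device regions are backed by host memory, but are accessed through
 * explicit loads and stores of the requested size in host endianness rather
 * than being mapped directly.
 */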
  1196. static uint64_t memory_region_ram_device_read(void *opaque,
  1197. hwaddr addr, unsigned size)
  1198. {
  1199. MemoryRegion *mr = opaque;
  1200. uint64_t data = ldn_he_p(mr->ram_block->host + addr, size);
  1201. trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
  1202. return data;
  1203. }
  1204. static void memory_region_ram_device_write(void *opaque, hwaddr addr,
  1205. uint64_t data, unsigned size)
  1206. {
  1207. MemoryRegion *mr = opaque;
  1208. trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
  1209. stn_he_p(mr->ram_block->host + addr, size, data);
  1210. }
  1211. static const MemoryRegionOps ram_device_mem_ops = {
  1212. .read = memory_region_ram_device_read,
  1213. .write = memory_region_ram_device_write,
  1214. .endianness = DEVICE_HOST_ENDIAN,
  1215. .valid = {
  1216. .min_access_size = 1,
  1217. .max_access_size = 8,
  1218. .unaligned = true,
  1219. },
  1220. .impl = {
  1221. .min_access_size = 1,
  1222. .max_access_size = 8,
  1223. .unaligned = true,
  1224. },
  1225. };
  1226. bool memory_region_access_valid(MemoryRegion *mr,
  1227. hwaddr addr,
  1228. unsigned size,
  1229. bool is_write,
  1230. MemTxAttrs attrs)
  1231. {
  1232. if (mr->ops->valid.accepts
  1233. && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
  1234. qemu_log_mask(LOG_INVALID_MEM, "Invalid %s at addr 0x%" HWADDR_PRIX
  1235. ", size %u, region '%s', reason: rejected\n",
  1236. is_write ? "write" : "read",
  1237. addr, size, memory_region_name(mr));
  1238. return false;
  1239. }
  1240. if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
  1241. qemu_log_mask(LOG_INVALID_MEM, "Invalid %s at addr 0x%" HWADDR_PRIX
  1242. ", size %u, region '%s', reason: unaligned\n",
  1243. is_write ? "write" : "read",
  1244. addr, size, memory_region_name(mr));
  1245. return false;
  1246. }
  1247. /* Treat zero as compatibility all valid */
  1248. if (!mr->ops->valid.max_access_size) {
  1249. return true;
  1250. }
  1251. if (size > mr->ops->valid.max_access_size
  1252. || size < mr->ops->valid.min_access_size) {
  1253. qemu_log_mask(LOG_INVALID_MEM, "Invalid %s at addr 0x%" HWADDR_PRIX
  1254. ", size %u, region '%s', reason: invalid size "
  1255. "(min:%u max:%u)\n",
  1256. is_write ? "write" : "read",
  1257. addr, size, memory_region_name(mr),
  1258. mr->ops->valid.min_access_size,
  1259. mr->ops->valid.max_access_size);
  1260. return false;
  1261. }
  1262. return true;
  1263. }
  1264. static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
  1265. hwaddr addr,
  1266. uint64_t *pval,
  1267. unsigned size,
  1268. MemTxAttrs attrs)
  1269. {
  1270. *pval = 0;
  1271. if (mr->ops->read) {
  1272. return access_with_adjusted_size(addr, pval, size,
  1273. mr->ops->impl.min_access_size,
  1274. mr->ops->impl.max_access_size,
  1275. memory_region_read_accessor,
  1276. mr, attrs);
  1277. } else {
  1278. return access_with_adjusted_size(addr, pval, size,
  1279. mr->ops->impl.min_access_size,
  1280. mr->ops->impl.max_access_size,
  1281. memory_region_read_with_attrs_accessor,
  1282. mr, attrs);
  1283. }
  1284. }
  1285. MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
  1286. hwaddr addr,
  1287. uint64_t *pval,
  1288. MemOp op,
  1289. MemTxAttrs attrs)
  1290. {
  1291. unsigned size = memop_size(op);
  1292. MemTxResult r;
  1293. if (mr->alias) {
  1294. return memory_region_dispatch_read(mr->alias,
  1295. mr->alias_offset + addr,
  1296. pval, op, attrs);
  1297. }
  1298. if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
  1299. *pval = unassigned_mem_read(mr, addr, size);
  1300. return MEMTX_DECODE_ERROR;
  1301. }
  1302. r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
  1303. adjust_endianness(mr, pval, op);
  1304. return r;
  1305. }
  1306. /* Return true if an eventfd was signalled */
  1307. static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
  1308. hwaddr addr,
  1309. uint64_t data,
  1310. unsigned size,
  1311. MemTxAttrs attrs)
  1312. {
  1313. MemoryRegionIoeventfd ioeventfd = {
  1314. .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
  1315. .data = data,
  1316. };
  1317. unsigned i;
  1318. for (i = 0; i < mr->ioeventfd_nb; i++) {
  1319. ioeventfd.match_data = mr->ioeventfds[i].match_data;
  1320. ioeventfd.e = mr->ioeventfds[i].e;
  1321. if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
  1322. event_notifier_set(ioeventfd.e);
  1323. return true;
  1324. }
  1325. }
  1326. return false;
  1327. }
  1328. MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
  1329. hwaddr addr,
  1330. uint64_t data,
  1331. MemOp op,
  1332. MemTxAttrs attrs)
  1333. {
  1334. unsigned size = memop_size(op);
  1335. if (mr->alias) {
  1336. return memory_region_dispatch_write(mr->alias,
  1337. mr->alias_offset + addr,
  1338. data, op, attrs);
  1339. }
  1340. if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
  1341. unassigned_mem_write(mr, addr, data, size);
  1342. return MEMTX_DECODE_ERROR;
  1343. }
  1344. adjust_endianness(mr, &data, op);
  1345. /*
  1346. * FIXME: it's not clear why under KVM the write would be processed
  1347. * directly, instead of going through eventfd. This probably should
  1348. * test "tcg_enabled() || qtest_enabled()", or should just go away.
  1349. */
  1350. if (!kvm_enabled() &&
  1351. memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
  1352. return MEMTX_OK;
  1353. }
  1354. if (mr->ops->write) {
  1355. return access_with_adjusted_size(addr, &data, size,
  1356. mr->ops->impl.min_access_size,
  1357. mr->ops->impl.max_access_size,
  1358. memory_region_write_accessor, mr,
  1359. attrs);
  1360. } else {
  1361. return
  1362. access_with_adjusted_size(addr, &data, size,
  1363. mr->ops->impl.min_access_size,
  1364. mr->ops->impl.max_access_size,
  1365. memory_region_write_with_attrs_accessor,
  1366. mr, attrs);
  1367. }
  1368. }
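/*
 * Typical device code pairs memory_region_init_io() with
 * memory_region_add_subregion() on a container such as the system memory
 * region.  A minimal sketch follows; the state struct, ops table, region
 * name and base address are hypothetical, not taken from this file:
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &my_dev_ops, s,
 *                           "my-dev-mmio", 0x1000);
 *     memory_region_add_subregion(get_system_memory(), 0xfeb00000,
 *                                 &s->mmio);
 */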
  1369. void memory_region_init_io(MemoryRegion *mr,
  1370. Object *owner,
  1371. const MemoryRegionOps *ops,
  1372. void *opaque,
  1373. const char *name,
  1374. uint64_t size)
  1375. {
  1376. memory_region_init(mr, owner, name, size);
  1377. mr->ops = ops ? ops : &unassigned_mem_ops;
  1378. mr->opaque = opaque;
  1379. mr->terminates = true;
  1380. }
  1381. bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
  1382. Object *owner,
  1383. const char *name,
  1384. uint64_t size,
  1385. Error **errp)
  1386. {
  1387. return memory_region_init_ram_flags_nomigrate(mr, owner, name,
  1388. size, 0, errp);
  1389. }
  1390. bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
  1391. Object *owner,
  1392. const char *name,
  1393. uint64_t size,
  1394. uint32_t ram_flags,
  1395. Error **errp)
  1396. {
  1397. Error *err = NULL;
  1398. memory_region_init(mr, owner, name, size);
  1399. mr->ram = true;
  1400. mr->terminates = true;
  1401. mr->destructor = memory_region_destructor_ram;
  1402. mr->ram_block = qemu_ram_alloc(size, ram_flags, mr, &err);
  1403. if (err) {
  1404. mr->size = int128_zero();
  1405. object_unparent(OBJECT(mr));
  1406. error_propagate(errp, err);
  1407. return false;
  1408. }
  1409. return true;
  1410. }
  1411. bool memory_region_init_resizeable_ram(MemoryRegion *mr,
  1412. Object *owner,
  1413. const char *name,
  1414. uint64_t size,
  1415. uint64_t max_size,
  1416. void (*resized)(const char*,
  1417. uint64_t length,
  1418. void *host),
  1419. Error **errp)
  1420. {
  1421. Error *err = NULL;
  1422. memory_region_init(mr, owner, name, size);
  1423. mr->ram = true;
  1424. mr->terminates = true;
  1425. mr->destructor = memory_region_destructor_ram;
  1426. mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
  1427. mr, &err);
  1428. if (err) {
  1429. mr->size = int128_zero();
  1430. object_unparent(OBJECT(mr));
  1431. error_propagate(errp, err);
  1432. return false;
  1433. }
  1434. return true;
  1435. }
  1436. #ifdef CONFIG_POSIX
  1437. bool memory_region_init_ram_from_file(MemoryRegion *mr,
  1438. Object *owner,
  1439. const char *name,
  1440. uint64_t size,
  1441. uint64_t align,
  1442. uint32_t ram_flags,
  1443. const char *path,
  1444. ram_addr_t offset,
  1445. Error **errp)
  1446. {
  1447. Error *err = NULL;
  1448. memory_region_init(mr, owner, name, size);
  1449. mr->ram = true;
  1450. mr->readonly = !!(ram_flags & RAM_READONLY);
  1451. mr->terminates = true;
  1452. mr->destructor = memory_region_destructor_ram;
  1453. mr->align = align;
  1454. mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path,
  1455. offset, &err);
  1456. if (err) {
  1457. mr->size = int128_zero();
  1458. object_unparent(OBJECT(mr));
  1459. error_propagate(errp, err);
  1460. return false;
  1461. }
  1462. return true;
  1463. }
  1464. bool memory_region_init_ram_from_fd(MemoryRegion *mr,
  1465. Object *owner,
  1466. const char *name,
  1467. uint64_t size,
  1468. uint32_t ram_flags,
  1469. int fd,
  1470. ram_addr_t offset,
  1471. Error **errp)
  1472. {
  1473. Error *err = NULL;
  1474. memory_region_init(mr, owner, name, size);
  1475. mr->ram = true;
  1476. mr->readonly = !!(ram_flags & RAM_READONLY);
  1477. mr->terminates = true;
  1478. mr->destructor = memory_region_destructor_ram;
  1479. mr->ram_block = qemu_ram_alloc_from_fd(size, size, NULL, mr, ram_flags, fd,
  1480. offset, false, &err);
  1481. if (err) {
  1482. mr->size = int128_zero();
  1483. object_unparent(OBJECT(mr));
  1484. error_propagate(errp, err);
  1485. return false;
  1486. }
  1487. return true;
  1488. }
  1489. #endif
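/*
 * The *_ptr initializers below wrap host memory that the caller already
 * owns.  The plain RAM variant is accessed directly like any other RAM
 * block, while the ram_device variant routes guest accesses through
 * ram_device_mem_ops above (used, for example, for device memory mapped
 * by VFIO).
 */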
  1490. void memory_region_init_ram_ptr(MemoryRegion *mr,
  1491. Object *owner,
  1492. const char *name,
  1493. uint64_t size,
  1494. void *ptr)
  1495. {
  1496. memory_region_init(mr, owner, name, size);
  1497. mr->ram = true;
  1498. mr->terminates = true;
  1499. mr->destructor = memory_region_destructor_ram;
  1500. /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
  1501. assert(ptr != NULL);
  1502. mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_abort);
  1503. }
  1504. void memory_region_init_ram_device_ptr(MemoryRegion *mr,
  1505. Object *owner,
  1506. const char *name,
  1507. uint64_t size,
  1508. void *ptr)
  1509. {
  1510. memory_region_init(mr, owner, name, size);
  1511. mr->ram = true;
  1512. mr->terminates = true;
  1513. mr->ram_device = true;
  1514. mr->ops = &ram_device_mem_ops;
  1515. mr->opaque = mr;
  1516. mr->destructor = memory_region_destructor_ram;
  1517. /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
  1518. assert(ptr != NULL);
  1519. mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_abort);
  1520. }
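/*
 * Aliases forward accesses to a window of another region.  A minimal
 * sketch (the names, window size and addresses are hypothetical): expose
 * the low megabyte of a RAM region at a second guest-physical address:
 *
 *     memory_region_init_alias(&s->ram_lo, OBJECT(s), "ram-low-1m",
 *                              &s->ram, 0, 1 * MiB);
 *     memory_region_add_subregion(get_system_memory(), 0x100000000ULL,
 *                                 &s->ram_lo);
 */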
  1521. void memory_region_init_alias(MemoryRegion *mr,
  1522. Object *owner,
  1523. const char *name,
  1524. MemoryRegion *orig,
  1525. hwaddr offset,
  1526. uint64_t size)
  1527. {
  1528. memory_region_init(mr, owner, name, size);
  1529. mr->alias = orig;
  1530. mr->alias_offset = offset;
  1531. }
  1532. bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
  1533. Object *owner,
  1534. const char *name,
  1535. uint64_t size,
  1536. Error **errp)
  1537. {
  1538. if (!memory_region_init_ram_flags_nomigrate(mr, owner, name,
  1539. size, 0, errp)) {
  1540. return false;
  1541. }
  1542. mr->readonly = true;
  1543. return true;
  1544. }
  1545. bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
  1546. Object *owner,
  1547. const MemoryRegionOps *ops,
  1548. void *opaque,
  1549. const char *name,
  1550. uint64_t size,
  1551. Error **errp)
  1552. {
  1553. Error *err = NULL;
  1554. assert(ops);
  1555. memory_region_init(mr, owner, name, size);
  1556. mr->ops = ops;
  1557. mr->opaque = opaque;
  1558. mr->terminates = true;
  1559. mr->rom_device = true;
  1560. mr->destructor = memory_region_destructor_ram;
  1561. mr->ram_block = qemu_ram_alloc(size, 0, mr, &err);
  1562. if (err) {
  1563. mr->size = int128_zero();
  1564. object_unparent(OBJECT(mr));
  1565. error_propagate(errp, err);
  1566. return false;
  1567. }
  1568. return true;
  1569. }
  1570. void memory_region_init_iommu(void *_iommu_mr,
  1571. size_t instance_size,
  1572. const char *mrtypename,
  1573. Object *owner,
  1574. const char *name,
  1575. uint64_t size)
  1576. {
  1577. struct IOMMUMemoryRegion *iommu_mr;
  1578. struct MemoryRegion *mr;
  1579. object_initialize(_iommu_mr, instance_size, mrtypename);
  1580. mr = MEMORY_REGION(_iommu_mr);
  1581. memory_region_do_init(mr, owner, name, size);
  1582. iommu_mr = IOMMU_MEMORY_REGION(mr);
  1583. mr->terminates = true; /* then re-forwards */
  1584. QLIST_INIT(&iommu_mr->iommu_notify);
  1585. iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
  1586. }
  1587. static void memory_region_finalize(Object *obj)
  1588. {
  1589. MemoryRegion *mr = MEMORY_REGION(obj);
  1590. assert(!mr->container);
    /*
     * We know the region is not visible in any address space (it has no
     * container and, having no references, cannot be a root either), so
     * we can blindly clear mr->enabled.  Using memory_region_set_enabled
     * instead could trigger a transaction and cause an infinite loop.
     */
  1597. mr->enabled = false;
  1598. memory_region_transaction_begin();
  1599. while (!QTAILQ_EMPTY(&mr->subregions)) {
  1600. MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
  1601. memory_region_del_subregion(mr, subregion);
  1602. }
  1603. memory_region_transaction_commit();
  1604. mr->destructor(mr);
  1605. memory_region_clear_coalescing(mr);
  1606. g_free((char *)mr->name);
  1607. g_free(mr->ioeventfds);
  1608. }
  1609. Object *memory_region_owner(MemoryRegion *mr)
  1610. {
  1611. Object *obj = OBJECT(mr);
  1612. return obj->parent;
  1613. }
  1614. void memory_region_ref(MemoryRegion *mr)
  1615. {
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because doing so would slow down DMA
     * noticeably.
     */
  1626. if (mr && mr->owner) {
  1627. object_ref(mr->owner);
  1628. }
  1629. }
  1630. void memory_region_unref(MemoryRegion *mr)
  1631. {
  1632. if (mr && mr->owner) {
  1633. object_unref(mr->owner);
  1634. }
  1635. }
  1636. uint64_t memory_region_size(MemoryRegion *mr)
  1637. {
  1638. if (int128_eq(mr->size, int128_2_64())) {
  1639. return UINT64_MAX;
  1640. }
  1641. return int128_get64(mr->size);
  1642. }
  1643. const char *memory_region_name(const MemoryRegion *mr)
  1644. {
  1645. if (!mr->name) {
  1646. ((MemoryRegion *)mr)->name =
  1647. g_strdup(object_get_canonical_path_component(OBJECT(mr)));
  1648. }
  1649. return mr->name;
  1650. }
  1651. bool memory_region_is_ram_device(MemoryRegion *mr)
  1652. {
  1653. return mr->ram_device;
  1654. }
  1655. bool memory_region_is_protected(MemoryRegion *mr)
  1656. {
  1657. return mr->ram && (mr->ram_block->flags & RAM_PROTECTED);
  1658. }
  1659. bool memory_region_has_guest_memfd(MemoryRegion *mr)
  1660. {
  1661. return mr->ram_block && mr->ram_block->guest_memfd >= 0;
  1662. }
  1663. uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
  1664. {
  1665. uint8_t mask = mr->dirty_log_mask;
  1666. RAMBlock *rb = mr->ram_block;
  1667. if (global_dirty_tracking && ((rb && qemu_ram_is_migratable(rb)) ||
  1668. memory_region_is_iommu(mr))) {
  1669. mask |= (1 << DIRTY_MEMORY_MIGRATION);
  1670. }
  1671. if (tcg_enabled() && rb) {
  1672. /* TCG only cares about dirty memory logging for RAM, not IOMMU. */
  1673. mask |= (1 << DIRTY_MEMORY_CODE);
  1674. }
  1675. return mask;
  1676. }
  1677. bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
  1678. {
  1679. return memory_region_get_dirty_log_mask(mr) & (1 << client);
  1680. }
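/*
 * Recompute the union of the flags of all notifiers registered on this
 * IOMMU region and, if it changed, let the IOMMU implementation know via
 * its notify_flag_changed hook so it can adjust what events it reports.
 */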
  1681. static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
  1682. Error **errp)
  1683. {
  1684. IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
  1685. IOMMUNotifier *iommu_notifier;
  1686. IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
  1687. int ret = 0;
  1688. IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
  1689. flags |= iommu_notifier->notifier_flags;
  1690. }
  1691. if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
  1692. ret = imrc->notify_flag_changed(iommu_mr,
  1693. iommu_mr->iommu_notify_flags,
  1694. flags, errp);
  1695. }
  1696. if (!ret) {
  1697. iommu_mr->iommu_notify_flags = flags;
  1698. }
  1699. return ret;
  1700. }
  1701. int memory_region_register_iommu_notifier(MemoryRegion *mr,
  1702. IOMMUNotifier *n, Error **errp)
  1703. {
  1704. IOMMUMemoryRegion *iommu_mr;
  1705. int ret;
  1706. if (mr->alias) {
  1707. return memory_region_register_iommu_notifier(mr->alias, n, errp);
  1708. }
  1709. /* We need to register for at least one bitfield */
  1710. iommu_mr = IOMMU_MEMORY_REGION(mr);
  1711. assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
  1712. assert(n->start <= n->end);
  1713. assert(n->iommu_idx >= 0 &&
  1714. n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
  1715. QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
  1716. ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
  1717. if (ret) {
  1718. QLIST_REMOVE(n, node);
  1719. }
  1720. return ret;
  1721. }
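/*
 * A caller typically fills in the notifier with iommu_notifier_init()
 * before handing it to memory_region_register_iommu_notifier() above.
 * A minimal sketch, with a hypothetical callback and range:
 *
 *     iommu_notifier_init(&n, my_unmap_cb, IOMMU_NOTIFIER_UNMAP,
 *                         0, HWADDR_MAX, 0);
 *     memory_region_register_iommu_notifier(mr, &n, &error_fatal);
 */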
  1722. uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
  1723. {
  1724. IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
  1725. if (imrc->get_min_page_size) {
  1726. return imrc->get_min_page_size(iommu_mr);
  1727. }
  1728. return TARGET_PAGE_SIZE;
  1729. }
  1730. void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
  1731. {
  1732. MemoryRegion *mr = MEMORY_REGION(iommu_mr);
  1733. IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
  1734. hwaddr addr, granularity;
  1735. IOMMUTLBEntry iotlb;
  1736. /* If the IOMMU has its own replay callback, override */
  1737. if (imrc->replay) {
  1738. imrc->replay(iommu_mr, n);
  1739. return;
  1740. }
  1741. granularity = memory_region_iommu_get_min_page_size(iommu_mr);
  1742. for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
  1743. iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
  1744. if (iotlb.perm != IOMMU_NONE) {
  1745. n->notify(n, &iotlb);
  1746. }
        /* If (2^64 - MR size) < granularity, addr + granularity can wrap
         * around to a smaller value and loop forever; catch that here. */
  1749. if ((addr + granularity) < addr) {
  1750. break;
  1751. }
  1752. }
  1753. }
  1754. void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
  1755. IOMMUNotifier *n)
  1756. {
  1757. IOMMUMemoryRegion *iommu_mr;
  1758. if (mr->alias) {
  1759. memory_region_unregister_iommu_notifier(mr->alias, n);
  1760. return;
  1761. }
  1762. QLIST_REMOVE(n, node);
  1763. iommu_mr = IOMMU_MEMORY_REGION(mr);
  1764. memory_region_update_iommu_notify_flags(iommu_mr, NULL);
  1765. }
  1766. void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
  1767. const IOMMUTLBEvent *event)
  1768. {
  1769. const IOMMUTLBEntry *entry = &event->entry;
  1770. hwaddr entry_end = entry->iova + entry->addr_mask;
  1771. IOMMUTLBEntry tmp = *entry;
  1772. if (event->type == IOMMU_NOTIFIER_UNMAP) {
  1773. assert(entry->perm == IOMMU_NONE);
  1774. }
    /*
     * Skip the notification if the entry does not overlap with the
     * registered range.
     */
  1779. if (notifier->start > entry_end || notifier->end < entry->iova) {
  1780. return;
  1781. }
  1782. if (notifier->notifier_flags & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
  1783. /* Crop (iova, addr_mask) to range */
  1784. tmp.iova = MAX(tmp.iova, notifier->start);
  1785. tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova;
  1786. } else {
  1787. assert(entry->iova >= notifier->start && entry_end <= notifier->end);
  1788. }
  1789. if (event->type & notifier->notifier_flags) {
  1790. notifier->notify(notifier, &tmp);
  1791. }
  1792. }
  1793. void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier)
  1794. {
  1795. IOMMUTLBEvent event;
  1796. event.type = IOMMU_NOTIFIER_UNMAP;
  1797. event.entry.target_as = &address_space_memory;
  1798. event.entry.iova = notifier->start;
  1799. event.entry.perm = IOMMU_NONE;
  1800. event.entry.addr_mask = notifier->end - notifier->start;
  1801. memory_region_notify_iommu_one(notifier, &event);
  1802. }
  1803. void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
  1804. int iommu_idx,
  1805. const IOMMUTLBEvent event)
  1806. {
  1807. IOMMUNotifier *iommu_notifier;
  1808. assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
  1809. IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
  1810. if (iommu_notifier->iommu_idx == iommu_idx) {
  1811. memory_region_notify_iommu_one(iommu_notifier, &event);
  1812. }
  1813. }
  1814. }
  1815. int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
  1816. enum IOMMUMemoryRegionAttr attr,
  1817. void *data)
  1818. {
  1819. IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
  1820. if (!imrc->get_attr) {
  1821. return -EINVAL;
  1822. }
  1823. return imrc->get_attr(iommu_mr, attr, data);
  1824. }
  1825. int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
  1826. MemTxAttrs attrs)
  1827. {
  1828. IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
  1829. if (!imrc->attrs_to_index) {
  1830. return 0;
  1831. }
  1832. return imrc->attrs_to_index(iommu_mr, attrs);
  1833. }
  1834. int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
  1835. {
  1836. IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
  1837. if (!imrc->num_indexes) {
  1838. return 1;
  1839. }
  1840. return imrc->num_indexes(iommu_mr);
  1841. }
  1842. RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr)
  1843. {
  1844. if (!memory_region_is_ram(mr)) {
  1845. return NULL;
  1846. }
  1847. return mr->rdm;
  1848. }
  1849. void memory_region_set_ram_discard_manager(MemoryRegion *mr,
  1850. RamDiscardManager *rdm)
  1851. {
  1852. g_assert(memory_region_is_ram(mr));
  1853. g_assert(!rdm || !mr->rdm);
  1854. mr->rdm = rdm;
  1855. }
  1856. uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
  1857. const MemoryRegion *mr)
  1858. {
  1859. RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
  1860. g_assert(rdmc->get_min_granularity);
  1861. return rdmc->get_min_granularity(rdm, mr);
  1862. }
  1863. bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
  1864. const MemoryRegionSection *section)
  1865. {
  1866. RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
  1867. g_assert(rdmc->is_populated);
  1868. return rdmc->is_populated(rdm, section);
  1869. }
  1870. int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
  1871. MemoryRegionSection *section,
  1872. ReplayRamPopulate replay_fn,
  1873. void *opaque)
  1874. {
  1875. RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
  1876. g_assert(rdmc->replay_populated);
  1877. return rdmc->replay_populated(rdm, section, replay_fn, opaque);
  1878. }
  1879. void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
  1880. MemoryRegionSection *section,
  1881. ReplayRamDiscard replay_fn,
  1882. void *opaque)
  1883. {
  1884. RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
  1885. g_assert(rdmc->replay_discarded);
  1886. rdmc->replay_discarded(rdm, section, replay_fn, opaque);
  1887. }
  1888. void ram_discard_manager_register_listener(RamDiscardManager *rdm,
  1889. RamDiscardListener *rdl,
  1890. MemoryRegionSection *section)
  1891. {
  1892. RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
  1893. g_assert(rdmc->register_listener);
  1894. rdmc->register_listener(rdm, rdl, section);
  1895. }
  1896. void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
  1897. RamDiscardListener *rdl)
  1898. {
  1899. RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
  1900. g_assert(rdmc->unregister_listener);
  1901. rdmc->unregister_listener(rdm, rdl);
  1902. }
  1903. /* Called with rcu_read_lock held. */
  1904. bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
  1905. ram_addr_t *ram_addr, bool *read_only,
  1906. bool *mr_has_discard_manager, Error **errp)
  1907. {
  1908. MemoryRegion *mr;
  1909. hwaddr xlat;
  1910. hwaddr len = iotlb->addr_mask + 1;
  1911. bool writable = iotlb->perm & IOMMU_WO;
  1912. if (mr_has_discard_manager) {
  1913. *mr_has_discard_manager = false;
  1914. }
    /*
     * The IOMMU TLB entry we have only covers translation through
     * this IOMMU to its immediate target.  We need to translate it
     * the rest of the way through to memory.
     */
  1920. mr = address_space_translate(&address_space_memory, iotlb->translated_addr,
  1921. &xlat, &len, writable, MEMTXATTRS_UNSPECIFIED);
  1922. if (!memory_region_is_ram(mr)) {
  1923. error_setg(errp, "iommu map to non memory area %" HWADDR_PRIx "", xlat);
  1924. return false;
  1925. } else if (memory_region_has_ram_discard_manager(mr)) {
  1926. RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
  1927. MemoryRegionSection tmp = {
  1928. .mr = mr,
  1929. .offset_within_region = xlat,
  1930. .size = int128_make64(len),
  1931. };
  1932. if (mr_has_discard_manager) {
  1933. *mr_has_discard_manager = true;
  1934. }
        /*
         * Malicious VMs can map memory into the IOMMU that is expected to
         * remain discarded.  vfio would then pin all pages, populating the
         * memory, so disallow that.  vmstate priorities make sure any
         * RamDiscardManager was already restored before IOMMUs are restored.
         */
  1941. if (!ram_discard_manager_is_populated(rdm, &tmp)) {
  1942. error_setg(errp, "iommu map to discarded memory (e.g., unplugged"
  1943. " via virtio-mem): %" HWADDR_PRIx "",
  1944. iotlb->translated_addr);
  1945. return false;
  1946. }
  1947. }
  1948. /*
  1949. * Translation truncates length to the IOMMU page size,
  1950. * check that it did not truncate too much.
  1951. */
  1952. if (len & iotlb->addr_mask) {
  1953. error_setg(errp, "iommu has granularity incompatible with target AS");
  1954. return false;
  1955. }
  1956. if (vaddr) {
  1957. *vaddr = memory_region_get_ram_ptr(mr) + xlat;
  1958. }
  1959. if (ram_addr) {
  1960. *ram_addr = memory_region_get_ram_addr(mr) + xlat;
  1961. }
  1962. if (read_only) {
  1963. *read_only = !writable || mr->readonly;
  1964. }
  1965. return true;
  1966. }
  1967. void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
  1968. {
  1969. uint8_t mask = 1 << client;
  1970. uint8_t old_logging;
  1971. assert(client == DIRTY_MEMORY_VGA);
  1972. old_logging = mr->vga_logging_count;
  1973. mr->vga_logging_count += log ? 1 : -1;
  1974. if (!!old_logging == !!mr->vga_logging_count) {
  1975. return;
  1976. }
  1977. memory_region_transaction_begin();
  1978. mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
  1979. memory_region_update_pending |= mr->enabled;
  1980. memory_region_transaction_commit();
  1981. }
  1982. void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
  1983. hwaddr size)
  1984. {
  1985. assert(mr->ram_block);
  1986. cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
  1987. size,
  1988. memory_region_get_dirty_log_mask(mr));
  1989. }
/*
 * If memory region `mr' is NULL, do a global sync.  Otherwise, sync the
 * dirty bitmap for the specified memory region only.
 */
  1994. static void memory_region_sync_dirty_bitmap(MemoryRegion *mr, bool last_stage)
  1995. {
  1996. MemoryListener *listener;
  1997. AddressSpace *as;
  1998. FlatView *view;
  1999. FlatRange *fr;
  2000. /* If the same address space has multiple log_sync listeners, we
  2001. * visit that address space's FlatView multiple times. But because
  2002. * log_sync listeners are rare, it's still cheaper than walking each
  2003. * address space once.
  2004. */
  2005. QTAILQ_FOREACH(listener, &memory_listeners, link) {
  2006. if (listener->log_sync) {
  2007. as = listener->address_space;
  2008. view = address_space_get_flatview(as);
  2009. FOR_EACH_FLAT_RANGE(fr, view) {
  2010. if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
  2011. MemoryRegionSection mrs = section_from_flat_range(fr, view);
  2012. listener->log_sync(listener, &mrs);
  2013. }
  2014. }
  2015. flatview_unref(view);
  2016. trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 0);
  2017. } else if (listener->log_sync_global) {
            /*
             * Whether or not an MR is specified, all we can do here is a
             * global sync, because this listener cannot sync at a finer
             * granularity.
             */
  2023. listener->log_sync_global(listener, last_stage);
  2024. trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 1);
  2025. }
  2026. }
  2027. }
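/*
 * Ask all listeners that implement log_clear to clear their dirty bitmap
 * for the given range of `mr': each matching flat range is clipped to
 * [start, start + len) before the listener is called.
 */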
  2028. void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
  2029. hwaddr len)
  2030. {
  2031. MemoryRegionSection mrs;
  2032. MemoryListener *listener;
  2033. AddressSpace *as;
  2034. FlatView *view;
  2035. FlatRange *fr;
  2036. hwaddr sec_start, sec_end, sec_size;
  2037. QTAILQ_FOREACH(listener, &memory_listeners, link) {
  2038. if (!listener->log_clear) {
  2039. continue;
  2040. }
  2041. as = listener->address_space;
  2042. view = address_space_get_flatview(as);
  2043. FOR_EACH_FLAT_RANGE(fr, view) {
  2044. if (!fr->dirty_log_mask || fr->mr != mr) {
            /*
             * The clear-dirty-bitmap operation only applies to regions
             * that have dirty logging enabled in the first place.
             */
  2049. continue;
  2050. }
  2051. mrs = section_from_flat_range(fr, view);
  2052. sec_start = MAX(mrs.offset_within_region, start);
  2053. sec_end = mrs.offset_within_region + int128_get64(mrs.size);
  2054. sec_end = MIN(sec_end, start + len);
  2055. if (sec_start >= sec_end) {
  2056. /*
  2057. * If this memory region section has no intersection
  2058. * with the requested range, skip.
  2059. */
  2060. continue;
  2061. }
  2062. /* Valid case; shrink the section if needed */
  2063. mrs.offset_within_address_space +=
  2064. sec_start - mrs.offset_within_region;
  2065. mrs.offset_within_region = sec_start;
  2066. sec_size = sec_end - sec_start;
  2067. mrs.size = int128_make64(sec_size);
  2068. listener->log_clear(listener, &mrs);
  2069. }
  2070. flatview_unref(view);
  2071. }
  2072. }
  2073. DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
  2074. hwaddr addr,
  2075. hwaddr size,
  2076. unsigned client)
  2077. {
  2078. DirtyBitmapSnapshot *snapshot;
  2079. assert(mr->ram_block);
  2080. memory_region_sync_dirty_bitmap(mr, false);
  2081. snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
  2082. memory_global_after_dirty_log_sync();
  2083. return snapshot;
  2084. }
  2085. bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
  2086. hwaddr addr, hwaddr size)
  2087. {
  2088. assert(mr->ram_block);
  2089. return cpu_physical_memory_snapshot_get_dirty(snap,
  2090. memory_region_get_ram_addr(mr) + addr, size);
  2091. }
  2092. void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
  2093. {
  2094. if (mr->readonly != readonly) {
  2095. memory_region_transaction_begin();
  2096. mr->readonly = readonly;
  2097. memory_region_update_pending |= mr->enabled;
  2098. memory_region_transaction_commit();
  2099. }
  2100. }
  2101. void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
  2102. {
  2103. if (mr->nonvolatile != nonvolatile) {
  2104. memory_region_transaction_begin();
  2105. mr->nonvolatile = nonvolatile;
  2106. memory_region_update_pending |= mr->enabled;
  2107. memory_region_transaction_commit();
  2108. }
  2109. }
  2110. void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
  2111. {
  2112. if (mr->romd_mode != romd_mode) {
  2113. memory_region_transaction_begin();
  2114. mr->romd_mode = romd_mode;
  2115. memory_region_update_pending |= mr->enabled;
  2116. memory_region_transaction_commit();
  2117. }
  2118. }
  2119. void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
  2120. hwaddr size, unsigned client)
  2121. {
  2122. assert(mr->ram_block);
  2123. cpu_physical_memory_test_and_clear_dirty(
  2124. memory_region_get_ram_addr(mr) + addr, size, client);
  2125. }
  2126. int memory_region_get_fd(MemoryRegion *mr)
  2127. {
  2128. RCU_READ_LOCK_GUARD();
  2129. while (mr->alias) {
  2130. mr = mr->alias;
  2131. }
  2132. return mr->ram_block->fd;
  2133. }
  2134. void *memory_region_get_ram_ptr(MemoryRegion *mr)
  2135. {
  2136. uint64_t offset = 0;
  2137. RCU_READ_LOCK_GUARD();
  2138. while (mr->alias) {
  2139. offset += mr->alias_offset;
  2140. mr = mr->alias;
  2141. }
  2142. assert(mr->ram_block);
  2143. return qemu_map_ram_ptr(mr->ram_block, offset);
  2144. }
  2145. MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
  2146. {
  2147. RAMBlock *block;
  2148. block = qemu_ram_block_from_host(ptr, false, offset);
  2149. if (!block) {
  2150. return NULL;
  2151. }
  2152. return block->mr;
  2153. }
  2154. ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
  2155. {
  2156. return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
  2157. }
  2158. void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
  2159. {
  2160. assert(mr->ram_block);
  2161. qemu_ram_resize(mr->ram_block, newsize, errp);
  2162. }
  2163. void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
  2164. {
  2165. if (mr->ram_block) {
  2166. qemu_ram_msync(mr->ram_block, addr, size);
  2167. }
  2168. }
  2169. void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
  2170. {
    /*
     * This might need to be extended to cover other types of memory
     * regions.
     */
  2175. if (mr->dirty_log_mask) {
  2176. memory_region_msync(mr, addr, size);
  2177. }
  2178. }
  2179. /*
  2180. * Call proper memory listeners about the change on the newly
  2181. * added/removed CoalescedMemoryRange.
  2182. */
  2183. static void memory_region_update_coalesced_range(MemoryRegion *mr,
  2184. CoalescedMemoryRange *cmr,
  2185. bool add)
  2186. {
  2187. AddressSpace *as;
  2188. FlatView *view;
  2189. FlatRange *fr;
  2190. QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
  2191. view = address_space_get_flatview(as);
  2192. FOR_EACH_FLAT_RANGE(fr, view) {
  2193. if (fr->mr == mr) {
  2194. flat_range_coalesced_io_notify(fr, as, cmr, add);
  2195. }
  2196. }
  2197. flatview_unref(view);
  2198. }
  2199. }
  2200. void memory_region_set_coalescing(MemoryRegion *mr)
  2201. {
  2202. memory_region_clear_coalescing(mr);
  2203. memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
  2204. }
  2205. void memory_region_add_coalescing(MemoryRegion *mr,
  2206. hwaddr offset,
  2207. uint64_t size)
  2208. {
  2209. CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
  2210. cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
  2211. QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
  2212. memory_region_update_coalesced_range(mr, cmr, true);
  2213. memory_region_set_flush_coalesced(mr);
  2214. }
  2215. void memory_region_clear_coalescing(MemoryRegion *mr)
  2216. {
  2217. CoalescedMemoryRange *cmr;
  2218. if (QTAILQ_EMPTY(&mr->coalesced)) {
  2219. return;
  2220. }
  2221. qemu_flush_coalesced_mmio_buffer();
  2222. mr->flush_coalesced_mmio = false;
  2223. while (!QTAILQ_EMPTY(&mr->coalesced)) {
  2224. cmr = QTAILQ_FIRST(&mr->coalesced);
  2225. QTAILQ_REMOVE(&mr->coalesced, cmr, link);
  2226. memory_region_update_coalesced_range(mr, cmr, false);
  2227. g_free(cmr);
  2228. }
  2229. }
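/*
 * The flush_coalesced flag makes accesses to this region flush the
 * coalesced MMIO buffer first.  memory_region_add_coalescing() sets it
 * implicitly (see above); memory_region_clear_flush_coalesced() only
 * drops it again once no coalesced ranges remain.
 */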
  2230. void memory_region_set_flush_coalesced(MemoryRegion *mr)
  2231. {
  2232. mr->flush_coalesced_mmio = true;
  2233. }
  2234. void memory_region_clear_flush_coalesced(MemoryRegion *mr)
  2235. {
  2236. qemu_flush_coalesced_mmio_buffer();
  2237. if (QTAILQ_EMPTY(&mr->coalesced)) {
  2238. mr->flush_coalesced_mmio = false;
  2239. }
  2240. }
  2241. void memory_region_add_eventfd(MemoryRegion *mr,
  2242. hwaddr addr,
  2243. unsigned size,
  2244. bool match_data,
  2245. uint64_t data,
  2246. EventNotifier *e)
  2247. {
  2248. MemoryRegionIoeventfd mrfd = {
  2249. .addr.start = int128_make64(addr),
  2250. .addr.size = int128_make64(size),
  2251. .match_data = match_data,
  2252. .data = data,
  2253. .e = e,
  2254. };
  2255. unsigned i;
  2256. if (size) {
  2257. adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
  2258. }
  2259. memory_region_transaction_begin();
  2260. for (i = 0; i < mr->ioeventfd_nb; ++i) {
  2261. if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
  2262. break;
  2263. }
  2264. }
  2265. ++mr->ioeventfd_nb;
  2266. mr->ioeventfds = g_realloc(mr->ioeventfds,
  2267. sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
  2268. memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
  2269. sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
  2270. mr->ioeventfds[i] = mrfd;
  2271. ioeventfd_update_pending |= mr->enabled;
  2272. memory_region_transaction_commit();
  2273. }
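/*
 * memory_region_add_eventfd() keeps mr->ioeventfds sorted and updates it
 * inside a transaction so that accelerators can (re)wire the doorbells.
 * A hedged sketch of a doorbell registration; the offset, value and
 * notifier below are hypothetical:
 *
 *     memory_region_add_eventfd(&s->mmio, MY_DOORBELL_OFFSET, 4,
 *                               true, queue_index, &s->notifier);
 */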
  2274. void memory_region_del_eventfd(MemoryRegion *mr,
  2275. hwaddr addr,
  2276. unsigned size,
  2277. bool match_data,
  2278. uint64_t data,
  2279. EventNotifier *e)
  2280. {
  2281. MemoryRegionIoeventfd mrfd = {
  2282. .addr.start = int128_make64(addr),
  2283. .addr.size = int128_make64(size),
  2284. .match_data = match_data,
  2285. .data = data,
  2286. .e = e,
  2287. };
  2288. unsigned i;
  2289. if (size) {
  2290. adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
  2291. }
  2292. memory_region_transaction_begin();
  2293. for (i = 0; i < mr->ioeventfd_nb; ++i) {
  2294. if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
  2295. break;
  2296. }
  2297. }
  2298. assert(i != mr->ioeventfd_nb);
  2299. memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
  2300. sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
  2301. --mr->ioeventfd_nb;
  2302. mr->ioeventfds = g_realloc(mr->ioeventfds,
  2303. sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
  2304. ioeventfd_update_pending |= mr->enabled;
  2305. memory_region_transaction_commit();
  2306. }
  2307. static void memory_region_update_container_subregions(MemoryRegion *subregion)
  2308. {
  2309. MemoryRegion *mr = subregion->container;
  2310. MemoryRegion *other;
  2311. memory_region_transaction_begin();
  2312. memory_region_ref(subregion);
  2313. QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
  2314. if (subregion->priority >= other->priority) {
  2315. QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
  2316. goto done;
  2317. }
  2318. }
  2319. QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
  2320. done:
  2321. memory_region_update_pending |= mr->enabled && subregion->enabled;
  2322. memory_region_transaction_commit();
  2323. }
  2324. static void memory_region_add_subregion_common(MemoryRegion *mr,
  2325. hwaddr offset,
  2326. MemoryRegion *subregion)
  2327. {
  2328. MemoryRegion *alias;
  2329. assert(!subregion->container);
  2330. subregion->container = mr;
  2331. for (alias = subregion->alias; alias; alias = alias->alias) {
  2332. alias->mapped_via_alias++;
  2333. }
  2334. subregion->addr = offset;
  2335. memory_region_update_container_subregions(subregion);
  2336. }
  2337. void memory_region_add_subregion(MemoryRegion *mr,
  2338. hwaddr offset,
  2339. MemoryRegion *subregion)
  2340. {
  2341. subregion->priority = 0;
  2342. memory_region_add_subregion_common(mr, offset, subregion);
  2343. }
  2344. void memory_region_add_subregion_overlap(MemoryRegion *mr,
  2345. hwaddr offset,
  2346. MemoryRegion *subregion,
  2347. int priority)
  2348. {
  2349. subregion->priority = priority;
  2350. memory_region_add_subregion_common(mr, offset, subregion);
  2351. }
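/*
 * Subregions are kept sorted by descending priority (see
 * memory_region_update_container_subregions above); when regions added
 * with memory_region_add_subregion_overlap() overlap, the higher-priority
 * one hides the lower-priority one in the flat view.
 */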
  2352. void memory_region_del_subregion(MemoryRegion *mr,
  2353. MemoryRegion *subregion)
  2354. {
  2355. MemoryRegion *alias;
  2356. memory_region_transaction_begin();
  2357. assert(subregion->container == mr);
  2358. subregion->container = NULL;
  2359. for (alias = subregion->alias; alias; alias = alias->alias) {
  2360. alias->mapped_via_alias--;
  2361. assert(alias->mapped_via_alias >= 0);
  2362. }
  2363. QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
  2364. memory_region_unref(subregion);
  2365. memory_region_update_pending |= mr->enabled && subregion->enabled;
  2366. memory_region_transaction_commit();
  2367. }
  2368. void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
  2369. {
  2370. if (enabled == mr->enabled) {
  2371. return;
  2372. }
  2373. memory_region_transaction_begin();
  2374. mr->enabled = enabled;
  2375. memory_region_update_pending = true;
  2376. memory_region_transaction_commit();
  2377. }
  2378. void memory_region_set_size(MemoryRegion *mr, uint64_t size)
  2379. {
  2380. Int128 s = int128_make64(size);
  2381. if (size == UINT64_MAX) {
  2382. s = int128_2_64();
  2383. }
  2384. if (int128_eq(s, mr->size)) {
  2385. return;
  2386. }
  2387. memory_region_transaction_begin();
  2388. mr->size = s;
  2389. memory_region_update_pending = true;
  2390. memory_region_transaction_commit();
  2391. }
  2392. static void memory_region_readd_subregion(MemoryRegion *mr)
  2393. {
  2394. MemoryRegion *container = mr->container;
  2395. if (container) {
  2396. memory_region_transaction_begin();
  2397. memory_region_ref(mr);
  2398. memory_region_del_subregion(container, mr);
  2399. memory_region_add_subregion_common(container, mr->addr, mr);
  2400. memory_region_unref(mr);
  2401. memory_region_transaction_commit();
  2402. }
  2403. }
  2404. void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
  2405. {
  2406. if (addr != mr->addr) {
  2407. mr->addr = addr;
  2408. memory_region_readd_subregion(mr);
  2409. }
  2410. }
  2411. void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
  2412. {
  2413. assert(mr->alias);
  2414. if (offset == mr->alias_offset) {
  2415. return;
  2416. }
  2417. memory_region_transaction_begin();
  2418. mr->alias_offset = offset;
  2419. memory_region_update_pending |= mr->enabled;
  2420. memory_region_transaction_commit();
  2421. }
  2422. void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable)
  2423. {
  2424. if (unmergeable == mr->unmergeable) {
  2425. return;
  2426. }
  2427. memory_region_transaction_begin();
  2428. mr->unmergeable = unmergeable;
  2429. memory_region_update_pending |= mr->enabled;
  2430. memory_region_transaction_commit();
  2431. }
  2432. uint64_t memory_region_get_alignment(const MemoryRegion *mr)
  2433. {
  2434. return mr->align;
  2435. }
  2436. static int cmp_flatrange_addr(const void *addr_, const void *fr_)
  2437. {
  2438. const AddrRange *addr = addr_;
  2439. const FlatRange *fr = fr_;
  2440. if (int128_le(addrrange_end(*addr), fr->addr.start)) {
  2441. return -1;
  2442. } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
  2443. return 1;
  2444. }
  2445. return 0;
  2446. }
  2447. static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
  2448. {
  2449. return bsearch(&addr, view->ranges, view->nr,
  2450. sizeof(FlatRange), cmp_flatrange_addr);
  2451. }
  2452. bool memory_region_is_mapped(MemoryRegion *mr)
  2453. {
  2454. return !!mr->container || mr->mapped_via_alias;
  2455. }
  2456. /* Same as memory_region_find, but it does not add a reference to the
  2457. * returned region. It must be called from an RCU critical section.
  2458. */
  2459. static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
  2460. hwaddr addr, uint64_t size)
  2461. {
  2462. MemoryRegionSection ret = { .mr = NULL };
  2463. MemoryRegion *root;
  2464. AddressSpace *as;
  2465. AddrRange range;
  2466. FlatView *view;
  2467. FlatRange *fr;
  2468. addr += mr->addr;
  2469. for (root = mr; root->container; ) {
  2470. root = root->container;
  2471. addr += root->addr;
  2472. }
  2473. as = memory_region_to_address_space(root);
  2474. if (!as) {
  2475. return ret;
  2476. }
  2477. range = addrrange_make(int128_make64(addr), int128_make64(size));
  2478. view = address_space_to_flatview(as);
  2479. fr = flatview_lookup(view, range);
  2480. if (!fr) {
  2481. return ret;
  2482. }
  2483. while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
  2484. --fr;
  2485. }
  2486. ret.mr = fr->mr;
  2487. ret.fv = view;
  2488. range = addrrange_intersection(range, fr->addr);
  2489. ret.offset_within_region = fr->offset_in_region;
  2490. ret.offset_within_region += int128_get64(int128_sub(range.start,
  2491. fr->addr.start));
  2492. ret.size = range.size;
  2493. ret.offset_within_address_space = int128_get64(range.start);
  2494. ret.readonly = fr->readonly;
  2495. ret.nonvolatile = fr->nonvolatile;
  2496. return ret;
  2497. }
  2498. MemoryRegionSection memory_region_find(MemoryRegion *mr,
  2499. hwaddr addr, uint64_t size)
  2500. {
  2501. MemoryRegionSection ret;
  2502. RCU_READ_LOCK_GUARD();
  2503. ret = memory_region_find_rcu(mr, addr, size);
  2504. if (ret.mr) {
  2505. memory_region_ref(ret.mr);
  2506. }
  2507. return ret;
  2508. }
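/*
 * memory_region_find() takes a reference on the region it returns, so a
 * typical caller drops it when done.  Minimal sketch:
 *
 *     MemoryRegionSection sec = memory_region_find(root, addr, 4);
 *     if (sec.mr) {
 *         ... use sec ...
 *         memory_region_unref(sec.mr);
 *     }
 */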
  2509. MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s)
  2510. {
  2511. MemoryRegionSection *tmp = g_new(MemoryRegionSection, 1);
  2512. *tmp = *s;
  2513. if (tmp->mr) {
  2514. memory_region_ref(tmp->mr);
  2515. }
  2516. if (tmp->fv) {
  2517. bool ret = flatview_ref(tmp->fv);
  2518. g_assert(ret);
  2519. }
  2520. return tmp;
  2521. }
  2522. void memory_region_section_free_copy(MemoryRegionSection *s)
  2523. {
  2524. if (s->fv) {
  2525. flatview_unref(s->fv);
  2526. }
  2527. if (s->mr) {
  2528. memory_region_unref(s->mr);
  2529. }
  2530. g_free(s);
  2531. }
  2532. bool memory_region_present(MemoryRegion *container, hwaddr addr)
  2533. {
  2534. MemoryRegion *mr;
  2535. RCU_READ_LOCK_GUARD();
  2536. mr = memory_region_find_rcu(container, addr, 1).mr;
  2537. return mr && mr != container;
  2538. }
  2539. void memory_global_dirty_log_sync(bool last_stage)
  2540. {
  2541. memory_region_sync_dirty_bitmap(NULL, last_stage);
  2542. }
  2543. void memory_global_after_dirty_log_sync(void)
  2544. {
  2545. MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
  2546. }
/*
 * Dirty-tracking stop flags whose processing is postponed because the VM
 * is stopped.  Should only be used within the vmstate_change hook.
 */
  2551. static unsigned int postponed_stop_flags;
  2552. static VMChangeStateEntry *vmstate_change;
  2553. static void memory_global_dirty_log_stop_postponed_run(void);
  2554. static bool memory_global_dirty_log_do_start(Error **errp)
  2555. {
  2556. MemoryListener *listener;
  2557. QTAILQ_FOREACH(listener, &memory_listeners, link) {
  2558. if (listener->log_global_start) {
  2559. if (!listener->log_global_start(listener, errp)) {
  2560. goto err;
  2561. }
  2562. }
  2563. }
  2564. return true;
  2565. err:
  2566. while ((listener = QTAILQ_PREV(listener, link)) != NULL) {
  2567. if (listener->log_global_stop) {
  2568. listener->log_global_stop(listener);
  2569. }
  2570. }
  2571. return false;
  2572. }
  2573. bool memory_global_dirty_log_start(unsigned int flags, Error **errp)
  2574. {
  2575. unsigned int old_flags;
  2576. assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
  2577. if (vmstate_change) {
  2578. /* If there is postponed stop(), operate on it first */
  2579. postponed_stop_flags &= ~flags;
  2580. memory_global_dirty_log_stop_postponed_run();
  2581. }
  2582. flags &= ~global_dirty_tracking;
  2583. if (!flags) {
  2584. return true;
  2585. }
  2586. old_flags = global_dirty_tracking;
  2587. global_dirty_tracking |= flags;
  2588. trace_global_dirty_changed(global_dirty_tracking);
  2589. if (!old_flags) {
  2590. if (!memory_global_dirty_log_do_start(errp)) {
  2591. global_dirty_tracking &= ~flags;
  2592. trace_global_dirty_changed(global_dirty_tracking);
  2593. return false;
  2594. }
  2595. memory_region_transaction_begin();
  2596. memory_region_update_pending = true;
  2597. memory_region_transaction_commit();
  2598. }
  2599. return true;
  2600. }
  2601. static void memory_global_dirty_log_do_stop(unsigned int flags)
  2602. {
  2603. assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
  2604. assert((global_dirty_tracking & flags) == flags);
  2605. global_dirty_tracking &= ~flags;
  2606. trace_global_dirty_changed(global_dirty_tracking);
  2607. if (!global_dirty_tracking) {
  2608. memory_region_transaction_begin();
  2609. memory_region_update_pending = true;
  2610. memory_region_transaction_commit();
  2611. MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
  2612. }
  2613. }
/*
 * Execute the postponed dirty log stop operations, if there are any, then
 * reset everything (including the flags and the vmstate change hook).
 */
  2618. static void memory_global_dirty_log_stop_postponed_run(void)
  2619. {
  2620. /* This must be called with the vmstate handler registered */
  2621. assert(vmstate_change);
    /* Note: postponed_stop_flags can be cleared in the log start routine */
  2623. if (postponed_stop_flags) {
  2624. memory_global_dirty_log_do_stop(postponed_stop_flags);
  2625. postponed_stop_flags = 0;
  2626. }
  2627. qemu_del_vm_change_state_handler(vmstate_change);
  2628. vmstate_change = NULL;
  2629. }
  2630. static void memory_vm_change_state_handler(void *opaque, bool running,
  2631. RunState state)
  2632. {
  2633. if (running) {
  2634. memory_global_dirty_log_stop_postponed_run();
  2635. }
  2636. }
  2637. void memory_global_dirty_log_stop(unsigned int flags)
  2638. {
  2639. if (!runstate_is_running()) {
  2640. /* Postpone the dirty log stop, e.g., to when VM starts again */
  2641. if (vmstate_change) {
  2642. /* Batch with previous postponed flags */
  2643. postponed_stop_flags |= flags;
  2644. } else {
  2645. postponed_stop_flags = flags;
  2646. vmstate_change = qemu_add_vm_change_state_handler(
  2647. memory_vm_change_state_handler, NULL);
  2648. }
  2649. return;
  2650. }
  2651. memory_global_dirty_log_do_stop(flags);
  2652. }
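/*
 * Replay the current state of an address space (flat ranges, dirty
 * logging, coalesced I/O and ioeventfds) to a newly registered listener,
 * bracketed by its begin/commit callbacks.
 */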
  2653. static void listener_add_address_space(MemoryListener *listener,
  2654. AddressSpace *as)
  2655. {
  2656. unsigned i;
  2657. FlatView *view;
  2658. FlatRange *fr;
  2659. MemoryRegionIoeventfd *fd;
  2660. if (listener->begin) {
  2661. listener->begin(listener);
  2662. }
  2663. if (global_dirty_tracking) {
  2664. /*
  2665. * Currently only VFIO can fail log_global_start(), and it's not
  2666. * yet allowed to hotplug any PCI device during migration. So this
  2667. * should never fail when invoked, guard it with error_abort. If
  2668. * it can start to fail in the future, we need to be able to fail
  2669. * the whole listener_add_address_space() and its callers.
  2670. */
  2671. if (listener->log_global_start) {
  2672. listener->log_global_start(listener, &error_abort);
  2673. }
  2674. }
  2675. view = address_space_get_flatview(as);
  2676. FOR_EACH_FLAT_RANGE(fr, view) {
  2677. MemoryRegionSection section = section_from_flat_range(fr, view);
  2678. if (listener->region_add) {
  2679. listener->region_add(listener, &section);
  2680. }
  2681. /* send coalesced io add notifications */
  2682. flat_range_coalesced_io_notify_listener_add_del(fr, &section,
  2683. listener, as, true);
  2684. if (fr->dirty_log_mask && listener->log_start) {
  2685. listener->log_start(listener, &section, 0, fr->dirty_log_mask);
  2686. }
  2687. }
  2688. /*
  2689. * register all eventfds for this address space for the newly registered
  2690. * listener.
  2691. */
  2692. for (i = 0; i < as->ioeventfd_nb; i++) {
  2693. fd = &as->ioeventfds[i];
  2694. MemoryRegionSection section = (MemoryRegionSection) {
  2695. .fv = view,
  2696. .offset_within_address_space = int128_get64(fd->addr.start),
  2697. .size = fd->addr.size,
  2698. };
  2699. if (listener->eventfd_add) {
  2700. listener->eventfd_add(listener, &section,
  2701. fd->match_data, fd->data, fd->e);
  2702. }
  2703. }
  2704. if (listener->commit) {
  2705. listener->commit(listener);
  2706. }
  2707. flatview_unref(view);
  2708. }
  2709. static void listener_del_address_space(MemoryListener *listener,
  2710. AddressSpace *as)
  2711. {
  2712. unsigned i;
  2713. FlatView *view;
  2714. FlatRange *fr;
  2715. MemoryRegionIoeventfd *fd;
  2716. if (listener->begin) {
  2717. listener->begin(listener);
  2718. }
  2719. view = address_space_get_flatview(as);
  2720. FOR_EACH_FLAT_RANGE(fr, view) {
  2721. MemoryRegionSection section = section_from_flat_range(fr, view);
  2722. if (fr->dirty_log_mask && listener->log_stop) {
  2723. listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
  2724. }
  2725. /* send coalesced io del notifications */
  2726. flat_range_coalesced_io_notify_listener_add_del(fr, &section,
  2727. listener, as, false);
  2728. if (listener->region_del) {
  2729. listener->region_del(listener, &section);
  2730. }
  2731. }
  2732. /*
  2733. * de-register all eventfds for this address space for the current
  2734. * listener.
  2735. */
  2736. for (i = 0; i < as->ioeventfd_nb; i++) {
  2737. fd = &as->ioeventfds[i];
  2738. MemoryRegionSection section = (MemoryRegionSection) {
  2739. .fv = view,
  2740. .offset_within_address_space = int128_get64(fd->addr.start),
  2741. .size = fd->addr.size,
  2742. };
  2743. if (listener->eventfd_del) {
  2744. listener->eventfd_del(listener, &section,
  2745. fd->match_data, fd->data, fd->e);
  2746. }
  2747. }
  2748. if (listener->commit) {
  2749. listener->commit(listener);
  2750. }
  2751. flatview_unref(view);
  2752. }
  2753. void memory_listener_register(MemoryListener *listener, AddressSpace *as)
  2754. {
  2755. MemoryListener *other = NULL;
  2756. /* Only one of them can be defined for a listener */
  2757. assert(!(listener->log_sync && listener->log_sync_global));
  2758. listener->address_space = as;
  2759. if (QTAILQ_EMPTY(&memory_listeners)
  2760. || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
  2761. QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
  2762. } else {
  2763. QTAILQ_FOREACH(other, &memory_listeners, link) {
  2764. if (listener->priority < other->priority) {
  2765. break;
  2766. }
  2767. }
  2768. QTAILQ_INSERT_BEFORE(other, listener, link);
  2769. }
  2770. if (QTAILQ_EMPTY(&as->listeners)
  2771. || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
  2772. QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
  2773. } else {
  2774. QTAILQ_FOREACH(other, &as->listeners, link_as) {
  2775. if (listener->priority < other->priority) {
  2776. break;
  2777. }
  2778. }
  2779. QTAILQ_INSERT_BEFORE(other, listener, link_as);
  2780. }
  2781. listener_add_address_space(listener, as);
  2782. if (listener->eventfd_add || listener->eventfd_del) {
  2783. as->ioeventfd_notifiers++;
  2784. }
  2785. }
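/*
 * A minimal sketch of listener registration; the callbacks, name and
 * priority below are hypothetical:
 *
 *     static MemoryListener my_listener = {
 *         .name = "my-listener",
 *         .region_add = my_region_add,
 *         .region_del = my_region_del,
 *         .priority = 10,
 *     };
 *     memory_listener_register(&my_listener, &address_space_memory);
 */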
  2786. void memory_listener_unregister(MemoryListener *listener)
  2787. {
  2788. if (!listener->address_space) {
  2789. return;
  2790. }
  2791. if (listener->eventfd_add || listener->eventfd_del) {
  2792. listener->address_space->ioeventfd_notifiers--;
  2793. }
  2794. listener_del_address_space(listener, listener->address_space);
  2795. QTAILQ_REMOVE(&memory_listeners, listener, link);
  2796. QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
  2797. listener->address_space = NULL;
  2798. }
  2799. void address_space_remove_listeners(AddressSpace *as)
  2800. {
  2801. while (!QTAILQ_EMPTY(&as->listeners)) {
  2802. memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
  2803. }
  2804. }
  2805. void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
  2806. {
  2807. memory_region_ref(root);
  2808. as->root = root;
  2809. as->current_map = NULL;
  2810. as->ioeventfd_nb = 0;
  2811. as->ioeventfds = NULL;
  2812. QTAILQ_INIT(&as->listeners);
  2813. QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
  2814. as->max_bounce_buffer_size = DEFAULT_MAX_BOUNCE_BUFFER_SIZE;
  2815. as->bounce_buffer_size = 0;
  2816. qemu_mutex_init(&as->map_client_list_lock);
  2817. QLIST_INIT(&as->map_client_list);
  2818. as->name = g_strdup(name ? name : "anonymous");
  2819. address_space_update_topology(as);
  2820. address_space_update_ioeventfds(as);
  2821. }
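/*
 * Address spaces are usually built around a root (container) region.
 * Sketch with hypothetical names:
 *
 *     memory_region_init(&s->dma_root, OBJECT(s), "dma-root", UINT64_MAX);
 *     address_space_init(&s->dma_as, &s->dma_root, "my-dev-dma");
 */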
  2822. static void do_address_space_destroy(AddressSpace *as)
  2823. {
  2824. assert(qatomic_read(&as->bounce_buffer_size) == 0);
  2825. assert(QLIST_EMPTY(&as->map_client_list));
  2826. qemu_mutex_destroy(&as->map_client_list_lock);
  2827. assert(QTAILQ_EMPTY(&as->listeners));
  2828. flatview_unref(as->current_map);
  2829. g_free(as->name);
  2830. g_free(as->ioeventfds);
  2831. memory_region_unref(as->root);
  2832. }
  2833. void address_space_destroy(AddressSpace *as)
  2834. {
  2835. MemoryRegion *root = as->root;
  2836. /* Flush out anything from MemoryListeners listening in on this */
  2837. memory_region_transaction_begin();
  2838. as->root = NULL;
  2839. memory_region_transaction_commit();
  2840. QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
  2841. /* At this point, as->dispatch and as->current_map are dummy
  2842. * entries that the guest should never use. Wait for the old
  2843. * values to expire before freeing the data.
  2844. */
  2845. as->root = root;
  2846. call_rcu(as, do_address_space_destroy, rcu);
  2847. }
static const char *memory_region_type(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_type(mr->alias);
    }
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)

#define MTREE_INDENT "  "
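
/*
 * "info mtree" printing helpers.  Note that MR_SIZE() above yields the
 * inclusive offset of a region's last byte (size - 1, or 0 for an empty
 * Int128 size), so ranges print inclusively: e.g. a 0x1000-byte region at
 * offset 0 shows up as 0000000000000000-0000000000000fff.
 */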
static void mtree_expand_owner(const char *label, Object *obj)
{
    DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);

    qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
    if (dev && dev->id) {
        qemu_printf(" id=%s", dev->id);
    } else {
        char *canonical_path = object_get_canonical_path(obj);
        if (canonical_path) {
            qemu_printf(" path=%s", canonical_path);
            g_free(canonical_path);
        } else {
            qemu_printf(" type=%s", object_get_typename(obj));
        }
    }
    qemu_printf("}");
}

static void mtree_print_mr_owner(const MemoryRegion *mr)
{
    Object *owner = mr->owner;
    Object *parent = memory_region_owner((MemoryRegion *)mr);

    if (!owner && !parent) {
        qemu_printf(" orphan");
        return;
    }
    if (owner) {
        mtree_expand_owner("owner", owner);
    }
    if (parent && parent != owner) {
        mtree_expand_owner("parent", parent);
    }
}
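
/*
 * Recursively print one MemoryRegion for "info mtree": emit this region's
 * line (alias targets are additionally queued on alias_print_queue so each
 * gets dumped once at the end), then print its subregions sorted by address
 * and, for equal addresses, by descending priority.
 */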
static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue,
                           bool owner, bool display_disabled)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        qemu_printf("[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        if (mr->enabled || display_disabled) {
            for (i = 0; i < level; i++) {
                qemu_printf(MTREE_INDENT);
            }
            qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
                        " (prio %d, %s%s): alias %s @%s " HWADDR_FMT_plx
                        "-" HWADDR_FMT_plx "%s",
                        cur_start, cur_end,
                        mr->priority,
                        mr->nonvolatile ? "nv-" : "",
                        memory_region_type((MemoryRegion *)mr),
                        memory_region_name(mr),
                        memory_region_name(mr->alias),
                        mr->alias_offset,
                        mr->alias_offset + MR_SIZE(mr->size),
                        mr->enabled ? "" : " [disabled]");
            if (owner) {
                mtree_print_mr_owner(mr);
            }
            qemu_printf("\n");
        }
    } else {
        if (mr->enabled || display_disabled) {
            for (i = 0; i < level; i++) {
                qemu_printf(MTREE_INDENT);
            }
            qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
                        " (prio %d, %s%s): %s%s",
                        cur_start, cur_end,
                        mr->priority,
                        mr->nonvolatile ? "nv-" : "",
                        memory_region_type((MemoryRegion *)mr),
                        memory_region_name(mr),
                        mr->enabled ? "" : " [disabled]");
            if (owner) {
                mtree_print_mr_owner(mr);
            }
            qemu_printf("\n");
        }
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(ml->mr, level + 1, cur_start,
                       alias_print_queue, owner, display_disabled);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}
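
/*
 * "info mtree -f" support: print each distinct FlatView once, together with
 * every address space that shares it, optionally annotating each range with
 * the accelerator that has it mapped (fvi->ac) and dumping the dispatch
 * tree.
 */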
struct FlatViewInfo {
    int counter;
    bool dispatch_tree;
    bool owner;
    AccelClass *ac;
};

static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    qemu_printf("FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace*, i);
        qemu_printf(" AS \"%s\", root: %s",
                    as->name, memory_region_name(as->root));
        if (as->root->alias) {
            qemu_printf(", alias %s", memory_region_name(as->root->alias));
        }
        qemu_printf("\n");
    }

    qemu_printf(" Root memory region: %s\n",
                view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
                        " (prio %d, %s%s): %s @" HWADDR_FMT_plx,
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                            + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr),
                        range->offset_in_region);
        } else {
            qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
                        " (prio %d, %s%s): %s",
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                            + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr));
        }
        if (fvi->owner) {
            mtree_print_mr_owner(mr);
        }

        if (fvi->ac) {
            for (i = 0; i < fv_address_spaces->len; ++i) {
                as = g_array_index(fv_address_spaces, AddressSpace*, i);
                if (fvi->ac->has_memory(current_machine, as,
                                        int128_get64(range->addr.start),
                                        MR_SIZE(range->addr.size) + 1)) {
                    qemu_printf(" %s", fvi->ac->name);
                }
            }
        }
        qemu_printf("\n");
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(view->dispatch, view->root);
    }
#endif

    qemu_printf("\n");
}

static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}
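
/*
 * Several address spaces can share one rendered FlatView, so gather them
 * into a hash table keyed by FlatView first; each view is then printed
 * exactly once with all of its address spaces listed above it.
 */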
static void mtree_info_flatview(bool dispatch_tree, bool owner)
{
    struct FlatViewInfo fvi = {
        .counter = 0,
        .dispatch_tree = dispatch_tree,
        .owner = owner,
    };
    AddressSpace *as;
    FlatView *view;
    GArray *fv_address_spaces;
    GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
    AccelClass *ac = ACCEL_GET_CLASS(current_accel());

    if (ac->has_memory) {
        fvi.ac = ac;
    }

    /* Gather all FVs in one table */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        view = address_space_get_flatview(as);

        fv_address_spaces = g_hash_table_lookup(views, view);
        if (!fv_address_spaces) {
            fv_address_spaces = g_array_new(false, false, sizeof(as));
            g_hash_table_insert(views, view, fv_address_spaces);
        }

        g_array_append_val(fv_address_spaces, as);
    }

    /* Print */
    g_hash_table_foreach(views, mtree_print_flatview, &fvi);

    /* Free */
    g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
    g_hash_table_unref(views);
}

struct AddressSpaceInfo {
    MemoryRegionListHead *ml_head;
    bool owner;
    bool disabled;
};

/* Returns negative value if a < b; zero if a = b; positive value if a > b. */
static gint address_space_compare_name(gconstpointer a, gconstpointer b)
{
    const AddressSpace *as_a = a;
    const AddressSpace *as_b = b;

    return g_strcmp0(as_a->name, as_b->name);
}

static void mtree_print_as_name(gpointer data, gpointer user_data)
{
    AddressSpace *as = data;

    qemu_printf("address-space: %s\n", as->name);
}

static void mtree_print_as(gpointer key, gpointer value, gpointer user_data)
{
    MemoryRegion *mr = key;
    GSList *as_same_root_mr_list = value;
    struct AddressSpaceInfo *asi = user_data;

    g_slist_foreach(as_same_root_mr_list, mtree_print_as_name, NULL);
    mtree_print_mr(mr, 1, 0, asi->ml_head, asi->owner, asi->disabled);
    qemu_printf("\n");
}

static gboolean mtree_info_as_free(gpointer key, gpointer value,
                                   gpointer user_data)
{
    GSList *as_same_root_mr_list = value;

    g_slist_free(as_same_root_mr_list);

    return true;
}
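
/*
 * Default "info mtree" output: group address spaces by their root
 * MemoryRegion (address spaces sharing a root are printed under a single
 * tree), dump each tree, and finally dump every alias target that was
 * queued up while printing.
 */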
static void mtree_info_as(bool dispatch_tree, bool owner, bool disabled)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;
    GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
    GSList *as_same_root_mr_list;
    struct AddressSpaceInfo asi = {
        .ml_head = &ml_head,
        .owner = owner,
        .disabled = disabled,
    };

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        /* Create hashtable, key=AS root MR, value = list of AS */
        as_same_root_mr_list = g_hash_table_lookup(views, as->root);
        as_same_root_mr_list = g_slist_insert_sorted(as_same_root_mr_list, as,
                                                     address_space_compare_name);
        g_hash_table_insert(views, as->root, as_same_root_mr_list);
    }

    /* print address spaces */
    g_hash_table_foreach(views, mtree_print_as, &asi);
    g_hash_table_foreach_remove(views, mtree_info_as_free, 0);
    g_hash_table_unref(views);

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
        qemu_printf("\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}

void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
{
    if (flatview) {
        mtree_info_flatview(dispatch_tree, owner);
    } else {
        mtree_info_as(dispatch_tree, owner, disabled);
    }
}
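
/*
 * Migration-aware convenience wrappers: each calls the corresponding
 * *_nomigrate() initializer and then registers the region's RAM for
 * migration under its owner device's name.  Illustrative use from a
 * hypothetical device realize function (not taken from this file):
 *
 *     if (!memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                                 s->ram_size, errp)) {
 *         return;
 *     }
 *     memory_region_add_subregion(get_system_memory(), s->base, &s->ram);
 */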
bool memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;

    if (!memory_region_init_ram_nomigrate(mr, owner, name, size, errp)) {
        return false;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);

    return true;
}

bool memory_region_init_ram_guest_memfd(MemoryRegion *mr,
                                        Object *owner,
                                        const char *name,
                                        uint64_t size,
                                        Error **errp)
{
    DeviceState *owner_dev;

    if (!memory_region_init_ram_flags_nomigrate(mr, owner, name, size,
                                                RAM_GUEST_MEMFD, errp)) {
        return false;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);

    return true;
}

bool memory_region_init_rom(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;

    if (!memory_region_init_rom_nomigrate(mr, owner, name, size, errp)) {
        return false;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);

    return true;
}

bool memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;

    if (!memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                                 name, size, errp)) {
        return false;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);

    return true;
}

/*
 * Support system builds with CONFIG_FUZZ using a weak symbol and a stub for
 * the fuzz_dma_read_cb callback
 */
#ifdef CONFIG_FUZZ
void __attribute__((weak)) fuzz_dma_read_cb(size_t addr,
                                            size_t len,
                                            MemoryRegion *mr)
{
}
#endif
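
/* QOM registration of the MemoryRegion type hierarchy and the
 * RamDiscardManager interface type. */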
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .class_size         = sizeof(MemoryRegionClass),
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};

static const TypeInfo ram_discard_manager_info = {
    .parent             = TYPE_INTERFACE,
    .name               = TYPE_RAM_DISCARD_MANAGER,
    .class_size         = sizeof(RamDiscardManagerClass),
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
    type_register_static(&ram_discard_manager_info);
}

type_init(memory_register_types)