kvm-all.c

/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <poll.h>

#include <linux/kvm.h>

#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "gdbstub/enums.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "sysemu/accel-blocker.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "hw/irq.h"
#include "qapi/visitor.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
#include "sysemu/reset.h"
#include "qemu/guest-random.h"
#include "sysemu/hw_accel.h"
#include "kvm-cpus.h"
#include "sysemu/dirtylimit.h"
#include "qemu/range.h"

#include "hw/boards.h"
#include "sysemu/stats.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
 * need to use the real host PAGE_SIZE, as that's what KVM will use.
 */
#ifdef PAGE_SIZE
#undef PAGE_SIZE
#endif
#define PAGE_SIZE qemu_real_host_page_size()

#ifndef KVM_GUESTDBG_BLOCKIRQ
#define KVM_GUESTDBG_BLOCKIRQ 0
#endif

/* Default num of memslots to be allocated when VM starts */
#define KVM_MEMSLOTS_NR_ALLOC_DEFAULT 16
/* Default max allowed memslots if kernel reported nothing */
#define KVM_MEMSLOTS_NR_MAX_DEFAULT 32

struct KVMParkedVcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    QLIST_ENTRY(KVMParkedVcpu) node;
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_msi_use_devid;
static bool kvm_has_guest_debug;
static int kvm_sstep_flags;
static bool kvm_immediate_exit;
static uint64_t kvm_supported_memory_attributes;
static bool kvm_guest_memfd_supported;
static hwaddr kvm_max_slot_size = ~0;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
    KVM_CAP_INFO(INTERNAL_ERROR_DATA),
    KVM_CAP_INFO(IOEVENTFD),
    KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
    KVM_CAP_LAST_INFO
};

static NotifierList kvm_irqchip_change_notifiers =
    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);

struct KVMResampleFd {
    int gsi;
    EventNotifier *resample_event;
    QLIST_ENTRY(KVMResampleFd) node;
};
typedef struct KVMResampleFd KVMResampleFd;

/*
 * Only used with split irqchip where we need to do the resample fd
 * kick for the kernel from userspace.
 */
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);

static QemuMutex kml_slots_lock;

#define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
#define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)
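
/*
 * kml_slots_lock is a single global mutex: it protects the slots[] array
 * of every KVMMemoryListener (one per address space), which is why the
 * kvm_slots_lock()/kvm_slots_unlock() helpers take no argument.
 */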
static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);

static inline void kvm_resample_fd_remove(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            QLIST_REMOVE(rfd, node);
            g_free(rfd);
            break;
        }
    }
}

static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
{
    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);

    rfd->gsi = gsi;
    rfd->resample_event = event;

    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
}

void kvm_resample_fd_notify(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            event_notifier_set(rfd->resample_event);
            trace_kvm_resample_fd_notify(gsi);
            return;
        }
    }
}

/**
 * kvm_slots_grow(): Grow the slots[] array in the KVMMemoryListener
 *
 * @kml: The KVMMemoryListener* to grow the slots[] array
 * @nr_slots_new: The new size of slots[] array
 *
 * Returns: True if the array grows larger, false otherwise.
 */
static bool kvm_slots_grow(KVMMemoryListener *kml, unsigned int nr_slots_new)
{
    unsigned int i, cur = kml->nr_slots_allocated;
    KVMSlot *slots;

    if (nr_slots_new > kvm_state->nr_slots_max) {
        nr_slots_new = kvm_state->nr_slots_max;
    }

    if (cur >= nr_slots_new) {
        /* Big enough, no need to grow, or we reached max */
        return false;
    }

    if (cur == 0) {
        slots = g_new0(KVMSlot, nr_slots_new);
    } else {
        assert(kml->slots);
        slots = g_renew(KVMSlot, kml->slots, nr_slots_new);
        /*
         * g_renew() doesn't initialize extended buffers, however kvm
         * memslots require fields to be zero-initialized. E.g. pointers,
         * memory_size field, etc.
         */
        memset(&slots[cur], 0x0, sizeof(slots[0]) * (nr_slots_new - cur));
    }

    for (i = cur; i < nr_slots_new; i++) {
        slots[i].slot = i;
    }

    kml->slots = slots;
    kml->nr_slots_allocated = nr_slots_new;
    trace_kvm_slots_grow(cur, nr_slots_new);

    return true;
}

static bool kvm_slots_double(KVMMemoryListener *kml)
{
    return kvm_slots_grow(kml, kml->nr_slots_allocated * 2);
}

unsigned int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_accel());

    return s->nr_slots_max;
}

unsigned int kvm_get_free_memslots(void)
{
    unsigned int used_slots = 0;
    KVMState *s = kvm_state;
    int i;

    kvm_slots_lock();
    for (i = 0; i < s->nr_as; i++) {
        if (!s->as[i].ml) {
            continue;
        }
        used_slots = MAX(used_slots, s->as[i].ml->nr_slots_used);
    }
    kvm_slots_unlock();

    return s->nr_slots_max - used_slots;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    unsigned int n;
    int i;

    for (i = 0; i < kml->nr_slots_allocated; i++) {
        if (kml->slots[i].memory_size == 0) {
            return &kml->slots[i];
        }
    }

    /*
     * If no free slots, try to grow first by doubling.  Cache the old size
     * here to avoid another round of search: if the grow succeeded, it
     * means slots[] now must have the existing "n" slots occupied,
     * followed by one or more free slots starting from slots[n].
     */
    n = kml->nr_slots_allocated;
    if (kvm_slots_double(kml)) {
        return &kml->slots[n];
    }

    return NULL;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                         hwaddr start_addr,
                                         hwaddr size)
{
    int i;

    for (i = 0; i < kml->nr_slots_allocated; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (start_addr == mem->start_addr && size == mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Calculate and align the start address and the size of the section.
 * Return the size. If the size is 0, the aligned section is empty.
 */
static hwaddr kvm_align_section(MemoryRegionSection *section,
                                hwaddr *start)
{
    hwaddr size = int128_get64(section->size);
    hwaddr delta, aligned;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. Pad the start
       address to next and truncate size to previous page boundary. */
    aligned = ROUND_UP(section->offset_within_address_space,
                       qemu_real_host_page_size());
    delta = aligned - section->offset_within_address_space;
    *start = aligned;
    if (delta > size) {
        return 0;
    }

    return (size - delta) & qemu_real_host_page_mask();
}

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
    int i, ret = 0;

    kvm_slots_lock();
    for (i = 0; i < kml->nr_slots_allocated; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            ret = 1;
            break;
        }
    }
    kvm_slots_unlock();

    return ret;
}

static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
{
    KVMState *s = kvm_state;
    struct kvm_userspace_memory_region2 mem;
    int ret;

    mem.slot = slot->slot | (kml->as_id << 16);
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    mem.guest_memfd = slot->guest_memfd;
    mem.guest_memfd_offset = slot->guest_memfd_offset;

    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;

        if (kvm_guest_memfd_supported) {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
        } else {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
        }
        if (ret < 0) {
            goto err;
        }
    }
    mem.memory_size = slot->memory_size;
    if (kvm_guest_memfd_supported) {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
    } else {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    }
    slot->old_flags = mem.flags;
err:
    trace_kvm_set_user_memory(mem.slot >> 16, (uint16_t)mem.slot, mem.flags,
                              mem.guest_phys_addr, mem.memory_size,
                              mem.userspace_addr, mem.guest_memfd,
                              mem.guest_memfd_offset, ret);
    if (ret < 0) {
        if (kvm_guest_memfd_supported) {
            error_report("%s: KVM_SET_USER_MEMORY_REGION2 failed, slot=%d,"
                         " start=0x%" PRIx64 ", size=0x%" PRIx64 ","
                         " flags=0x%" PRIx32 ", guest_memfd=%" PRId32 ","
                         " guest_memfd_offset=0x%" PRIx64 ": %s",
                         __func__, mem.slot, slot->start_addr,
                         (uint64_t)mem.memory_size, mem.flags,
                         mem.guest_memfd, (uint64_t)mem.guest_memfd_offset,
                         strerror(errno));
        } else {
            error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
                         " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
                         __func__, mem.slot, slot->start_addr,
                         (uint64_t)mem.memory_size, strerror(errno));
        }
    }
    return ret;
}
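
/*
 * KVM never destroys a vCPU once it has been created, so when a vCPU is
 * unplugged its fd is "parked" on kvm_parked_vcpus and handed back by
 * kvm_unpark_vcpu() if a vCPU with the same id is created again.
 */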
void kvm_park_vcpu(CPUState *cpu)
{
    struct KVMParkedVcpu *vcpu;

    trace_kvm_park_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    vcpu = g_malloc0(sizeof(*vcpu));
    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
    vcpu->kvm_fd = cpu->kvm_fd;
    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
}

int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id)
{
    struct KVMParkedVcpu *cpu;
    int kvm_fd = -ENOENT;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        if (cpu->vcpu_id == vcpu_id) {
            QLIST_REMOVE(cpu, node);
            kvm_fd = cpu->kvm_fd;
            g_free(cpu);
            break;
        }
    }

    trace_kvm_unpark_vcpu(vcpu_id, kvm_fd > 0 ? "unparked" : "!found parked");

    return kvm_fd;
}

static void kvm_reset_parked_vcpus(void *param)
{
    KVMState *s = param;
    struct KVMParkedVcpu *cpu;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        kvm_arch_reset_parked_vcpu(cpu->vcpu_id, cpu->kvm_fd);
    }
}

int kvm_create_vcpu(CPUState *cpu)
{
    unsigned long vcpu_id = kvm_arch_vcpu_id(cpu);
    KVMState *s = kvm_state;
    int kvm_fd;

    /* check if the KVM vCPU already exist but is parked */
    kvm_fd = kvm_unpark_vcpu(s, vcpu_id);
    if (kvm_fd < 0) {
        /* vCPU not parked: create a new KVM vCPU */
        kvm_fd = kvm_vm_ioctl(s, KVM_CREATE_VCPU, vcpu_id);
        if (kvm_fd < 0) {
            error_report("KVM_CREATE_VCPU IOCTL failed for vCPU %lu", vcpu_id);
            return kvm_fd;
        }
    }

    cpu->kvm_fd = kvm_fd;
    cpu->kvm_state = s;
    cpu->vcpu_dirty = true;
    cpu->dirty_pages = 0;
    cpu->throttle_us_per_full = 0;

    trace_kvm_create_vcpu(cpu->cpu_index, vcpu_id, kvm_fd);

    return 0;
}

int kvm_create_and_park_vcpu(CPUState *cpu)
{
    int ret = 0;

    ret = kvm_create_vcpu(cpu);
    if (!ret) {
        kvm_park_vcpu(cpu);
    }

    return ret;
}

static int do_kvm_destroy_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    int mmap_size;
    int ret = 0;

    trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_arch_destroy_vcpu(cpu);
    if (ret < 0) {
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        trace_kvm_failed_get_vcpu_mmap_size();
        goto err;
    }

    ret = munmap(cpu->kvm_run, mmap_size);
    if (ret < 0) {
        goto err;
    }

    if (cpu->kvm_dirty_gfns) {
        ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
        if (ret < 0) {
            goto err;
        }
    }

    kvm_park_vcpu(cpu);

err:
    return ret;
}

void kvm_destroy_vcpu(CPUState *cpu)
{
    if (do_kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

int kvm_init_vcpu(CPUState *cpu, Error **errp)
{
    KVMState *s = kvm_state;
    int mmap_size;
    int ret;

    trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    ret = kvm_create_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_create_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        error_setg_errno(errp, -mmap_size,
                         "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        error_setg_errno(errp, ret,
                         "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    if (s->kvm_dirty_ring_size) {
        /* Use MAP_SHARED to share pages with the kernel */
        cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
                                   PROT_READ | PROT_WRITE, MAP_SHARED,
                                   cpu->kvm_fd,
                                   PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
        if (cpu->kvm_dirty_gfns == MAP_FAILED) {
            ret = -errno;
            goto err;
        }
    }

    ret = kvm_arch_init_vcpu(cpu);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
                         kvm_arch_vcpu_id(cpu));
    }
    cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);

err:
    return ret;
}

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(MemoryRegion *mr)
{
    bool readonly = mr->readonly || memory_region_is_romd(mr);
    int flags = 0;

    if (memory_region_get_dirty_log_mask(mr) != 0) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    if (memory_region_has_guest_memfd(mr)) {
        assert(kvm_guest_memfd_supported);
        flags |= KVM_MEM_GUEST_MEMFD;
    }
    return flags;
}

/* Called with KVMMemoryListener.slots_lock held */
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
    mem->flags = kvm_mem_flags(mr);

    /* If nothing changed effectively, no need to issue ioctl */
    if (mem->flags == mem->old_flags) {
        return 0;
    }

    kvm_slot_init_dirty_bitmap(mem);
    return kvm_set_user_memory_region(kml, mem, false);
}

static int kvm_section_update_flags(KVMMemoryListener *kml,
                                    MemoryRegionSection *section)
{
    hwaddr start_addr, size, slot_size;
    KVMSlot *mem;
    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return 0;
    }

    kvm_slots_lock();

    while (size && !ret) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            goto out;
        }

        ret = kvm_slot_update_flags(kml, mem, section->mr);
        start_addr += slot_size;
        size -= slot_size;
    }

out:
    kvm_slots_unlock();
    return ret;
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (old != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (new != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}
/* get kvm's dirty pages bitmap and update qemu's */
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
    ram_addr_t start = slot->ram_start_offset;
    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();

    cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}

static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
{
    memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/* Allocate the dirty bitmap for a slot */
static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
{
    if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
        return;
    }

    /*
     * XXX bad kernel interface alert
     * For dirty bitmap, kernel allocates array of size aligned to
     * bits-per-long.  But for case when the kernel is 64bits and
     * the userspace is 32bits, userspace can't align to the same
     * bits-per-long, since sizeof(long) is different between kernel
     * and user space.  This way, userspace will provide buffer which
     * may be 4 bytes less than the kernel will use, resulting in
     * userspace memory corruption (which is not detectable by valgrind
     * too, in most cases).
     * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
     * a hope that sizeof(long) won't become >8 any time soon.
     *
     * Note: the granule of kvm dirty log is qemu_real_host_page_size.
     * And mem->memory_size is aligned to it (otherwise this mem can't
     * be registered to KVM).
     */
    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
                               /*HOST_LONG_BITS*/ 64) / 8;
    mem->dirty_bmap = g_malloc0(bitmap_size);
    mem->dirty_bmap_size = bitmap_size;
}

/*
 * Sync dirty bitmap from kernel to KVMSlot.dirty_bmap, return true if
 * succeeded, false otherwise
 */
static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
{
    struct kvm_dirty_log d = {};
    int ret;

    d.dirty_bitmap = slot->dirty_bmap;
    d.slot = slot->slot | (slot->as_id << 16);
    ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);

    if (ret == -ENOENT) {
        /* kernel does not have dirty bitmap in this slot */
        ret = 0;
    }
    if (ret) {
        error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
                          __func__, ret);
    }
    return ret == 0;
}

/* Should be with all slots_lock held for the address spaces. */
static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
                                     uint32_t slot_id, uint64_t offset)
{
    KVMMemoryListener *kml;
    KVMSlot *mem;

    if (as_id >= s->nr_as) {
        return;
    }

    kml = s->as[as_id].ml;
    mem = &kml->slots[slot_id];

    if (!mem->memory_size || offset >=
        (mem->memory_size / qemu_real_host_page_size())) {
        return;
    }

    set_bit(offset, mem->dirty_bmap);
}

static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
    /*
     * Read the flags before the value.  Pairs with barrier in
     * KVM's kvm_dirty_ring_push() function.
     */
    return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
}

static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
    /*
     * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS
     * sees the full content of the ring:
     *
     * CPU0                     CPU1                         CPU2
     * ------------------------------------------------------------------------------
     *                                                       fill gfn0
     *                                                       store-rel flags for gfn0
     * load-acq flags for gfn0
     * store-rel RESET for gfn0
     *                          ioctl(RESET_RINGS)
     *                          load-acq flags for gfn0
     *                          check if flags have RESET
     *
     * The synchronization goes from CPU2 to CPU0 to CPU1.
     */
    qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}

/*
 * Should be with all slots_lock held for the address spaces.  It returns the
 * dirty page we've collected on this dirty ring.
 */
static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
{
    struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint32_t count = 0, fetch = cpu->kvm_fetch_index;

    /*
     * It's possible that we race with vcpu creation code where the vcpu is
     * put onto the vcpus list but not yet initialized the dirty ring
     * structures.  If so, skip it.
     */
    if (!cpu->created) {
        return 0;
    }

    assert(dirty_gfns && ring_size);
    trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);

    while (true) {
        cur = &dirty_gfns[fetch % ring_size];
        if (!dirty_gfn_is_dirtied(cur)) {
            break;
        }
        kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
                                 cur->offset);
        dirty_gfn_set_collected(cur);
        trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
        fetch++;
        count++;
    }
    cpu->kvm_fetch_index = fetch;
    cpu->dirty_pages += count;

    return count;
}

/* Must be with slots_lock held */
static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState* cpu)
{
    int ret;
    uint64_t total = 0;
    int64_t stamp;

    stamp = get_clock();

    if (cpu) {
        total = kvm_dirty_ring_reap_one(s, cpu);
    } else {
        CPU_FOREACH(cpu) {
            total += kvm_dirty_ring_reap_one(s, cpu);
        }
    }

    if (total) {
        ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
        assert(ret == total);
    }

    stamp = get_clock() - stamp;

    if (total) {
        trace_kvm_dirty_ring_reap(total, stamp / 1000);
    }

    return total;
}
/*
 * Currently for simplicity, we must hold BQL before calling this.  We can
 * consider to drop the BQL if we're clear with all the race conditions.
 */
static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
{
    uint64_t total;

    /*
     * We need to lock all kvm slots for all address spaces here,
     * because:
     *
     * (1) We need to mark dirty for dirty bitmaps in multiple slots
     *     and for tons of pages, so it's better to take the lock here
     *     once rather than once per page.  And more importantly,
     *
     * (2) We must _NOT_ publish dirty bits to the other threads
     *     (e.g., the migration thread) via the kvm memory slot dirty
     *     bitmaps before correctly re-protect those dirtied pages.
     *     Otherwise we can have potential risk of data corruption if
     *     the page data is read in the other thread before we do
     *     reset below.
     */
    kvm_slots_lock();
    total = kvm_dirty_ring_reap_locked(s, cpu);
    kvm_slots_unlock();

    return total;
}

static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
{
    /* No need to do anything */
}

/*
 * Kick all vcpus out in a synchronized way.  When returned, we
 * guarantee that every vcpu has been kicked and at least returned to
 * userspace once.
 */
static void kvm_cpu_synchronize_kick_all(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
    }
}

/*
 * Flush all the existing dirty pages to the KVM slot buffers.  When
 * this call returns, we guarantee that all the touched dirty pages
 * before calling this function have been put into the per-kvmslot
 * dirty bitmap.
 *
 * This function must be called with BQL held.
 */
static void kvm_dirty_ring_flush(void)
{
    trace_kvm_dirty_ring_flush(0);
    /*
     * The function needs to be serialized.  Since this function
     * should always be with BQL held, serialization is guaranteed.
     * However, let's be sure of it.
     */
    assert(bql_locked());
    /*
     * First make sure to flush the hardware buffers by kicking all
     * vcpus out in a synchronous way.
     */
    kvm_cpu_synchronize_kick_all();
    kvm_dirty_ring_reap(kvm_state, NULL);
    trace_kvm_dirty_ring_flush(1);
}

/**
 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 *
 * This function will first try to fetch dirty bitmap from the kernel,
 * and then updates qemu's dirty bitmap.
 *
 * NOTE: caller must be with kml->slots_lock held.
 *
 * @kml: the KVM memory listener object
 * @section: the memory section to sync the dirty bitmap with
 */
static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                           MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    hwaddr start_addr, size;
    hwaddr slot_size;

    size = kvm_align_section(section, &start_addr);
    while (size) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            return;
        }
        if (kvm_slot_get_dirty_log(s, mem)) {
            kvm_slot_sync_dirty_pages(mem);
        }
        start_addr += slot_size;
        size -= slot_size;
    }
}

/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
#define KVM_CLEAR_LOG_SHIFT  6
#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)

static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
                                  uint64_t size)
{
    KVMState *s = kvm_state;
    uint64_t end, bmap_start, start_delta, bmap_npages;
    struct kvm_clear_dirty_log d;
    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
    int ret;

    /*
     * We need to extend either the start or the size or both to
     * satisfy the KVM interface requirement.  Firstly, do the start
     * page alignment on 64 host pages
     */
    bmap_start = start & KVM_CLEAR_LOG_MASK;
    start_delta = start - bmap_start;
    bmap_start /= psize;

    /*
     * The kernel interface has restriction on the size too, that either:
     *
     * (1) the size is 64 host pages aligned (just like the start), or
     * (2) the size fills up until the end of the KVM memslot.
     */
    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
        << KVM_CLEAR_LOG_SHIFT;
    end = mem->memory_size / psize;
    if (bmap_npages > end - bmap_start) {
        bmap_npages = end - bmap_start;
    }
    start_delta /= psize;

    /*
     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
     * that we won't clear any unknown dirty bits otherwise we might
     * accidentally clear some set bits which are not yet synced from
     * the kernel into QEMU's bitmap, then we'll lose track of the
     * guest modifications upon those pages (which can directly lead
     * to guest data loss or panic after migration).
     *
     * Layout of the KVMSlot.dirty_bmap:
     *
     *                   |<-------- bmap_npages -----------..>|
     *                                                     [1]
     *                     start_delta         size
     *  |----------------|-------------|------------------|------------|
     *  ^                ^             ^                               ^
     *  |                |             |                               |
     * start          bmap_start     (start)                         end
     * of memslot                                                  of memslot
     *
     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
     */
    assert(bmap_start % BITS_PER_LONG == 0);
    /* We should never do log_clear before log_sync */
    assert(mem->dirty_bmap);
    if (start_delta || bmap_npages - size / psize) {
        /* Slow path - we need to manipulate a temp bitmap */
        bmap_clear = bitmap_new(bmap_npages);
        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
                                    bmap_start, start_delta + size / psize);
        /*
         * We need to fill the holes at start because that was not
         * specified by the caller and we extended the bitmap only for
         * 64 pages alignment
         */
        bitmap_clear(bmap_clear, 0, start_delta);
        d.dirty_bitmap = bmap_clear;
    } else {
        /*
         * Fast path - both start and size align well with BITS_PER_LONG
         * (or the end of memory slot)
         */
        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
    }

    d.first_page = bmap_start;
    /* It should never overflow.  If it happens, say something */
    assert(bmap_npages <= UINT32_MAX);
    d.num_pages = bmap_npages;
    d.slot = mem->slot | (as_id << 16);

    ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
    if (ret < 0 && ret != -ENOENT) {
        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
                     __func__, d.slot, (uint64_t)d.first_page,
                     (uint32_t)d.num_pages, ret);
    } else {
        ret = 0;
        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
    }

    /*
     * After we have updated the remote dirty bitmap, we update the
     * cached bitmap as well for the memslot, then if another user
     * clears the same region we know we shouldn't clear it again on
     * the remote otherwise it's data loss as well.
     */
    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
                 size / psize);
    /* This handles the NULL case well */
    g_free(bmap_clear);
    return ret;
}
/**
 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 *
 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 * protection in the host kernel because in that case this operation
 * will be done within log_sync().
 *
 * @kml:     the kvm memory listener
 * @section: the memory range to clear dirty bitmap
 */
static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    uint64_t start, size, offset, count;
    KVMSlot *mem;
    int ret = 0, i;

    if (!s->manual_dirty_log_protect) {
        /* No need to do explicit clear */
        return ret;
    }

    start = section->offset_within_address_space;
    size = int128_get64(section->size);

    if (!size) {
        /* Nothing more we can do... */
        return ret;
    }

    kvm_slots_lock();

    for (i = 0; i < kml->nr_slots_allocated; i++) {
        mem = &kml->slots[i];
        /* Discard slots that are empty or do not overlap the section */
        if (!mem->memory_size ||
            mem->start_addr > start + size - 1 ||
            start > mem->start_addr + mem->memory_size - 1) {
            continue;
        }

        if (start >= mem->start_addr) {
            /* The slot starts before section or is aligned to it.  */
            offset = start - mem->start_addr;
            count = MIN(mem->memory_size - offset, size);
        } else {
            /* The slot starts after section.  */
            offset = 0;
            count = MIN(mem->memory_size, size - (mem->start_addr - start));
        }
        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
        if (ret < 0) {
            break;
        }
    }

    kvm_slots_unlock();

    return ret;
}
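
/*
 * Coalesced MMIO/PIO zones let KVM queue guest writes to the registered
 * ranges in the shared coalesced_mmio ring page instead of exiting to
 * userspace for every access; the queued writes are replayed later by
 * kvm_flush_coalesced_mmio_buffer().
 */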
static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}
  1063. int kvm_check_extension(KVMState *s, unsigned int extension)
  1064. {
  1065. int ret;
  1066. ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
  1067. if (ret < 0) {
  1068. ret = 0;
  1069. }
  1070. return ret;
  1071. }
  1072. int kvm_vm_check_extension(KVMState *s, unsigned int extension)
  1073. {
  1074. int ret;
  1075. ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
  1076. if (ret < 0) {
  1077. /* VM wide version not implemented, use global one instead */
  1078. ret = kvm_check_extension(s, extension);
  1079. }
  1080. return ret;
  1081. }
  1082. /*
  1083. * We track the poisoned pages to be able to:
  1084. * - replace them on VM reset
  1085. * - block a migration for a VM with a poisoned page
  1086. */
  1087. typedef struct HWPoisonPage {
  1088. ram_addr_t ram_addr;
  1089. QLIST_ENTRY(HWPoisonPage) list;
  1090. } HWPoisonPage;
  1091. static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
  1092. QLIST_HEAD_INITIALIZER(hwpoison_page_list);
  1093. static void kvm_unpoison_all(void *param)
  1094. {
  1095. HWPoisonPage *page, *next_page;
  1096. QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
  1097. QLIST_REMOVE(page, list);
  1098. qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
  1099. g_free(page);
  1100. }
  1101. }
  1102. void kvm_hwpoison_page_add(ram_addr_t ram_addr)
  1103. {
  1104. HWPoisonPage *page;
  1105. QLIST_FOREACH(page, &hwpoison_page_list, list) {
  1106. if (page->ram_addr == ram_addr) {
  1107. return;
  1108. }
  1109. }
  1110. page = g_new(HWPoisonPage, 1);
  1111. page->ram_addr = ram_addr;
  1112. QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
  1113. }
  1114. bool kvm_hwpoisoned_mem(void)
  1115. {
  1116. return !QLIST_EMPTY(&hwpoison_page_list);
  1117. }
  1118. static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
  1119. {
  1120. #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
  1121. /* The kernel expects ioeventfd values in HOST_BIG_ENDIAN
  1122. * endianness, but the memory core hands them in target endianness.
  1123. * For example, PPC is always treated as big-endian even if running
  1124. * on KVM and on PPC64LE. Correct here.
  1125. */
  1126. switch (size) {
  1127. case 2:
  1128. val = bswap16(val);
  1129. break;
  1130. case 4:
  1131. val = bswap32(val);
  1132. break;
  1133. }
  1134. #endif
  1135. return val;
  1136. }
  1137. static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
  1138. bool assign, uint32_t size, bool datamatch)
  1139. {
  1140. int ret;
  1141. struct kvm_ioeventfd iofd = {
  1142. .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
  1143. .addr = addr,
  1144. .len = size,
  1145. .flags = 0,
  1146. .fd = fd,
  1147. };
  1148. trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
  1149. datamatch);
  1150. if (!kvm_enabled()) {
  1151. return -ENOSYS;
  1152. }
  1153. if (datamatch) {
  1154. iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
  1155. }
  1156. if (!assign) {
  1157. iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
  1158. }
  1159. ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
  1160. if (ret < 0) {
  1161. return -errno;
  1162. }
  1163. return 0;
  1164. }
  1165. static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
  1166. bool assign, uint32_t size, bool datamatch)
  1167. {
  1168. struct kvm_ioeventfd kick = {
  1169. .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
  1170. .addr = addr,
  1171. .flags = KVM_IOEVENTFD_FLAG_PIO,
  1172. .len = size,
  1173. .fd = fd,
  1174. };
  1175. int r;
  1176. trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
  1177. if (!kvm_enabled()) {
  1178. return -ENOSYS;
  1179. }
  1180. if (datamatch) {
  1181. kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
  1182. }
  1183. if (!assign) {
  1184. kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
  1185. }
  1186. r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
  1187. if (r < 0) {
  1188. return r;
  1189. }
  1190. return 0;
  1191. }
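/*
 * Walk a capability list terminated by a NULL name and return the first
 * entry whose extension is not supported by the kernel, or NULL if every
 * capability in the list is available.
 */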
  1192. static const KVMCapabilityInfo *
  1193. kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
  1194. {
  1195. while (list->name) {
  1196. if (!kvm_check_extension(s, list->value)) {
  1197. return list;
  1198. }
  1199. list++;
  1200. }
  1201. return NULL;
  1202. }
  1203. void kvm_set_max_memslot_size(hwaddr max_slot_size)
  1204. {
  1205. g_assert(
  1206. ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
  1207. );
  1208. kvm_max_slot_size = max_slot_size;
  1209. }
  1210. static int kvm_set_memory_attributes(hwaddr start, uint64_t size, uint64_t attr)
  1211. {
  1212. struct kvm_memory_attributes attrs;
  1213. int r;
  1214. assert((attr & kvm_supported_memory_attributes) == attr);
  1215. attrs.attributes = attr;
  1216. attrs.address = start;
  1217. attrs.size = size;
  1218. attrs.flags = 0;
  1219. r = kvm_vm_ioctl(kvm_state, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
  1220. if (r) {
  1221. error_report("failed to set memory (0x%" HWADDR_PRIx "+0x%" PRIx64 ") "
  1222. "with attr 0x%" PRIx64 " error '%s'",
  1223. start, size, attr, strerror(errno));
  1224. }
  1225. return r;
  1226. }
  1227. int kvm_set_memory_attributes_private(hwaddr start, uint64_t size)
  1228. {
  1229. return kvm_set_memory_attributes(start, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
  1230. }
  1231. int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size)
  1232. {
  1233. return kvm_set_memory_attributes(start, size, 0);
  1234. }
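/*
 * Add or remove the KVM memslots backing a MemoryRegionSection. Large
 * sections are split into chunks of at most kvm_max_slot_size; when a slot
 * that had dirty logging enabled is removed, a best-effort dirty sync is
 * performed first.
 */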
  1235. /* Called with KVMMemoryListener.slots_lock held */
  1236. static void kvm_set_phys_mem(KVMMemoryListener *kml,
  1237. MemoryRegionSection *section, bool add)
  1238. {
  1239. KVMSlot *mem;
  1240. int err;
  1241. MemoryRegion *mr = section->mr;
  1242. bool writable = !mr->readonly && !mr->rom_device;
  1243. hwaddr start_addr, size, slot_size, mr_offset;
  1244. ram_addr_t ram_start_offset;
  1245. void *ram;
  1246. if (!memory_region_is_ram(mr)) {
  1247. if (writable || !kvm_readonly_mem_allowed) {
  1248. return;
  1249. } else if (!mr->romd_mode) {
  1250. /* If the memory device is not in romd_mode, then we actually want
  1251. * to remove the kvm memory slot so all accesses will trap. */
  1252. add = false;
  1253. }
  1254. }
  1255. size = kvm_align_section(section, &start_addr);
  1256. if (!size) {
  1257. return;
  1258. }
  1259. /* The offset of the kvmslot within the memory region */
  1260. mr_offset = section->offset_within_region + start_addr -
  1261. section->offset_within_address_space;
  1262. /* use aligned delta to align the ram address and offset */
  1263. ram = memory_region_get_ram_ptr(mr) + mr_offset;
  1264. ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;
  1265. if (!add) {
  1266. do {
  1267. slot_size = MIN(kvm_max_slot_size, size);
  1268. mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
  1269. if (!mem) {
  1270. return;
  1271. }
  1272. if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
  1273. /*
1274. * NOTE: We should be aware of the fact that here we're only
1275. * doing a best-effort sync of the dirty bits. No matter whether
1276. * we're using the dirty log or the dirty ring, we ignore two facts:
1277. *
1278. * (1) dirty bits can still reside in hardware buffers (PML)
1279. *
1280. * (2) after we collect the dirty bits here, pages can be dirtied
1281. * again before we do the final KVM_SET_USER_MEMORY_REGION to
1282. * remove the slot.
1283. *
1284. * Not easy to fix. Let's cross our fingers until it is.
  1285. */
  1286. if (kvm_state->kvm_dirty_ring_size) {
  1287. kvm_dirty_ring_reap_locked(kvm_state, NULL);
  1288. if (kvm_state->kvm_dirty_ring_with_bitmap) {
  1289. kvm_slot_sync_dirty_pages(mem);
  1290. kvm_slot_get_dirty_log(kvm_state, mem);
  1291. }
  1292. } else {
  1293. kvm_slot_get_dirty_log(kvm_state, mem);
  1294. }
  1295. kvm_slot_sync_dirty_pages(mem);
  1296. }
  1297. /* unregister the slot */
  1298. g_free(mem->dirty_bmap);
  1299. mem->dirty_bmap = NULL;
  1300. mem->memory_size = 0;
  1301. mem->flags = 0;
  1302. err = kvm_set_user_memory_region(kml, mem, false);
  1303. if (err) {
  1304. fprintf(stderr, "%s: error unregistering slot: %s\n",
  1305. __func__, strerror(-err));
  1306. abort();
  1307. }
  1308. start_addr += slot_size;
  1309. size -= slot_size;
  1310. kml->nr_slots_used--;
  1311. } while (size);
  1312. return;
  1313. }
  1314. /* register the new slot */
  1315. do {
  1316. slot_size = MIN(kvm_max_slot_size, size);
  1317. mem = kvm_alloc_slot(kml);
  1318. mem->as_id = kml->as_id;
  1319. mem->memory_size = slot_size;
  1320. mem->start_addr = start_addr;
  1321. mem->ram_start_offset = ram_start_offset;
  1322. mem->ram = ram;
  1323. mem->flags = kvm_mem_flags(mr);
  1324. mem->guest_memfd = mr->ram_block->guest_memfd;
  1325. mem->guest_memfd_offset = (uint8_t*)ram - mr->ram_block->host;
  1326. kvm_slot_init_dirty_bitmap(mem);
  1327. err = kvm_set_user_memory_region(kml, mem, true);
  1328. if (err) {
  1329. fprintf(stderr, "%s: error registering slot: %s\n", __func__,
  1330. strerror(-err));
  1331. abort();
  1332. }
  1333. if (memory_region_has_guest_memfd(mr)) {
  1334. err = kvm_set_memory_attributes_private(start_addr, slot_size);
  1335. if (err) {
  1336. error_report("%s: failed to set memory attribute private: %s",
  1337. __func__, strerror(-err));
  1338. exit(1);
  1339. }
  1340. }
  1341. start_addr += slot_size;
  1342. ram_start_offset += slot_size;
  1343. ram += slot_size;
  1344. size -= slot_size;
  1345. kml->nr_slots_used++;
  1346. } while (size);
  1347. }
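/*
 * Background thread that periodically (once per second) reaps the per-vCPU
 * dirty rings under the BQL. It stays idle while the dirty-limit logic is
 * in service so the two mechanisms do not interfere with each other.
 */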
  1348. static void *kvm_dirty_ring_reaper_thread(void *data)
  1349. {
  1350. KVMState *s = data;
  1351. struct KVMDirtyRingReaper *r = &s->reaper;
  1352. rcu_register_thread();
  1353. trace_kvm_dirty_ring_reaper("init");
  1354. while (true) {
  1355. r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
  1356. trace_kvm_dirty_ring_reaper("wait");
  1357. /*
  1358. * TODO: provide a smarter timeout rather than a constant?
  1359. */
  1360. sleep(1);
1361. /* keep sleeping so that the dirty limit logic is not disturbed by the reaper */
  1362. if (dirtylimit_in_service()) {
  1363. continue;
  1364. }
  1365. trace_kvm_dirty_ring_reaper("wakeup");
  1366. r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
  1367. bql_lock();
  1368. kvm_dirty_ring_reap(s, NULL);
  1369. bql_unlock();
  1370. r->reaper_iteration++;
  1371. }
  1372. g_assert_not_reached();
  1373. }
  1374. static void kvm_dirty_ring_reaper_init(KVMState *s)
  1375. {
  1376. struct KVMDirtyRingReaper *r = &s->reaper;
  1377. qemu_thread_create(&r->reaper_thr, "kvm-reaper",
  1378. kvm_dirty_ring_reaper_thread,
  1379. s, QEMU_THREAD_JOINABLE);
  1380. }
  1381. static int kvm_dirty_ring_init(KVMState *s)
  1382. {
  1383. uint32_t ring_size = s->kvm_dirty_ring_size;
  1384. uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
  1385. unsigned int capability = KVM_CAP_DIRTY_LOG_RING;
  1386. int ret;
  1387. s->kvm_dirty_ring_size = 0;
  1388. s->kvm_dirty_ring_bytes = 0;
  1389. /* Bail if the dirty ring size isn't specified */
  1390. if (!ring_size) {
  1391. return 0;
  1392. }
  1393. /*
  1394. * Read the max supported pages. Fall back to dirty logging mode
  1395. * if the dirty ring isn't supported.
  1396. */
  1397. ret = kvm_vm_check_extension(s, capability);
  1398. if (ret <= 0) {
  1399. capability = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
  1400. ret = kvm_vm_check_extension(s, capability);
  1401. }
  1402. if (ret <= 0) {
  1403. warn_report("KVM dirty ring not available, using bitmap method");
  1404. return 0;
  1405. }
  1406. if (ring_bytes > ret) {
  1407. error_report("KVM dirty ring size %" PRIu32 " too big "
  1408. "(maximum is %ld). Please use a smaller value.",
  1409. ring_size, (long)ret / sizeof(struct kvm_dirty_gfn));
  1410. return -EINVAL;
  1411. }
  1412. ret = kvm_vm_enable_cap(s, capability, 0, ring_bytes);
  1413. if (ret) {
  1414. error_report("Enabling of KVM dirty ring failed: %s. "
  1415. "Suggested minimum value is 1024.", strerror(-ret));
  1416. return -EIO;
  1417. }
  1418. /* Enable the backup bitmap if it is supported */
  1419. ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP);
  1420. if (ret > 0) {
  1421. ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP, 0);
  1422. if (ret) {
  1423. error_report("Enabling of KVM dirty ring's backup bitmap failed: "
  1424. "%s. ", strerror(-ret));
  1425. return -EIO;
  1426. }
  1427. s->kvm_dirty_ring_with_bitmap = true;
  1428. }
  1429. s->kvm_dirty_ring_size = ring_size;
  1430. s->kvm_dirty_ring_bytes = ring_bytes;
  1431. return 0;
  1432. }
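/*
 * The region_add/region_del callbacks only queue the affected sections;
 * the actual memslot updates are applied in kvm_region_commit(), so that
 * overlapping add/remove pairs can be handled as one atomic update.
 */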
  1433. static void kvm_region_add(MemoryListener *listener,
  1434. MemoryRegionSection *section)
  1435. {
  1436. KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
  1437. KVMMemoryUpdate *update;
  1438. update = g_new0(KVMMemoryUpdate, 1);
  1439. update->section = *section;
  1440. QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next);
  1441. }
  1442. static void kvm_region_del(MemoryListener *listener,
  1443. MemoryRegionSection *section)
  1444. {
  1445. KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
  1446. KVMMemoryUpdate *update;
  1447. update = g_new0(KVMMemoryUpdate, 1);
  1448. update->section = *section;
  1449. QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next);
  1450. }
  1451. static void kvm_region_commit(MemoryListener *listener)
  1452. {
  1453. KVMMemoryListener *kml = container_of(listener, KVMMemoryListener,
  1454. listener);
  1455. KVMMemoryUpdate *u1, *u2;
  1456. bool need_inhibit = false;
  1457. if (QSIMPLEQ_EMPTY(&kml->transaction_add) &&
  1458. QSIMPLEQ_EMPTY(&kml->transaction_del)) {
  1459. return;
  1460. }
  1461. /*
  1462. * We have to be careful when regions to add overlap with ranges to remove.
  1463. * We have to simulate atomic KVM memslot updates by making sure no ioctl()
  1464. * is currently active.
  1465. *
1466. * The lists are ordered by address, so it's easy to find overlaps.
  1467. */
  1468. u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
  1469. u2 = QSIMPLEQ_FIRST(&kml->transaction_add);
  1470. while (u1 && u2) {
  1471. Range r1, r2;
  1472. range_init_nofail(&r1, u1->section.offset_within_address_space,
  1473. int128_get64(u1->section.size));
  1474. range_init_nofail(&r2, u2->section.offset_within_address_space,
  1475. int128_get64(u2->section.size));
  1476. if (range_overlaps_range(&r1, &r2)) {
  1477. need_inhibit = true;
  1478. break;
  1479. }
  1480. if (range_lob(&r1) < range_lob(&r2)) {
  1481. u1 = QSIMPLEQ_NEXT(u1, next);
  1482. } else {
  1483. u2 = QSIMPLEQ_NEXT(u2, next);
  1484. }
  1485. }
  1486. kvm_slots_lock();
  1487. if (need_inhibit) {
  1488. accel_ioctl_inhibit_begin();
  1489. }
  1490. /* Remove all memslots before adding the new ones. */
  1491. while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) {
  1492. u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
  1493. QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next);
  1494. kvm_set_phys_mem(kml, &u1->section, false);
  1495. memory_region_unref(u1->section.mr);
  1496. g_free(u1);
  1497. }
  1498. while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) {
  1499. u1 = QSIMPLEQ_FIRST(&kml->transaction_add);
  1500. QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next);
  1501. memory_region_ref(u1->section.mr);
  1502. kvm_set_phys_mem(kml, &u1->section, true);
  1503. g_free(u1);
  1504. }
  1505. if (need_inhibit) {
  1506. accel_ioctl_inhibit_end();
  1507. }
  1508. kvm_slots_unlock();
  1509. }
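/*
 * kvm_log_sync() is used with the classic dirty-log interface and syncs a
 * single section, while kvm_log_sync_global() is used with the dirty ring
 * and flushes the rings before syncing every slot that has dirty logging
 * enabled.
 */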
  1510. static void kvm_log_sync(MemoryListener *listener,
  1511. MemoryRegionSection *section)
  1512. {
  1513. KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
  1514. kvm_slots_lock();
  1515. kvm_physical_sync_dirty_bitmap(kml, section);
  1516. kvm_slots_unlock();
  1517. }
  1518. static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
  1519. {
  1520. KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
  1521. KVMState *s = kvm_state;
  1522. KVMSlot *mem;
  1523. int i;
  1524. /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
  1525. kvm_dirty_ring_flush();
  1526. kvm_slots_lock();
  1527. for (i = 0; i < kml->nr_slots_allocated; i++) {
  1528. mem = &kml->slots[i];
  1529. if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
  1530. kvm_slot_sync_dirty_pages(mem);
  1531. if (s->kvm_dirty_ring_with_bitmap && last_stage &&
  1532. kvm_slot_get_dirty_log(s, mem)) {
  1533. kvm_slot_sync_dirty_pages(mem);
  1534. }
  1535. /*
  1536. * This is not needed by KVM_GET_DIRTY_LOG because the
  1537. * ioctl will unconditionally overwrite the whole region.
1538. * However, the kvm dirty ring has no such side effect.
  1539. */
  1540. kvm_slot_reset_dirty_pages(mem);
  1541. }
  1542. }
  1543. kvm_slots_unlock();
  1544. }
  1545. static void kvm_log_clear(MemoryListener *listener,
  1546. MemoryRegionSection *section)
  1547. {
  1548. KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
  1549. int r;
  1550. r = kvm_physical_log_clear(kml, section);
  1551. if (r < 0) {
  1552. error_report_once("%s: kvm log clear failed: mr=%s "
  1553. "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
  1554. section->mr->name, section->offset_within_region,
  1555. int128_get64(section->size));
  1556. abort();
  1557. }
  1558. }
  1559. static void kvm_mem_ioeventfd_add(MemoryListener *listener,
  1560. MemoryRegionSection *section,
  1561. bool match_data, uint64_t data,
  1562. EventNotifier *e)
  1563. {
  1564. int fd = event_notifier_get_fd(e);
  1565. int r;
  1566. r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
  1567. data, true, int128_get64(section->size),
  1568. match_data);
  1569. if (r < 0) {
  1570. fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
  1571. __func__, strerror(-r), -r);
  1572. abort();
  1573. }
  1574. }
  1575. static void kvm_mem_ioeventfd_del(MemoryListener *listener,
  1576. MemoryRegionSection *section,
  1577. bool match_data, uint64_t data,
  1578. EventNotifier *e)
  1579. {
  1580. int fd = event_notifier_get_fd(e);
  1581. int r;
  1582. r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
  1583. data, false, int128_get64(section->size),
  1584. match_data);
  1585. if (r < 0) {
  1586. fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
  1587. __func__, strerror(-r), -r);
  1588. abort();
  1589. }
  1590. }
  1591. static void kvm_io_ioeventfd_add(MemoryListener *listener,
  1592. MemoryRegionSection *section,
  1593. bool match_data, uint64_t data,
  1594. EventNotifier *e)
  1595. {
  1596. int fd = event_notifier_get_fd(e);
  1597. int r;
  1598. r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
  1599. data, true, int128_get64(section->size),
  1600. match_data);
  1601. if (r < 0) {
  1602. fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
  1603. __func__, strerror(-r), -r);
  1604. abort();
  1605. }
  1606. }
  1607. static void kvm_io_ioeventfd_del(MemoryListener *listener,
  1608. MemoryRegionSection *section,
  1609. bool match_data, uint64_t data,
  1610. EventNotifier *e)
  1611. {
  1612. int fd = event_notifier_get_fd(e);
  1613. int r;
  1614. r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
  1615. data, false, int128_get64(section->size),
  1616. match_data);
  1617. if (r < 0) {
  1618. fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
  1619. __func__, strerror(-r), -r);
  1620. abort();
  1621. }
  1622. }
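/*
 * Wire up a KVMMemoryListener for the given address space. When the dirty
 * ring is enabled, the global sync hook is installed; otherwise the
 * per-section log_sync/log_clear pair is used.
 */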
  1623. void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
  1624. AddressSpace *as, int as_id, const char *name)
  1625. {
  1626. int i;
  1627. kml->as_id = as_id;
  1628. kvm_slots_grow(kml, KVM_MEMSLOTS_NR_ALLOC_DEFAULT);
  1629. QSIMPLEQ_INIT(&kml->transaction_add);
  1630. QSIMPLEQ_INIT(&kml->transaction_del);
  1631. kml->listener.region_add = kvm_region_add;
  1632. kml->listener.region_del = kvm_region_del;
  1633. kml->listener.commit = kvm_region_commit;
  1634. kml->listener.log_start = kvm_log_start;
  1635. kml->listener.log_stop = kvm_log_stop;
  1636. kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL;
  1637. kml->listener.name = name;
  1638. if (s->kvm_dirty_ring_size) {
  1639. kml->listener.log_sync_global = kvm_log_sync_global;
  1640. } else {
  1641. kml->listener.log_sync = kvm_log_sync;
  1642. kml->listener.log_clear = kvm_log_clear;
  1643. }
  1644. memory_listener_register(&kml->listener, as);
  1645. for (i = 0; i < s->nr_as; ++i) {
  1646. if (!s->as[i].as) {
  1647. s->as[i].as = as;
  1648. s->as[i].ml = kml;
  1649. break;
  1650. }
  1651. }
  1652. }
  1653. static MemoryListener kvm_io_listener = {
  1654. .name = "kvm-io",
  1655. .coalesced_io_add = kvm_coalesce_pio_add,
  1656. .coalesced_io_del = kvm_coalesce_pio_del,
  1657. .eventfd_add = kvm_io_ioeventfd_add,
  1658. .eventfd_del = kvm_io_ioeventfd_del,
  1659. .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
  1660. };
  1661. int kvm_set_irq(KVMState *s, int irq, int level)
  1662. {
  1663. struct kvm_irq_level event;
  1664. int ret;
  1665. assert(kvm_async_interrupts_enabled());
  1666. event.level = level;
  1667. event.irq = irq;
  1668. ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
  1669. if (ret < 0) {
  1670. perror("kvm_set_irq");
  1671. abort();
  1672. }
  1673. return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
  1674. }
  1675. #ifdef KVM_CAP_IRQ_ROUTING
  1676. typedef struct KVMMSIRoute {
  1677. struct kvm_irq_routing_entry kroute;
  1678. QTAILQ_ENTRY(KVMMSIRoute) entry;
  1679. } KVMMSIRoute;
  1680. static void set_gsi(KVMState *s, unsigned int gsi)
  1681. {
  1682. set_bit(gsi, s->used_gsi_bitmap);
  1683. }
  1684. static void clear_gsi(KVMState *s, unsigned int gsi)
  1685. {
  1686. clear_bit(gsi, s->used_gsi_bitmap);
  1687. }
  1688. void kvm_init_irq_routing(KVMState *s)
  1689. {
  1690. int gsi_count;
  1691. gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
  1692. if (gsi_count > 0) {
  1693. /* Round up so we can search ints using ffs */
  1694. s->used_gsi_bitmap = bitmap_new(gsi_count);
  1695. s->gsi_count = gsi_count;
  1696. }
  1697. s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
  1698. s->nr_allocated_irq_routes = 0;
  1699. kvm_arch_init_irq_routing(s);
  1700. }
  1701. void kvm_irqchip_commit_routes(KVMState *s)
  1702. {
  1703. int ret;
  1704. if (kvm_gsi_direct_mapping()) {
  1705. return;
  1706. }
  1707. if (!kvm_gsi_routing_enabled()) {
  1708. return;
  1709. }
  1710. s->irq_routes->flags = 0;
  1711. trace_kvm_irqchip_commit_routes();
  1712. ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
  1713. assert(ret == 0);
  1714. }
  1715. void kvm_add_routing_entry(KVMState *s,
  1716. struct kvm_irq_routing_entry *entry)
  1717. {
  1718. struct kvm_irq_routing_entry *new;
  1719. int n, size;
  1720. if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
  1721. n = s->nr_allocated_irq_routes * 2;
  1722. if (n < 64) {
  1723. n = 64;
  1724. }
  1725. size = sizeof(struct kvm_irq_routing);
  1726. size += n * sizeof(*new);
  1727. s->irq_routes = g_realloc(s->irq_routes, size);
  1728. s->nr_allocated_irq_routes = n;
  1729. }
  1730. n = s->irq_routes->nr++;
  1731. new = &s->irq_routes->entries[n];
  1732. *new = *entry;
  1733. set_gsi(s, entry->gsi);
  1734. }
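/*
 * Replace the routing entry with the same GSI as @new_entry. Returns 0 if
 * a matching entry was found (or was already identical), -ESRCH otherwise.
 */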
  1735. static int kvm_update_routing_entry(KVMState *s,
  1736. struct kvm_irq_routing_entry *new_entry)
  1737. {
  1738. struct kvm_irq_routing_entry *entry;
  1739. int n;
  1740. for (n = 0; n < s->irq_routes->nr; n++) {
  1741. entry = &s->irq_routes->entries[n];
  1742. if (entry->gsi != new_entry->gsi) {
  1743. continue;
  1744. }
1745. if (!memcmp(entry, new_entry, sizeof *entry)) {
  1746. return 0;
  1747. }
  1748. *entry = *new_entry;
  1749. return 0;
  1750. }
  1751. return -ESRCH;
  1752. }
  1753. void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
  1754. {
  1755. struct kvm_irq_routing_entry e = {};
  1756. assert(pin < s->gsi_count);
  1757. e.gsi = irq;
  1758. e.type = KVM_IRQ_ROUTING_IRQCHIP;
  1759. e.flags = 0;
  1760. e.u.irqchip.irqchip = irqchip;
  1761. e.u.irqchip.pin = pin;
  1762. kvm_add_routing_entry(s, &e);
  1763. }
  1764. void kvm_irqchip_release_virq(KVMState *s, int virq)
  1765. {
  1766. struct kvm_irq_routing_entry *e;
  1767. int i;
  1768. if (kvm_gsi_direct_mapping()) {
  1769. return;
  1770. }
  1771. for (i = 0; i < s->irq_routes->nr; i++) {
  1772. e = &s->irq_routes->entries[i];
  1773. if (e->gsi == virq) {
  1774. s->irq_routes->nr--;
  1775. *e = s->irq_routes->entries[s->irq_routes->nr];
  1776. }
  1777. }
  1778. clear_gsi(s, virq);
  1779. kvm_arch_release_virq_post(virq);
  1780. trace_kvm_irqchip_release_virq(virq);
  1781. }
  1782. void kvm_irqchip_add_change_notifier(Notifier *n)
  1783. {
  1784. notifier_list_add(&kvm_irqchip_change_notifiers, n);
  1785. }
  1786. void kvm_irqchip_remove_change_notifier(Notifier *n)
  1787. {
  1788. notifier_remove(n);
  1789. }
  1790. void kvm_irqchip_change_notify(void)
  1791. {
  1792. notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
  1793. }
  1794. int kvm_irqchip_get_virq(KVMState *s)
  1795. {
  1796. int next_virq;
  1797. /* Return the lowest unused GSI in the bitmap */
  1798. next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
  1799. if (next_virq >= s->gsi_count) {
  1800. return -ENOSPC;
  1801. } else {
  1802. return next_virq;
  1803. }
  1804. }
  1805. int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
  1806. {
  1807. struct kvm_msi msi;
  1808. msi.address_lo = (uint32_t)msg.address;
  1809. msi.address_hi = msg.address >> 32;
  1810. msi.data = le32_to_cpu(msg.data);
  1811. msi.flags = 0;
  1812. memset(msi.pad, 0, sizeof(msi.pad));
  1813. return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
  1814. }
  1815. int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
  1816. {
  1817. struct kvm_irq_routing_entry kroute = {};
  1818. int virq;
  1819. KVMState *s = c->s;
  1820. MSIMessage msg = {0, 0};
  1821. if (pci_available && dev) {
  1822. msg = pci_get_msi_message(dev, vector);
  1823. }
  1824. if (kvm_gsi_direct_mapping()) {
  1825. return kvm_arch_msi_data_to_gsi(msg.data);
  1826. }
  1827. if (!kvm_gsi_routing_enabled()) {
  1828. return -ENOSYS;
  1829. }
  1830. virq = kvm_irqchip_get_virq(s);
  1831. if (virq < 0) {
  1832. return virq;
  1833. }
  1834. kroute.gsi = virq;
  1835. kroute.type = KVM_IRQ_ROUTING_MSI;
  1836. kroute.flags = 0;
  1837. kroute.u.msi.address_lo = (uint32_t)msg.address;
  1838. kroute.u.msi.address_hi = msg.address >> 32;
  1839. kroute.u.msi.data = le32_to_cpu(msg.data);
  1840. if (pci_available && kvm_msi_devid_required()) {
  1841. kroute.flags = KVM_MSI_VALID_DEVID;
  1842. kroute.u.msi.devid = pci_requester_id(dev);
  1843. }
  1844. if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
  1845. kvm_irqchip_release_virq(s, virq);
  1846. return -EINVAL;
  1847. }
  1848. if (s->irq_routes->nr < s->gsi_count) {
  1849. trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
  1850. vector, virq);
  1851. kvm_add_routing_entry(s, &kroute);
  1852. kvm_arch_add_msi_route_post(&kroute, vector, dev);
  1853. c->changes++;
  1854. } else {
  1855. kvm_irqchip_release_virq(s, virq);
  1856. return -ENOSPC;
  1857. }
  1858. return virq;
  1859. }
  1860. int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
  1861. PCIDevice *dev)
  1862. {
  1863. struct kvm_irq_routing_entry kroute = {};
  1864. if (kvm_gsi_direct_mapping()) {
  1865. return 0;
  1866. }
  1867. if (!kvm_irqchip_in_kernel()) {
  1868. return -ENOSYS;
  1869. }
  1870. kroute.gsi = virq;
  1871. kroute.type = KVM_IRQ_ROUTING_MSI;
  1872. kroute.flags = 0;
  1873. kroute.u.msi.address_lo = (uint32_t)msg.address;
  1874. kroute.u.msi.address_hi = msg.address >> 32;
  1875. kroute.u.msi.data = le32_to_cpu(msg.data);
  1876. if (pci_available && kvm_msi_devid_required()) {
  1877. kroute.flags = KVM_MSI_VALID_DEVID;
  1878. kroute.u.msi.devid = pci_requester_id(dev);
  1879. }
  1880. if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
  1881. return -EINVAL;
  1882. }
  1883. trace_kvm_irqchip_update_msi_route(virq);
  1884. return kvm_update_routing_entry(s, &kroute);
  1885. }
  1886. static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
  1887. EventNotifier *resample, int virq,
  1888. bool assign)
  1889. {
  1890. int fd = event_notifier_get_fd(event);
  1891. int rfd = resample ? event_notifier_get_fd(resample) : -1;
  1892. struct kvm_irqfd irqfd = {
  1893. .fd = fd,
  1894. .gsi = virq,
  1895. .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
  1896. };
  1897. if (rfd != -1) {
  1898. assert(assign);
  1899. if (kvm_irqchip_is_split()) {
  1900. /*
1901. * When the slow irqchip (e.g. the IOAPIC) lives in
1902. * userspace, the KVM kernel resamplefd will not work because
1903. * the EOI of the interrupt will be delivered to userspace
1904. * instead, so the KVM kernel resamplefd kick will be
1905. * skipped. Userspace here mimics what the kernel
1906. * provides with resamplefd: remember the resamplefd and
1907. * kick it when we receive the EOI of this IRQ.
1908. *
1909. * This is hackery because the IOAPIC is mostly bypassed
1910. * (except for EOI broadcasts) when irqfd is used. However,
1911. * this can bring much performance back for a split irqchip
1912. * with INTx IRQs (for VFIO, this gives 93% of the perf of
1913. * the full fast path, which is a 46% boost compared to
1914. * the INTx slow path).
  1915. */
  1916. kvm_resample_fd_insert(virq, resample);
  1917. } else {
  1918. irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
  1919. irqfd.resamplefd = rfd;
  1920. }
  1921. } else if (!assign) {
  1922. if (kvm_irqchip_is_split()) {
  1923. kvm_resample_fd_remove(virq);
  1924. }
  1925. }
  1926. return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
  1927. }
  1928. #else /* !KVM_CAP_IRQ_ROUTING */
  1929. void kvm_init_irq_routing(KVMState *s)
  1930. {
  1931. }
  1932. void kvm_irqchip_release_virq(KVMState *s, int virq)
  1933. {
  1934. }
  1935. int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
  1936. {
  1937. abort();
  1938. }
  1939. int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
  1940. {
  1941. return -ENOSYS;
  1942. }
  1943. int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
  1944. {
  1945. return -ENOSYS;
  1946. }
  1947. int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
  1948. {
  1949. return -ENOSYS;
  1950. }
  1951. static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
  1952. EventNotifier *resample, int virq,
  1953. bool assign)
  1954. {
  1955. abort();
  1956. }
  1957. int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
  1958. {
  1959. return -ENOSYS;
  1960. }
  1961. #endif /* !KVM_CAP_IRQ_ROUTING */
  1962. int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
  1963. EventNotifier *rn, int virq)
  1964. {
  1965. return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
  1966. }
  1967. int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
  1968. int virq)
  1969. {
  1970. return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
  1971. }
  1972. int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
  1973. EventNotifier *rn, qemu_irq irq)
  1974. {
  1975. gpointer key, gsi;
  1976. gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
  1977. if (!found) {
  1978. return -ENXIO;
  1979. }
  1980. return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
  1981. }
  1982. int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
  1983. qemu_irq irq)
  1984. {
  1985. gpointer key, gsi;
  1986. gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
  1987. if (!found) {
  1988. return -ENXIO;
  1989. }
  1990. return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
  1991. }
  1992. void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
  1993. {
  1994. g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
  1995. }
  1996. static void kvm_irqchip_create(KVMState *s)
  1997. {
  1998. int ret;
  1999. assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
  2000. if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
  2001. ;
  2002. } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
  2003. ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
  2004. if (ret < 0) {
  2005. fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
  2006. exit(1);
  2007. }
  2008. } else {
  2009. return;
  2010. }
  2011. if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) {
  2012. fprintf(stderr, "kvm: irqfd not implemented\n");
  2013. exit(1);
  2014. }
2015. /* First probe and see if there's an arch-specific hook to create the
2016. * in-kernel irqchip for us */
  2017. ret = kvm_arch_irqchip_create(s);
  2018. if (ret == 0) {
  2019. if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
  2020. error_report("Split IRQ chip mode not supported.");
  2021. exit(1);
  2022. } else {
  2023. ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
  2024. }
  2025. }
  2026. if (ret < 0) {
  2027. fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
  2028. exit(1);
  2029. }
  2030. kvm_kernel_irqchip = true;
  2031. /* If we have an in-kernel IRQ chip then we must have asynchronous
  2032. * interrupt delivery (though the reverse is not necessarily true)
  2033. */
  2034. kvm_async_interrupts_allowed = true;
  2035. kvm_halt_in_kernel_allowed = true;
  2036. kvm_init_irq_routing(s);
  2037. s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
  2038. }
  2039. /* Find number of supported CPUs using the recommended
  2040. * procedure from the kernel API documentation to cope with
  2041. * older kernels that may be missing capabilities.
  2042. */
  2043. static int kvm_recommended_vcpus(KVMState *s)
  2044. {
  2045. int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
  2046. return (ret) ? ret : 4;
  2047. }
  2048. static int kvm_max_vcpus(KVMState *s)
  2049. {
  2050. int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
  2051. return (ret) ? ret : kvm_recommended_vcpus(s);
  2052. }
  2053. static int kvm_max_vcpu_id(KVMState *s)
  2054. {
  2055. int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
  2056. return (ret) ? ret : kvm_max_vcpus(s);
  2057. }
  2058. bool kvm_vcpu_id_is_valid(int vcpu_id)
  2059. {
  2060. KVMState *s = KVM_STATE(current_accel());
  2061. return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
  2062. }
  2063. bool kvm_dirty_ring_enabled(void)
  2064. {
  2065. return kvm_state && kvm_state->kvm_dirty_ring_size;
  2066. }
  2067. static void query_stats_cb(StatsResultList **result, StatsTarget target,
  2068. strList *names, strList *targets, Error **errp);
  2069. static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);
  2070. uint32_t kvm_dirty_ring_size(void)
  2071. {
  2072. return kvm_state->kvm_dirty_ring_size;
  2073. }
  2074. static int do_kvm_create_vm(MachineState *ms, int type)
  2075. {
  2076. KVMState *s;
  2077. int ret;
  2078. s = KVM_STATE(ms->accelerator);
  2079. do {
  2080. ret = kvm_ioctl(s, KVM_CREATE_VM, type);
  2081. } while (ret == -EINTR);
  2082. if (ret < 0) {
  2083. error_report("ioctl(KVM_CREATE_VM) failed: %s", strerror(-ret));
  2084. #ifdef TARGET_S390X
  2085. if (ret == -EINVAL) {
  2086. error_printf("Host kernel setup problem detected."
  2087. " Please verify:\n");
  2088. error_printf("- for kernels supporting the"
  2089. " switch_amode or user_mode parameters, whether");
  2090. error_printf(" user space is running in primary address space\n");
  2091. error_printf("- for kernels supporting the vm.allocate_pgste"
  2092. " sysctl, whether it is enabled\n");
  2093. }
  2094. #elif defined(TARGET_PPC)
  2095. if (ret == -EINVAL) {
  2096. error_printf("PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
  2097. (type == 2) ? "pr" : "hv");
  2098. }
  2099. #endif
  2100. }
  2101. return ret;
  2102. }
  2103. static int find_kvm_machine_type(MachineState *ms)
  2104. {
  2105. MachineClass *mc = MACHINE_GET_CLASS(ms);
  2106. int type;
  2107. if (object_property_find(OBJECT(current_machine), "kvm-type")) {
  2108. g_autofree char *kvm_type;
  2109. kvm_type = object_property_get_str(OBJECT(current_machine),
  2110. "kvm-type",
  2111. &error_abort);
  2112. type = mc->kvm_type(ms, kvm_type);
  2113. } else if (mc->kvm_type) {
  2114. type = mc->kvm_type(ms, NULL);
  2115. } else {
  2116. type = kvm_arch_get_default_type(ms);
  2117. }
  2118. return type;
  2119. }
  2120. static int kvm_setup_dirty_ring(KVMState *s)
  2121. {
  2122. uint64_t dirty_log_manual_caps;
  2123. int ret;
  2124. /*
  2125. * Enable KVM dirty ring if supported, otherwise fall back to
  2126. * dirty logging mode
  2127. */
  2128. ret = kvm_dirty_ring_init(s);
  2129. if (ret < 0) {
  2130. return ret;
  2131. }
  2132. /*
  2133. * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is
  2134. * enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
2135. * page is wr-protected initially, which is against how the kvm dirty
2136. * ring is used - the kvm dirty ring requires all pages to be wr-protected
2137. * at the very beginning. Enabling this feature for the dirty ring causes data corruption.
  2138. *
  2139. * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
  2140. * we may expect a higher stall time when starting the migration. In the
  2141. * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too:
  2142. * instead of clearing dirty bit, it can be a way to explicitly wr-protect
  2143. * guest pages.
  2144. */
  2145. if (!s->kvm_dirty_ring_size) {
  2146. dirty_log_manual_caps =
  2147. kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
  2148. dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
  2149. KVM_DIRTY_LOG_INITIALLY_SET);
  2150. s->manual_dirty_log_protect = dirty_log_manual_caps;
  2151. if (dirty_log_manual_caps) {
  2152. ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
  2153. dirty_log_manual_caps);
  2154. if (ret) {
  2155. warn_report("Trying to enable capability %"PRIu64" of "
  2156. "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
  2157. "Falling back to the legacy mode. ",
  2158. dirty_log_manual_caps);
  2159. s->manual_dirty_log_protect = 0;
  2160. }
  2161. }
  2162. }
  2163. return 0;
  2164. }
  2165. static int kvm_init(MachineState *ms)
  2166. {
  2167. MachineClass *mc = MACHINE_GET_CLASS(ms);
  2168. static const char upgrade_note[] =
  2169. "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
  2170. "(see http://sourceforge.net/projects/kvm).\n";
  2171. const struct {
  2172. const char *name;
  2173. int num;
  2174. } num_cpus[] = {
  2175. { "SMP", ms->smp.cpus },
  2176. { "hotpluggable", ms->smp.max_cpus },
  2177. { /* end of list */ }
  2178. }, *nc = num_cpus;
  2179. int soft_vcpus_limit, hard_vcpus_limit;
  2180. KVMState *s;
  2181. const KVMCapabilityInfo *missing_cap;
  2182. int ret;
  2183. int type;
  2184. qemu_mutex_init(&kml_slots_lock);
  2185. s = KVM_STATE(ms->accelerator);
  2186. /*
  2187. * On systems where the kernel can support different base page
  2188. * sizes, host page size may be different from TARGET_PAGE_SIZE,
  2189. * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
  2190. * page size for the system though.
  2191. */
  2192. assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());
  2193. s->sigmask_len = 8;
  2194. accel_blocker_init();
  2195. #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
  2196. QTAILQ_INIT(&s->kvm_sw_breakpoints);
  2197. #endif
  2198. QLIST_INIT(&s->kvm_parked_vcpus);
  2199. s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR);
  2200. if (s->fd == -1) {
  2201. error_report("Could not access KVM kernel module: %m");
  2202. ret = -errno;
  2203. goto err;
  2204. }
  2205. ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
  2206. if (ret < KVM_API_VERSION) {
  2207. if (ret >= 0) {
  2208. ret = -EINVAL;
  2209. }
  2210. error_report("kvm version too old");
  2211. goto err;
  2212. }
  2213. if (ret > KVM_API_VERSION) {
  2214. ret = -EINVAL;
  2215. error_report("kvm version not supported");
  2216. goto err;
  2217. }
  2218. kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
  2219. s->nr_slots_max = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
  2220. /* If unspecified, use the default value */
  2221. if (!s->nr_slots_max) {
  2222. s->nr_slots_max = KVM_MEMSLOTS_NR_MAX_DEFAULT;
  2223. }
  2224. type = find_kvm_machine_type(ms);
  2225. if (type < 0) {
  2226. ret = -EINVAL;
  2227. goto err;
  2228. }
  2229. ret = do_kvm_create_vm(ms, type);
  2230. if (ret < 0) {
  2231. goto err;
  2232. }
  2233. s->vmfd = ret;
  2234. s->nr_as = kvm_vm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
  2235. if (s->nr_as <= 1) {
  2236. s->nr_as = 1;
  2237. }
  2238. s->as = g_new0(struct KVMAs, s->nr_as);
  2239. /* check the vcpu limits */
  2240. soft_vcpus_limit = kvm_recommended_vcpus(s);
  2241. hard_vcpus_limit = kvm_max_vcpus(s);
  2242. while (nc->name) {
  2243. if (nc->num > soft_vcpus_limit) {
  2244. warn_report("Number of %s cpus requested (%d) exceeds "
  2245. "the recommended cpus supported by KVM (%d)",
  2246. nc->name, nc->num, soft_vcpus_limit);
  2247. if (nc->num > hard_vcpus_limit) {
  2248. error_report("Number of %s cpus requested (%d) exceeds "
  2249. "the maximum cpus supported by KVM (%d)",
  2250. nc->name, nc->num, hard_vcpus_limit);
  2251. exit(1);
  2252. }
  2253. }
  2254. nc++;
  2255. }
  2256. missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
  2257. if (!missing_cap) {
  2258. missing_cap =
  2259. kvm_check_extension_list(s, kvm_arch_required_capabilities);
  2260. }
  2261. if (missing_cap) {
  2262. ret = -EINVAL;
  2263. error_report("kvm does not support %s", missing_cap->name);
  2264. error_printf("%s", upgrade_note);
  2265. goto err;
  2266. }
  2267. s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
  2268. s->coalesced_pio = s->coalesced_mmio &&
  2269. kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
  2270. ret = kvm_setup_dirty_ring(s);
  2271. if (ret < 0) {
  2272. goto err;
  2273. }
  2274. #ifdef KVM_CAP_VCPU_EVENTS
  2275. s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
  2276. #endif
  2277. s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
  2278. s->irq_set_ioctl = KVM_IRQ_LINE;
  2279. if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
  2280. s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
  2281. }
  2282. kvm_readonly_mem_allowed =
  2283. (kvm_vm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
  2284. kvm_resamplefds_allowed =
  2285. (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
  2286. kvm_vm_attributes_allowed =
  2287. (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
  2288. #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
  2289. kvm_has_guest_debug =
  2290. (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
  2291. #endif
  2292. kvm_sstep_flags = 0;
  2293. if (kvm_has_guest_debug) {
  2294. kvm_sstep_flags = SSTEP_ENABLE;
  2295. #if defined TARGET_KVM_HAVE_GUEST_DEBUG
  2296. int guest_debug_flags =
  2297. kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2);
  2298. if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) {
  2299. kvm_sstep_flags |= SSTEP_NOIRQ;
  2300. }
  2301. #endif
  2302. }
  2303. kvm_state = s;
  2304. ret = kvm_arch_init(ms, s);
  2305. if (ret < 0) {
  2306. goto err;
  2307. }
  2308. kvm_supported_memory_attributes = kvm_vm_check_extension(s, KVM_CAP_MEMORY_ATTRIBUTES);
  2309. kvm_guest_memfd_supported =
  2310. kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) &&
  2311. kvm_check_extension(s, KVM_CAP_USER_MEMORY2) &&
  2312. (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE);
  2313. if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
  2314. s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
  2315. }
  2316. qemu_register_reset(kvm_unpoison_all, NULL);
  2317. qemu_register_reset(kvm_reset_parked_vcpus, s);
  2318. if (s->kernel_irqchip_allowed) {
  2319. kvm_irqchip_create(s);
  2320. }
  2321. s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
  2322. s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
  2323. s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
  2324. s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
  2325. kvm_memory_listener_register(s, &s->memory_listener,
  2326. &address_space_memory, 0, "kvm-memory");
  2327. memory_listener_register(&kvm_io_listener,
  2328. &address_space_io);
  2329. s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
  2330. if (!s->sync_mmu) {
  2331. ret = ram_block_discard_disable(true);
  2332. assert(!ret);
  2333. }
  2334. if (s->kvm_dirty_ring_size) {
  2335. kvm_dirty_ring_reaper_init(s);
  2336. }
  2337. if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
  2338. add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb,
  2339. query_stats_schemas_cb);
  2340. }
  2341. return 0;
  2342. err:
  2343. assert(ret < 0);
  2344. if (s->vmfd >= 0) {
  2345. close(s->vmfd);
  2346. }
  2347. if (s->fd != -1) {
  2348. close(s->fd);
  2349. }
  2350. g_free(s->as);
  2351. g_free(s->memory_listener.slots);
  2352. return ret;
  2353. }
  2354. void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
  2355. {
  2356. s->sigmask_len = sigmask_len;
  2357. }
  2358. static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
  2359. int size, uint32_t count)
  2360. {
  2361. int i;
  2362. uint8_t *ptr = data;
  2363. for (i = 0; i < count; i++) {
  2364. address_space_rw(&address_space_io, port, attrs,
  2365. ptr, size,
  2366. direction == KVM_EXIT_IO_OUT);
  2367. ptr += size;
  2368. }
  2369. }
  2370. static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
  2371. {
  2372. int i;
  2373. fprintf(stderr, "KVM internal error. Suberror: %d\n",
  2374. run->internal.suberror);
  2375. for (i = 0; i < run->internal.ndata; ++i) {
  2376. fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
  2377. i, (uint64_t)run->internal.data[i]);
  2378. }
  2379. if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
  2380. fprintf(stderr, "emulation failure\n");
  2381. if (!kvm_arch_stop_on_emulation_error(cpu)) {
  2382. cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
  2383. return EXCP_INTERRUPT;
  2384. }
  2385. }
  2386. /* FIXME: Should trigger a qmp message to let management know
  2387. * something went wrong.
  2388. */
  2389. return -1;
  2390. }
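/*
 * Drain the coalesced MMIO ring shared with the kernel, replaying each
 * pending entry to the I/O or memory address space. The in-progress flag
 * guards against re-entrancy while the buffer is being flushed.
 */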
  2391. void kvm_flush_coalesced_mmio_buffer(void)
  2392. {
  2393. KVMState *s = kvm_state;
  2394. if (!s || s->coalesced_flush_in_progress) {
  2395. return;
  2396. }
  2397. s->coalesced_flush_in_progress = true;
  2398. if (s->coalesced_mmio_ring) {
  2399. struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
  2400. while (ring->first != ring->last) {
  2401. struct kvm_coalesced_mmio *ent;
  2402. ent = &ring->coalesced_mmio[ring->first];
  2403. if (ent->pio == 1) {
  2404. address_space_write(&address_space_io, ent->phys_addr,
  2405. MEMTXATTRS_UNSPECIFIED, ent->data,
  2406. ent->len);
  2407. } else {
  2408. cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
  2409. }
  2410. smp_wmb();
  2411. ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
  2412. }
  2413. }
  2414. s->coalesced_flush_in_progress = false;
  2415. }
  2416. static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
  2417. {
  2418. if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
  2419. Error *err = NULL;
  2420. int ret = kvm_arch_get_registers(cpu, &err);
  2421. if (ret) {
  2422. if (err) {
  2423. error_reportf_err(err, "Failed to synchronize CPU state: ");
  2424. } else {
  2425. error_report("Failed to get registers: %s", strerror(-ret));
  2426. }
  2427. cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
  2428. vm_stop(RUN_STATE_INTERNAL_ERROR);
  2429. }
  2430. cpu->vcpu_dirty = true;
  2431. }
  2432. }
  2433. void kvm_cpu_synchronize_state(CPUState *cpu)
  2434. {
  2435. if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
  2436. run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
  2437. }
  2438. }
  2439. static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
  2440. {
  2441. Error *err = NULL;
  2442. int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE, &err);
  2443. if (ret) {
  2444. if (err) {
  2445. error_reportf_err(err, "Restoring resisters after reset: ");
  2446. } else {
  2447. error_report("Failed to put registers after reset: %s",
  2448. strerror(-ret));
  2449. }
  2450. cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
  2451. vm_stop(RUN_STATE_INTERNAL_ERROR);
  2452. }
  2453. cpu->vcpu_dirty = false;
  2454. }
  2455. void kvm_cpu_synchronize_post_reset(CPUState *cpu)
  2456. {
  2457. run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
  2458. }
  2459. static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
  2460. {
  2461. Error *err = NULL;
  2462. int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE, &err);
  2463. if (ret) {
  2464. if (err) {
  2465. error_reportf_err(err, "Putting registers after init: ");
  2466. } else {
  2467. error_report("Failed to put registers after init: %s",
  2468. strerror(-ret));
  2469. }
  2470. exit(1);
  2471. }
  2472. cpu->vcpu_dirty = false;
  2473. }
  2474. void kvm_cpu_synchronize_post_init(CPUState *cpu)
  2475. {
  2476. if (!kvm_state->guest_state_protected) {
  2477. /*
  2478. * This runs before the machine_init_done notifiers, and is the last
  2479. * opportunity to synchronize the state of confidential guests.
  2480. */
  2481. run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
  2482. }
  2483. }
  2484. static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
  2485. {
  2486. cpu->vcpu_dirty = true;
  2487. }
  2488. void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
  2489. {
  2490. run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
  2491. }
  2492. #ifdef KVM_HAVE_MCE_INJECTION
  2493. static __thread void *pending_sigbus_addr;
  2494. static __thread int pending_sigbus_code;
  2495. static __thread bool have_sigbus_pending;
  2496. #endif
  2497. static void kvm_cpu_kick(CPUState *cpu)
  2498. {
  2499. qatomic_set(&cpu->kvm_run->immediate_exit, 1);
  2500. }
  2501. static void kvm_cpu_kick_self(void)
  2502. {
  2503. if (kvm_immediate_exit) {
  2504. kvm_cpu_kick(current_cpu);
  2505. } else {
  2506. qemu_cpu_kick_self();
  2507. }
  2508. }
  2509. static void kvm_eat_signals(CPUState *cpu)
  2510. {
  2511. struct timespec ts = { 0, 0 };
  2512. siginfo_t siginfo;
  2513. sigset_t waitset;
  2514. sigset_t chkset;
  2515. int r;
  2516. if (kvm_immediate_exit) {
  2517. qatomic_set(&cpu->kvm_run->immediate_exit, 0);
  2518. /* Write kvm_run->immediate_exit before the cpu->exit_request
  2519. * write in kvm_cpu_exec.
  2520. */
  2521. smp_wmb();
  2522. return;
  2523. }
  2524. sigemptyset(&waitset);
  2525. sigaddset(&waitset, SIG_IPI);
  2526. do {
  2527. r = sigtimedwait(&waitset, &siginfo, &ts);
  2528. if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
  2529. perror("sigtimedwait");
  2530. exit(1);
  2531. }
  2532. r = sigpending(&chkset);
  2533. if (r == -1) {
  2534. perror("sigpending");
  2535. exit(1);
  2536. }
  2537. } while (sigismember(&chkset, SIG_IPI));
  2538. }
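/*
 * Convert a guest physical range between shared and private for
 * guest_memfd-backed memory: update the KVM memory attributes, then
 * discard the now-unused backing (normal RAM when going private,
 * guest_memfd when going shared). Ranges must be host-page aligned.
 */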
  2539. int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private)
  2540. {
  2541. MemoryRegionSection section;
  2542. ram_addr_t offset;
  2543. MemoryRegion *mr;
  2544. RAMBlock *rb;
  2545. void *addr;
  2546. int ret = -EINVAL;
  2547. trace_kvm_convert_memory(start, size, to_private ? "shared_to_private" : "private_to_shared");
  2548. if (!QEMU_PTR_IS_ALIGNED(start, qemu_real_host_page_size()) ||
  2549. !QEMU_PTR_IS_ALIGNED(size, qemu_real_host_page_size())) {
  2550. return ret;
  2551. }
  2552. if (!size) {
  2553. return ret;
  2554. }
  2555. section = memory_region_find(get_system_memory(), start, size);
  2556. mr = section.mr;
  2557. if (!mr) {
  2558. /*
2559. * Ignore converting a non-assigned region to shared.
2560. *
2561. * TDX requires the vMMIO region to be shared in order to inject #VE
2562. * into the guest. OVMF conservatively issues MapGPA(shared) on the 32bit
2563. * PCI MMIO region and the vIO-APIC 0xFEC00000 4K page.
  2564. * OVMF assigns 32bit PCI MMIO region to
  2565. * [top of low memory: typically 2GB=0xC000000, 0xFC00000)
  2566. */
  2567. if (!to_private) {
  2568. return 0;
  2569. }
  2570. return ret;
  2571. }
  2572. if (!memory_region_has_guest_memfd(mr)) {
  2573. /*
2574. * Because the vMMIO region must be shared, the guest TD may convert the
2575. * vMMIO region to shared explicitly. Don't complain about such a case.
2576. * See memory_region_type() for how to check whether a region is an MMIO region.
  2577. */
  2578. if (!to_private &&
  2579. !memory_region_is_ram(mr) &&
  2580. !memory_region_is_ram_device(mr) &&
  2581. !memory_region_is_rom(mr) &&
  2582. !memory_region_is_romd(mr)) {
  2583. ret = 0;
  2584. } else {
  2585. error_report("Convert non guest_memfd backed memory region "
  2586. "(0x%"HWADDR_PRIx" ,+ 0x%"HWADDR_PRIx") to %s",
  2587. start, size, to_private ? "private" : "shared");
  2588. }
  2589. goto out_unref;
  2590. }
  2591. if (to_private) {
  2592. ret = kvm_set_memory_attributes_private(start, size);
  2593. } else {
  2594. ret = kvm_set_memory_attributes_shared(start, size);
  2595. }
  2596. if (ret) {
  2597. goto out_unref;
  2598. }
  2599. addr = memory_region_get_ram_ptr(mr) + section.offset_within_region;
  2600. rb = qemu_ram_block_from_host(addr, false, &offset);
  2601. if (to_private) {
  2602. if (rb->page_size != qemu_real_host_page_size()) {
  2603. /*
  2604. * shared memory is backed by hugetlb, which is supposed to be
  2605. * pre-allocated and doesn't need to be discarded
  2606. */
  2607. goto out_unref;
  2608. }
  2609. ret = ram_block_discard_range(rb, offset, size);
  2610. } else {
  2611. ret = ram_block_discard_guest_memfd_range(rb, offset, size);
  2612. }
  2613. out_unref:
  2614. memory_region_unref(mr);
  2615. return ret;
  2616. }
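/*
 * Main vCPU execution loop: push dirty register state into the kernel,
 * issue KVM_RUN without the BQL held, and dispatch on the exit reason
 * until an exit that must be handled by the generic loop (EXCP_*) or an
 * error occurs.
 */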
  2617. int kvm_cpu_exec(CPUState *cpu)
  2618. {
  2619. struct kvm_run *run = cpu->kvm_run;
  2620. int ret, run_ret;
  2621. trace_kvm_cpu_exec();
  2622. if (kvm_arch_process_async_events(cpu)) {
  2623. qatomic_set(&cpu->exit_request, 0);
  2624. return EXCP_HLT;
  2625. }
  2626. bql_unlock();
  2627. cpu_exec_start(cpu);
  2628. do {
  2629. MemTxAttrs attrs;
  2630. if (cpu->vcpu_dirty) {
  2631. Error *err = NULL;
  2632. ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE, &err);
  2633. if (ret) {
  2634. if (err) {
  2635. error_reportf_err(err, "Putting registers after init: ");
  2636. } else {
  2637. error_report("Failed to put registers after init: %s",
  2638. strerror(-ret));
  2639. }
  2640. ret = -1;
  2641. break;
  2642. }
  2643. cpu->vcpu_dirty = false;
  2644. }
  2645. kvm_arch_pre_run(cpu, run);
  2646. if (qatomic_read(&cpu->exit_request)) {
  2647. trace_kvm_interrupt_exit_request();
  2648. /*
  2649. * KVM requires us to reenter the kernel after IO exits to complete
  2650. * instruction emulation. This self-signal will ensure that we
  2651. * leave ASAP again.
  2652. */
  2653. kvm_cpu_kick_self();
  2654. }
  2655. /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
  2656. * Matching barrier in kvm_eat_signals.
  2657. */
  2658. smp_rmb();
  2659. run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
  2660. attrs = kvm_arch_post_run(cpu, run);
  2661. #ifdef KVM_HAVE_MCE_INJECTION
  2662. if (unlikely(have_sigbus_pending)) {
  2663. bql_lock();
  2664. kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
  2665. pending_sigbus_addr);
  2666. have_sigbus_pending = false;
  2667. bql_unlock();
  2668. }
  2669. #endif
  2670. if (run_ret < 0) {
  2671. if (run_ret == -EINTR || run_ret == -EAGAIN) {
  2672. trace_kvm_io_window_exit();
  2673. kvm_eat_signals(cpu);
  2674. ret = EXCP_INTERRUPT;
  2675. break;
  2676. }
  2677. if (!(run_ret == -EFAULT && run->exit_reason == KVM_EXIT_MEMORY_FAULT)) {
  2678. fprintf(stderr, "error: kvm run failed %s\n",
  2679. strerror(-run_ret));
  2680. #ifdef TARGET_PPC
  2681. if (run_ret == -EBUSY) {
  2682. fprintf(stderr,
  2683. "This is probably because your SMT is enabled.\n"
  2684. "VCPU can only run on primary threads with all "
  2685. "secondary threads offline.\n");
  2686. }
  2687. #endif
  2688. ret = -1;
  2689. break;
  2690. }
  2691. }
  2692. trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
  2693. switch (run->exit_reason) {
  2694. case KVM_EXIT_IO:
  2695. /* Called outside BQL */
  2696. kvm_handle_io(run->io.port, attrs,
  2697. (uint8_t *)run + run->io.data_offset,
  2698. run->io.direction,
  2699. run->io.size,
  2700. run->io.count);
  2701. ret = 0;
  2702. break;
  2703. case KVM_EXIT_MMIO:
  2704. /* Called outside BQL */
  2705. address_space_rw(&address_space_memory,
  2706. run->mmio.phys_addr, attrs,
  2707. run->mmio.data,
  2708. run->mmio.len,
  2709. run->mmio.is_write);
  2710. ret = 0;
  2711. break;
  2712. case KVM_EXIT_IRQ_WINDOW_OPEN:
  2713. ret = EXCP_INTERRUPT;
  2714. break;
  2715. case KVM_EXIT_SHUTDOWN:
  2716. qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
  2717. ret = EXCP_INTERRUPT;
  2718. break;
  2719. case KVM_EXIT_UNKNOWN:
  2720. fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
  2721. (uint64_t)run->hw.hardware_exit_reason);
  2722. ret = -1;
  2723. break;
  2724. case KVM_EXIT_INTERNAL_ERROR:
  2725. ret = kvm_handle_internal_error(cpu, run);
  2726. break;
  2727. case KVM_EXIT_DIRTY_RING_FULL:
  2728. /*
  2729. * We shouldn't continue if the dirty ring of this vcpu is
  2730. * still full. Got kicked by KVM_RESET_DIRTY_RINGS.
  2731. */
  2732. trace_kvm_dirty_ring_full(cpu->cpu_index);
  2733. bql_lock();
  2734. /*
2735. * We throttle the vCPU by making it sleep once it exits from the kernel
2736. * with a full dirty ring. In the dirtylimit scenario, reaping all vCPUs
2737. * after a single vCPU's dirty ring fills up would skip that sleep, so
2738. * just reap the vCPU whose ring is full.
  2739. */
  2740. if (dirtylimit_in_service()) {
  2741. kvm_dirty_ring_reap(kvm_state, cpu);
  2742. } else {
  2743. kvm_dirty_ring_reap(kvm_state, NULL);
  2744. }
  2745. bql_unlock();
  2746. dirtylimit_vcpu_execute(cpu);
  2747. ret = 0;
  2748. break;
  2749. case KVM_EXIT_SYSTEM_EVENT:
  2750. trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type);
  2751. switch (run->system_event.type) {
  2752. case KVM_SYSTEM_EVENT_SHUTDOWN:
  2753. qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
  2754. ret = EXCP_INTERRUPT;
  2755. break;
  2756. case KVM_SYSTEM_EVENT_RESET:
  2757. qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
  2758. ret = EXCP_INTERRUPT;
  2759. break;
  2760. case KVM_SYSTEM_EVENT_CRASH:
  2761. kvm_cpu_synchronize_state(cpu);
  2762. bql_lock();
  2763. qemu_system_guest_panicked(cpu_get_crash_info(cpu));
  2764. bql_unlock();
  2765. ret = 0;
  2766. break;
  2767. default:
  2768. ret = kvm_arch_handle_exit(cpu, run);
  2769. break;
  2770. }
  2771. break;
  2772. case KVM_EXIT_MEMORY_FAULT:
  2773. trace_kvm_memory_fault(run->memory_fault.gpa,
  2774. run->memory_fault.size,
  2775. run->memory_fault.flags);
  2776. if (run->memory_fault.flags & ~KVM_MEMORY_EXIT_FLAG_PRIVATE) {
  2777. error_report("KVM_EXIT_MEMORY_FAULT: Unknown flag 0x%" PRIx64,
  2778. (uint64_t)run->memory_fault.flags);
  2779. ret = -1;
  2780. break;
  2781. }
  2782. ret = kvm_convert_memory(run->memory_fault.gpa, run->memory_fault.size,
  2783. run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE);
  2784. break;
  2785. default:
  2786. ret = kvm_arch_handle_exit(cpu, run);
  2787. break;
  2788. }
  2789. } while (ret == 0);
  2790. cpu_exec_end(cpu);
  2791. bql_lock();
  2792. if (ret < 0) {
  2793. cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
  2794. vm_stop(RUN_STATE_INTERNAL_ERROR);
  2795. }
  2796. qatomic_set(&cpu->exit_request, 0);
  2797. return ret;
  2798. }
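/*
 * Thin wrappers around ioctl() for the KVM system, VM, vCPU and device
 * file descriptors.  Each takes one optional pointer argument, traces the
 * call, and converts the raw -1/errno failure convention into a negative
 * errno return value.
 */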
  2799. int kvm_ioctl(KVMState *s, unsigned long type, ...)
  2800. {
  2801. int ret;
  2802. void *arg;
  2803. va_list ap;
  2804. va_start(ap, type);
  2805. arg = va_arg(ap, void *);
  2806. va_end(ap);
  2807. trace_kvm_ioctl(type, arg);
  2808. ret = ioctl(s->fd, type, arg);
  2809. if (ret == -1) {
  2810. ret = -errno;
  2811. }
  2812. return ret;
  2813. }
  2814. int kvm_vm_ioctl(KVMState *s, unsigned long type, ...)
  2815. {
  2816. int ret;
  2817. void *arg;
  2818. va_list ap;
  2819. va_start(ap, type);
  2820. arg = va_arg(ap, void *);
  2821. va_end(ap);
  2822. trace_kvm_vm_ioctl(type, arg);
  2823. accel_ioctl_begin();
  2824. ret = ioctl(s->vmfd, type, arg);
  2825. accel_ioctl_end();
  2826. if (ret == -1) {
  2827. ret = -errno;
  2828. }
  2829. return ret;
  2830. }
  2831. int kvm_vcpu_ioctl(CPUState *cpu, unsigned long type, ...)
  2832. {
  2833. int ret;
  2834. void *arg;
  2835. va_list ap;
  2836. va_start(ap, type);
  2837. arg = va_arg(ap, void *);
  2838. va_end(ap);
  2839. trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
  2840. accel_cpu_ioctl_begin(cpu);
  2841. ret = ioctl(cpu->kvm_fd, type, arg);
  2842. accel_cpu_ioctl_end(cpu);
  2843. if (ret == -1) {
  2844. ret = -errno;
  2845. }
  2846. return ret;
  2847. }
  2848. int kvm_device_ioctl(int fd, unsigned long type, ...)
  2849. {
  2850. int ret;
  2851. void *arg;
  2852. va_list ap;
  2853. va_start(ap, type);
  2854. arg = va_arg(ap, void *);
  2855. va_end(ap);
  2856. trace_kvm_device_ioctl(fd, type, arg);
  2857. accel_ioctl_begin();
  2858. ret = ioctl(fd, type, arg);
  2859. accel_ioctl_end();
  2860. if (ret == -1) {
  2861. ret = -errno;
  2862. }
  2863. return ret;
  2864. }
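/*
 * Device-attribute helpers.  KVM_HAS_DEVICE_ATTR returns 0 when an
 * attribute exists, so the *_check_attr() helpers convert that into a
 * 1 (supported) / 0 (not supported) result, while kvm_device_access()
 * wraps KVM_GET/SET_DEVICE_ATTR with error reporting.
 */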
  2865. int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
  2866. {
  2867. int ret;
  2868. struct kvm_device_attr attribute = {
  2869. .group = group,
  2870. .attr = attr,
  2871. };
  2872. if (!kvm_vm_attributes_allowed) {
  2873. return 0;
  2874. }
  2875. ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
2876. /* KVM returns 0 on success for KVM_HAS_DEVICE_ATTR */
  2877. return ret ? 0 : 1;
  2878. }
  2879. int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
  2880. {
  2881. struct kvm_device_attr attribute = {
  2882. .group = group,
  2883. .attr = attr,
  2884. .flags = 0,
  2885. };
  2886. return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
  2887. }
  2888. int kvm_device_access(int fd, int group, uint64_t attr,
  2889. void *val, bool write, Error **errp)
  2890. {
  2891. struct kvm_device_attr kvmattr;
  2892. int err;
  2893. kvmattr.flags = 0;
  2894. kvmattr.group = group;
  2895. kvmattr.attr = attr;
  2896. kvmattr.addr = (uintptr_t)val;
  2897. err = kvm_device_ioctl(fd,
  2898. write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
  2899. &kvmattr);
  2900. if (err < 0) {
  2901. error_setg_errno(errp, -err,
  2902. "KVM_%s_DEVICE_ATTR failed: Group %d "
  2903. "attr 0x%016" PRIx64,
  2904. write ? "SET" : "GET", group, attr);
  2905. }
  2906. return err;
  2907. }
  2908. bool kvm_has_sync_mmu(void)
  2909. {
  2910. return kvm_state->sync_mmu;
  2911. }
  2912. int kvm_has_vcpu_events(void)
  2913. {
  2914. return kvm_state->vcpu_events;
  2915. }
  2916. int kvm_max_nested_state_length(void)
  2917. {
  2918. return kvm_state->max_nested_state_len;
  2919. }
  2920. int kvm_has_gsi_routing(void)
  2921. {
  2922. #ifdef KVM_CAP_IRQ_ROUTING
  2923. return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
  2924. #else
  2925. return false;
  2926. #endif
  2927. }
  2928. bool kvm_arm_supports_user_irq(void)
  2929. {
  2930. return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
  2931. }
  2932. #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
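/*
 * gdbstub guest-debug support.  Software breakpoints are tracked in the
 * per-VM kvm_sw_breakpoints list and inserted/removed through the
 * kvm_arch_*_sw_breakpoint() hooks; hardware breakpoints and watchpoints
 * are handled entirely by kvm_arch_*_hw_breakpoint().
 */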
  2933. struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc)
  2934. {
  2935. struct kvm_sw_breakpoint *bp;
  2936. QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
  2937. if (bp->pc == pc) {
  2938. return bp;
  2939. }
  2940. }
  2941. return NULL;
  2942. }
  2943. int kvm_sw_breakpoints_active(CPUState *cpu)
  2944. {
  2945. return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
  2946. }
  2947. struct kvm_set_guest_debug_data {
  2948. struct kvm_guest_debug dbg;
  2949. int err;
  2950. };
  2951. static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
  2952. {
  2953. struct kvm_set_guest_debug_data *dbg_data =
  2954. (struct kvm_set_guest_debug_data *) data.host_ptr;
  2955. dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
  2956. &dbg_data->dbg);
  2957. }
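/*
 * Recompute the KVM_SET_GUEST_DEBUG control word for this vCPU (single
 * stepping plus arch-specific breakpoint state) and apply it on the
 * vCPU's own thread via run_on_cpu().
 */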
  2958. int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
  2959. {
  2960. struct kvm_set_guest_debug_data data;
  2961. data.dbg.control = reinject_trap;
  2962. if (cpu->singlestep_enabled) {
  2963. data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
  2964. if (cpu->singlestep_enabled & SSTEP_NOIRQ) {
  2965. data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ;
  2966. }
  2967. }
  2968. kvm_arch_update_guest_debug(cpu, &data.dbg);
  2969. run_on_cpu(cpu, kvm_invoke_set_guest_debug,
  2970. RUN_ON_CPU_HOST_PTR(&data));
  2971. return data.err;
  2972. }
  2973. bool kvm_supports_guest_debug(void)
  2974. {
  2975. /* probed during kvm_init() */
  2976. return kvm_has_guest_debug;
  2977. }
  2978. int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
  2979. {
  2980. struct kvm_sw_breakpoint *bp;
  2981. int err;
  2982. if (type == GDB_BREAKPOINT_SW) {
  2983. bp = kvm_find_sw_breakpoint(cpu, addr);
  2984. if (bp) {
  2985. bp->use_count++;
  2986. return 0;
  2987. }
  2988. bp = g_new(struct kvm_sw_breakpoint, 1);
  2989. bp->pc = addr;
  2990. bp->use_count = 1;
  2991. err = kvm_arch_insert_sw_breakpoint(cpu, bp);
  2992. if (err) {
  2993. g_free(bp);
  2994. return err;
  2995. }
  2996. QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
  2997. } else {
  2998. err = kvm_arch_insert_hw_breakpoint(addr, len, type);
  2999. if (err) {
  3000. return err;
  3001. }
  3002. }
  3003. CPU_FOREACH(cpu) {
  3004. err = kvm_update_guest_debug(cpu, 0);
  3005. if (err) {
  3006. return err;
  3007. }
  3008. }
  3009. return 0;
  3010. }
  3011. int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
  3012. {
  3013. struct kvm_sw_breakpoint *bp;
  3014. int err;
  3015. if (type == GDB_BREAKPOINT_SW) {
  3016. bp = kvm_find_sw_breakpoint(cpu, addr);
  3017. if (!bp) {
  3018. return -ENOENT;
  3019. }
  3020. if (bp->use_count > 1) {
  3021. bp->use_count--;
  3022. return 0;
  3023. }
  3024. err = kvm_arch_remove_sw_breakpoint(cpu, bp);
  3025. if (err) {
  3026. return err;
  3027. }
  3028. QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
  3029. g_free(bp);
  3030. } else {
  3031. err = kvm_arch_remove_hw_breakpoint(addr, len, type);
  3032. if (err) {
  3033. return err;
  3034. }
  3035. }
  3036. CPU_FOREACH(cpu) {
  3037. err = kvm_update_guest_debug(cpu, 0);
  3038. if (err) {
  3039. return err;
  3040. }
  3041. }
  3042. return 0;
  3043. }
  3044. void kvm_remove_all_breakpoints(CPUState *cpu)
  3045. {
  3046. struct kvm_sw_breakpoint *bp, *next;
  3047. KVMState *s = cpu->kvm_state;
  3048. CPUState *tmpcpu;
  3049. QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
  3050. if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
  3051. /* Try harder to find a CPU that currently sees the breakpoint. */
  3052. CPU_FOREACH(tmpcpu) {
  3053. if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
  3054. break;
  3055. }
  3056. }
  3057. }
  3058. QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
  3059. g_free(bp);
  3060. }
  3061. kvm_arch_remove_all_hw_breakpoints();
  3062. CPU_FOREACH(cpu) {
  3063. kvm_update_guest_debug(cpu, 0);
  3064. }
  3065. }
  3066. #endif /* !TARGET_KVM_HAVE_GUEST_DEBUG */
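/*
 * Install 'sigset' as the signal mask that is in effect while this vCPU
 * is inside KVM_RUN (KVM_SET_SIGNAL_MASK).
 */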
  3067. static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
  3068. {
  3069. KVMState *s = kvm_state;
  3070. struct kvm_signal_mask *sigmask;
  3071. int r;
  3072. sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
  3073. sigmask->len = s->sigmask_len;
  3074. memcpy(sigmask->sigset, sigset, sizeof(*sigset));
  3075. r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
  3076. g_free(sigmask);
  3077. return r;
  3078. }
  3079. static void kvm_ipi_signal(int sig)
  3080. {
  3081. if (current_cpu) {
  3082. assert(kvm_immediate_exit);
  3083. kvm_cpu_kick(current_cpu);
  3084. }
  3085. }
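/*
 * Set up per-vCPU signal handling: install the SIG_IPI handler, unblock
 * SIGBUS in this thread when MCE injection is supported, and arrange for
 * SIG_IPI to interrupt KVM_RUN, either through the thread's signal mask
 * (immediate-exit mode) or through KVM_SET_SIGNAL_MASK.
 */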
  3086. void kvm_init_cpu_signals(CPUState *cpu)
  3087. {
  3088. int r;
  3089. sigset_t set;
  3090. struct sigaction sigact;
  3091. memset(&sigact, 0, sizeof(sigact));
  3092. sigact.sa_handler = kvm_ipi_signal;
  3093. sigaction(SIG_IPI, &sigact, NULL);
  3094. pthread_sigmask(SIG_BLOCK, NULL, &set);
  3095. #if defined KVM_HAVE_MCE_INJECTION
  3096. sigdelset(&set, SIGBUS);
  3097. pthread_sigmask(SIG_SETMASK, &set, NULL);
  3098. #endif
  3099. sigdelset(&set, SIG_IPI);
  3100. if (kvm_immediate_exit) {
  3101. r = pthread_sigmask(SIG_SETMASK, &set, NULL);
  3102. } else {
  3103. r = kvm_set_signal_mask(cpu, &set);
  3104. }
  3105. if (r) {
  3106. fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
  3107. exit(1);
  3108. }
  3109. }
  3110. /* Called asynchronously in VCPU thread. */
  3111. int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
  3112. {
  3113. #ifdef KVM_HAVE_MCE_INJECTION
  3114. if (have_sigbus_pending) {
  3115. return 1;
  3116. }
  3117. have_sigbus_pending = true;
  3118. pending_sigbus_addr = addr;
  3119. pending_sigbus_code = code;
  3120. qatomic_set(&cpu->exit_request, 1);
  3121. return 0;
  3122. #else
  3123. return 1;
  3124. #endif
  3125. }
  3126. /* Called synchronously (via signalfd) in main thread. */
  3127. int kvm_on_sigbus(int code, void *addr)
  3128. {
  3129. #ifdef KVM_HAVE_MCE_INJECTION
3130. /* An action-required MCE kills the process if SIGBUS is blocked, and
3131. * SIGBUS is blocked in the I/O thread, where MCEs are delivered via
3132. * signalfd. Hence only action-optional MCEs can reach this point.
3133. */
  3134. assert(code != BUS_MCEERR_AR);
  3135. kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
  3136. return 0;
  3137. #else
  3138. return 1;
  3139. #endif
  3140. }
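/*
 * Create an in-kernel device of the given type, or, with test == true,
 * only check whether it could be created.  Returns the new device fd,
 * 0 for a successful test, or a negative errno.
 */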
  3141. int kvm_create_device(KVMState *s, uint64_t type, bool test)
  3142. {
  3143. int ret;
  3144. struct kvm_create_device create_dev;
  3145. create_dev.type = type;
  3146. create_dev.fd = -1;
  3147. create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
  3148. if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
  3149. return -ENOTSUP;
  3150. }
  3151. ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
  3152. if (ret) {
  3153. return ret;
  3154. }
  3155. return test ? 0 : create_dev.fd;
  3156. }
  3157. bool kvm_device_supported(int vmfd, uint64_t type)
  3158. {
  3159. struct kvm_create_device create_dev = {
  3160. .type = type,
  3161. .fd = -1,
  3162. .flags = KVM_CREATE_DEVICE_TEST,
  3163. };
  3164. if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
  3165. return false;
  3166. }
  3167. return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
  3168. }
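/*
 * KVM_SET_ONE_REG / KVM_GET_ONE_REG wrappers.  Illustrative use only; the
 * register id and its width are architecture specific (SOME_KVM_REG_ID is
 * a placeholder, not a real constant):
 *
 *     uint64_t val;
 *     if (!kvm_get_one_reg(cs, SOME_KVM_REG_ID, &val)) {
 *         ...
 *     }
 */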
  3169. int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
  3170. {
  3171. struct kvm_one_reg reg;
  3172. int r;
  3173. reg.id = id;
  3174. reg.addr = (uintptr_t) source;
  3175. r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
  3176. if (r) {
  3177. trace_kvm_failed_reg_set(id, strerror(-r));
  3178. }
  3179. return r;
  3180. }
  3181. int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
  3182. {
  3183. struct kvm_one_reg reg;
  3184. int r;
  3185. reg.id = id;
  3186. reg.addr = (uintptr_t) target;
  3187. r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
  3188. if (r) {
  3189. trace_kvm_failed_reg_get(id, strerror(-r));
  3190. }
  3191. return r;
  3192. }
  3193. static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
  3194. hwaddr start_addr, hwaddr size)
  3195. {
  3196. KVMState *kvm = KVM_STATE(ms->accelerator);
  3197. int i;
  3198. for (i = 0; i < kvm->nr_as; ++i) {
  3199. if (kvm->as[i].as == as && kvm->as[i].ml) {
  3200. size = MIN(kvm_max_slot_size, size);
  3201. return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
  3202. start_addr, size);
  3203. }
  3204. }
  3205. return false;
  3206. }
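/*
 * QOM property accessors for the KVM accelerator object.  Most setters
 * refuse changes once the accelerator has been initialized (s->fd != -1).
 */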
  3207. static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
  3208. const char *name, void *opaque,
  3209. Error **errp)
  3210. {
  3211. KVMState *s = KVM_STATE(obj);
  3212. int64_t value = s->kvm_shadow_mem;
  3213. visit_type_int(v, name, &value, errp);
  3214. }
  3215. static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
  3216. const char *name, void *opaque,
  3217. Error **errp)
  3218. {
  3219. KVMState *s = KVM_STATE(obj);
  3220. int64_t value;
  3221. if (s->fd != -1) {
  3222. error_setg(errp, "Cannot set properties after the accelerator has been initialized");
  3223. return;
  3224. }
  3225. if (!visit_type_int(v, name, &value, errp)) {
  3226. return;
  3227. }
  3228. s->kvm_shadow_mem = value;
  3229. }
  3230. static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
  3231. const char *name, void *opaque,
  3232. Error **errp)
  3233. {
  3234. KVMState *s = KVM_STATE(obj);
  3235. OnOffSplit mode;
  3236. if (s->fd != -1) {
  3237. error_setg(errp, "Cannot set properties after the accelerator has been initialized");
  3238. return;
  3239. }
  3240. if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
  3241. return;
  3242. }
  3243. switch (mode) {
  3244. case ON_OFF_SPLIT_ON:
  3245. s->kernel_irqchip_allowed = true;
  3246. s->kernel_irqchip_required = true;
  3247. s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
  3248. break;
  3249. case ON_OFF_SPLIT_OFF:
  3250. s->kernel_irqchip_allowed = false;
  3251. s->kernel_irqchip_required = false;
  3252. s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
  3253. break;
  3254. case ON_OFF_SPLIT_SPLIT:
  3255. s->kernel_irqchip_allowed = true;
  3256. s->kernel_irqchip_required = true;
  3257. s->kernel_irqchip_split = ON_OFF_AUTO_ON;
  3258. break;
  3259. default:
  3260. /* The value was checked in visit_type_OnOffSplit() above. If
  3261. * we get here, then something is wrong in QEMU.
  3262. */
  3263. abort();
  3264. }
  3265. }
  3266. bool kvm_kernel_irqchip_allowed(void)
  3267. {
  3268. return kvm_state->kernel_irqchip_allowed;
  3269. }
  3270. bool kvm_kernel_irqchip_required(void)
  3271. {
  3272. return kvm_state->kernel_irqchip_required;
  3273. }
  3274. bool kvm_kernel_irqchip_split(void)
  3275. {
  3276. return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
  3277. }
  3278. static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
  3279. const char *name, void *opaque,
  3280. Error **errp)
  3281. {
  3282. KVMState *s = KVM_STATE(obj);
  3283. uint32_t value = s->kvm_dirty_ring_size;
  3284. visit_type_uint32(v, name, &value, errp);
  3285. }
  3286. static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
  3287. const char *name, void *opaque,
  3288. Error **errp)
  3289. {
  3290. KVMState *s = KVM_STATE(obj);
  3291. uint32_t value;
  3292. if (s->fd != -1) {
  3293. error_setg(errp, "Cannot set properties after the accelerator has been initialized");
  3294. return;
  3295. }
  3296. if (!visit_type_uint32(v, name, &value, errp)) {
  3297. return;
  3298. }
  3299. if (value & (value - 1)) {
3300. error_setg(errp, "dirty-ring-size must be a power of two");
  3301. return;
  3302. }
  3303. s->kvm_dirty_ring_size = value;
  3304. }
  3305. static char *kvm_get_device(Object *obj,
  3306. Error **errp G_GNUC_UNUSED)
  3307. {
  3308. KVMState *s = KVM_STATE(obj);
  3309. return g_strdup(s->device);
  3310. }
  3311. static void kvm_set_device(Object *obj,
  3312. const char *value,
  3313. Error **errp G_GNUC_UNUSED)
  3314. {
  3315. KVMState *s = KVM_STATE(obj);
  3316. g_free(s->device);
  3317. s->device = g_strdup(value);
  3318. }
  3319. static void kvm_set_kvm_rapl(Object *obj, bool value, Error **errp)
  3320. {
  3321. KVMState *s = KVM_STATE(obj);
  3322. s->msr_energy.enable = value;
  3323. }
  3324. static void kvm_set_kvm_rapl_socket_path(Object *obj,
  3325. const char *str,
  3326. Error **errp)
  3327. {
  3328. KVMState *s = KVM_STATE(obj);
  3329. g_free(s->msr_energy.socket_path);
  3330. s->msr_energy.socket_path = g_strdup(str);
  3331. }
  3332. static void kvm_accel_instance_init(Object *obj)
  3333. {
  3334. KVMState *s = KVM_STATE(obj);
  3335. s->fd = -1;
  3336. s->vmfd = -1;
  3337. s->kvm_shadow_mem = -1;
  3338. s->kernel_irqchip_allowed = true;
  3339. s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
  3340. /* KVM dirty ring is by default off */
  3341. s->kvm_dirty_ring_size = 0;
  3342. s->kvm_dirty_ring_with_bitmap = false;
  3343. s->kvm_eager_split_size = 0;
  3344. s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
  3345. s->notify_window = 0;
  3346. s->xen_version = 0;
  3347. s->xen_gnttab_max_frames = 64;
  3348. s->xen_evtchn_max_pirq = 256;
  3349. s->device = NULL;
  3350. s->msr_energy.enable = false;
  3351. }
  3352. /**
  3353. * kvm_gdbstub_sstep_flags():
  3354. *
  3355. * Returns: SSTEP_* flags that KVM supports for guest debug. The
  3356. * support is probed during kvm_init()
  3357. */
  3358. static int kvm_gdbstub_sstep_flags(void)
  3359. {
  3360. return kvm_sstep_flags;
  3361. }
  3362. static void kvm_accel_class_init(ObjectClass *oc, void *data)
  3363. {
  3364. AccelClass *ac = ACCEL_CLASS(oc);
  3365. ac->name = "KVM";
  3366. ac->init_machine = kvm_init;
  3367. ac->has_memory = kvm_accel_has_memory;
  3368. ac->allowed = &kvm_allowed;
  3369. ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags;
  3370. object_class_property_add(oc, "kernel-irqchip", "on|off|split",
  3371. NULL, kvm_set_kernel_irqchip,
  3372. NULL, NULL);
  3373. object_class_property_set_description(oc, "kernel-irqchip",
  3374. "Configure KVM in-kernel irqchip");
  3375. object_class_property_add(oc, "kvm-shadow-mem", "int",
  3376. kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
  3377. NULL, NULL);
  3378. object_class_property_set_description(oc, "kvm-shadow-mem",
  3379. "KVM shadow MMU size");
  3380. object_class_property_add(oc, "dirty-ring-size", "uint32",
  3381. kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
  3382. NULL, NULL);
  3383. object_class_property_set_description(oc, "dirty-ring-size",
  3384. "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");
  3385. object_class_property_add_str(oc, "device", kvm_get_device, kvm_set_device);
  3386. object_class_property_set_description(oc, "device",
  3387. "Path to the device node to use (default: /dev/kvm)");
  3388. object_class_property_add_bool(oc, "rapl",
  3389. NULL,
  3390. kvm_set_kvm_rapl);
  3391. object_class_property_set_description(oc, "rapl",
  3392. "Allow energy related MSRs for RAPL interface in Guest");
  3393. object_class_property_add_str(oc, "rapl-helper-socket", NULL,
  3394. kvm_set_kvm_rapl_socket_path);
  3395. object_class_property_set_description(oc, "rapl-helper-socket",
  3396. "Socket Path for comminucating with the Virtual MSR helper daemon");
  3397. kvm_arch_accel_class_init(oc);
  3398. }
  3399. static const TypeInfo kvm_accel_type = {
  3400. .name = TYPE_KVM_ACCEL,
  3401. .parent = TYPE_ACCEL,
  3402. .instance_init = kvm_accel_instance_init,
  3403. .class_init = kvm_accel_class_init,
  3404. .instance_size = sizeof(KVMState),
  3405. };
  3406. static void kvm_type_init(void)
  3407. {
  3408. type_register_static(&kvm_accel_type);
  3409. }
  3410. type_init(kvm_type_init);
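/*
 * Support for the binary stats interface (KVM_GET_STATS_FD): the file
 * descriptor exposes a header, an array of descriptors and the raw data,
 * which are converted below into QAPI Stats/StatsSchema results.
 */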
  3411. typedef struct StatsArgs {
  3412. union StatsResultsType {
  3413. StatsResultList **stats;
  3414. StatsSchemaList **schema;
  3415. } result;
  3416. strList *names;
  3417. Error **errp;
  3418. } StatsArgs;
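/*
 * Convert one kvm_stats_desc and its data words into a QAPI Stats entry
 * and prepend it to 'stats_list'.  Descriptors with an unknown type, unit
 * or base are silently skipped.
 */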
  3419. static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
  3420. uint64_t *stats_data,
  3421. StatsList *stats_list,
  3422. Error **errp)
  3423. {
  3424. Stats *stats;
  3425. uint64List *val_list = NULL;
  3426. /* Only add stats that we understand. */
  3427. switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
  3428. case KVM_STATS_TYPE_CUMULATIVE:
  3429. case KVM_STATS_TYPE_INSTANT:
  3430. case KVM_STATS_TYPE_PEAK:
  3431. case KVM_STATS_TYPE_LINEAR_HIST:
  3432. case KVM_STATS_TYPE_LOG_HIST:
  3433. break;
  3434. default:
  3435. return stats_list;
  3436. }
  3437. switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
  3438. case KVM_STATS_UNIT_NONE:
  3439. case KVM_STATS_UNIT_BYTES:
  3440. case KVM_STATS_UNIT_CYCLES:
  3441. case KVM_STATS_UNIT_SECONDS:
  3442. case KVM_STATS_UNIT_BOOLEAN:
  3443. break;
  3444. default:
  3445. return stats_list;
  3446. }
  3447. switch (pdesc->flags & KVM_STATS_BASE_MASK) {
  3448. case KVM_STATS_BASE_POW10:
  3449. case KVM_STATS_BASE_POW2:
  3450. break;
  3451. default:
  3452. return stats_list;
  3453. }
  3454. /* Alloc and populate data list */
  3455. stats = g_new0(Stats, 1);
  3456. stats->name = g_strdup(pdesc->name);
  3457. stats->value = g_new0(StatsValue, 1);
  3458. if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) {
  3459. stats->value->u.boolean = *stats_data;
  3460. stats->value->type = QTYPE_QBOOL;
  3461. } else if (pdesc->size == 1) {
  3462. stats->value->u.scalar = *stats_data;
  3463. stats->value->type = QTYPE_QNUM;
  3464. } else {
  3465. int i;
  3466. for (i = 0; i < pdesc->size; i++) {
  3467. QAPI_LIST_PREPEND(val_list, stats_data[i]);
  3468. }
  3469. stats->value->u.list = val_list;
  3470. stats->value->type = QTYPE_QLIST;
  3471. }
  3472. QAPI_LIST_PREPEND(stats_list, stats);
  3473. return stats_list;
  3474. }
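/*
 * Translate one kvm_stats_desc into a StatsSchemaValue entry; unknown
 * type, unit or base values cause the descriptor to be skipped.
 */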
  3475. static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
  3476. StatsSchemaValueList *list,
  3477. Error **errp)
  3478. {
  3479. StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
  3480. schema_entry->value = g_new0(StatsSchemaValue, 1);
  3481. switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
  3482. case KVM_STATS_TYPE_CUMULATIVE:
  3483. schema_entry->value->type = STATS_TYPE_CUMULATIVE;
  3484. break;
  3485. case KVM_STATS_TYPE_INSTANT:
  3486. schema_entry->value->type = STATS_TYPE_INSTANT;
  3487. break;
  3488. case KVM_STATS_TYPE_PEAK:
  3489. schema_entry->value->type = STATS_TYPE_PEAK;
  3490. break;
  3491. case KVM_STATS_TYPE_LINEAR_HIST:
  3492. schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM;
  3493. schema_entry->value->bucket_size = pdesc->bucket_size;
  3494. schema_entry->value->has_bucket_size = true;
  3495. break;
  3496. case KVM_STATS_TYPE_LOG_HIST:
  3497. schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM;
  3498. break;
  3499. default:
  3500. goto exit;
  3501. }
  3502. switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
  3503. case KVM_STATS_UNIT_NONE:
  3504. break;
  3505. case KVM_STATS_UNIT_BOOLEAN:
  3506. schema_entry->value->has_unit = true;
  3507. schema_entry->value->unit = STATS_UNIT_BOOLEAN;
  3508. break;
  3509. case KVM_STATS_UNIT_BYTES:
  3510. schema_entry->value->has_unit = true;
  3511. schema_entry->value->unit = STATS_UNIT_BYTES;
  3512. break;
  3513. case KVM_STATS_UNIT_CYCLES:
  3514. schema_entry->value->has_unit = true;
  3515. schema_entry->value->unit = STATS_UNIT_CYCLES;
  3516. break;
  3517. case KVM_STATS_UNIT_SECONDS:
  3518. schema_entry->value->has_unit = true;
  3519. schema_entry->value->unit = STATS_UNIT_SECONDS;
  3520. break;
  3521. default:
  3522. goto exit;
  3523. }
  3524. schema_entry->value->exponent = pdesc->exponent;
  3525. if (pdesc->exponent) {
  3526. switch (pdesc->flags & KVM_STATS_BASE_MASK) {
  3527. case KVM_STATS_BASE_POW10:
  3528. schema_entry->value->has_base = true;
  3529. schema_entry->value->base = 10;
  3530. break;
  3531. case KVM_STATS_BASE_POW2:
  3532. schema_entry->value->has_base = true;
  3533. schema_entry->value->base = 2;
  3534. break;
  3535. default:
  3536. goto exit;
  3537. }
  3538. }
  3539. schema_entry->value->name = g_strdup(pdesc->name);
  3540. schema_entry->next = list;
  3541. return schema_entry;
  3542. exit:
  3543. g_free(schema_entry->value);
  3544. g_free(schema_entry);
  3545. return list;
  3546. }
  3547. /* Cached stats descriptors */
  3548. typedef struct StatsDescriptors {
  3549. const char *ident; /* cache key, currently the StatsTarget */
  3550. struct kvm_stats_desc *kvm_stats_desc;
  3551. struct kvm_stats_header kvm_stats_header;
  3552. QTAILQ_ENTRY(StatsDescriptors) next;
  3553. } StatsDescriptors;
  3554. static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors =
  3555. QTAILQ_HEAD_INITIALIZER(stats_descriptors);
3556. /*
3557. * Return the descriptors for 'target': either the cached ones that have
3558. * already been read, or ones freshly retrieved from 'stats_fd'.
3559. */
  3560. static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd,
  3561. Error **errp)
  3562. {
  3563. StatsDescriptors *descriptors;
  3564. const char *ident;
  3565. struct kvm_stats_desc *kvm_stats_desc;
  3566. struct kvm_stats_header *kvm_stats_header;
  3567. size_t size_desc;
  3568. ssize_t ret;
  3569. ident = StatsTarget_str(target);
  3570. QTAILQ_FOREACH(descriptors, &stats_descriptors, next) {
  3571. if (g_str_equal(descriptors->ident, ident)) {
  3572. return descriptors;
  3573. }
  3574. }
  3575. descriptors = g_new0(StatsDescriptors, 1);
  3576. /* Read stats header */
  3577. kvm_stats_header = &descriptors->kvm_stats_header;
  3578. ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0);
  3579. if (ret != sizeof(*kvm_stats_header)) {
  3580. error_setg(errp, "KVM stats: failed to read stats header: "
  3581. "expected %zu actual %zu",
  3582. sizeof(*kvm_stats_header), ret);
  3583. g_free(descriptors);
  3584. return NULL;
  3585. }
  3586. size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
  3587. /* Read stats descriptors */
  3588. kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc);
  3589. ret = pread(stats_fd, kvm_stats_desc,
  3590. size_desc * kvm_stats_header->num_desc,
  3591. kvm_stats_header->desc_offset);
  3592. if (ret != size_desc * kvm_stats_header->num_desc) {
  3593. error_setg(errp, "KVM stats: failed to read stats descriptors: "
  3594. "expected %zu actual %zu",
  3595. size_desc * kvm_stats_header->num_desc, ret);
  3596. g_free(descriptors);
  3597. g_free(kvm_stats_desc);
  3598. return NULL;
  3599. }
  3600. descriptors->kvm_stats_desc = kvm_stats_desc;
  3601. descriptors->ident = ident;
  3602. QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
  3603. return descriptors;
  3604. }
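/*
 * Read the raw stats data for 'target' from 'stats_fd', convert every
 * descriptor that passes the 'names' filter into a Stats entry, and add
 * the resulting list to 'result'.
 */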
  3605. static void query_stats(StatsResultList **result, StatsTarget target,
  3606. strList *names, int stats_fd, CPUState *cpu,
  3607. Error **errp)
  3608. {
  3609. struct kvm_stats_desc *kvm_stats_desc;
  3610. struct kvm_stats_header *kvm_stats_header;
  3611. StatsDescriptors *descriptors;
  3612. g_autofree uint64_t *stats_data = NULL;
  3613. struct kvm_stats_desc *pdesc;
  3614. StatsList *stats_list = NULL;
  3615. size_t size_desc, size_data = 0;
  3616. ssize_t ret;
  3617. int i;
  3618. descriptors = find_stats_descriptors(target, stats_fd, errp);
  3619. if (!descriptors) {
  3620. return;
  3621. }
  3622. kvm_stats_header = &descriptors->kvm_stats_header;
  3623. kvm_stats_desc = descriptors->kvm_stats_desc;
  3624. size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
3625. /* Tally the total data size */
  3626. for (i = 0; i < kvm_stats_header->num_desc; ++i) {
  3627. pdesc = (void *)kvm_stats_desc + i * size_desc;
  3628. size_data += pdesc->size * sizeof(*stats_data);
  3629. }
  3630. stats_data = g_malloc0(size_data);
  3631. ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);
  3632. if (ret != size_data) {
  3633. error_setg(errp, "KVM stats: failed to read data: "
  3634. "expected %zu actual %zu", size_data, ret);
  3635. return;
  3636. }
  3637. for (i = 0; i < kvm_stats_header->num_desc; ++i) {
  3638. uint64_t *stats;
  3639. pdesc = (void *)kvm_stats_desc + i * size_desc;
  3640. /* Add entry to the list */
  3641. stats = (void *)stats_data + pdesc->offset;
  3642. if (!apply_str_list_filter(pdesc->name, names)) {
  3643. continue;
  3644. }
  3645. stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
  3646. }
  3647. if (!stats_list) {
  3648. return;
  3649. }
  3650. switch (target) {
  3651. case STATS_TARGET_VM:
  3652. add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
  3653. break;
  3654. case STATS_TARGET_VCPU:
  3655. add_stats_entry(result, STATS_PROVIDER_KVM,
  3656. cpu->parent_obj.canonical_path,
  3657. stats_list);
  3658. break;
  3659. default:
  3660. g_assert_not_reached();
  3661. }
  3662. }
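/*
 * Build the StatsSchema for 'target' from the cached (or freshly read)
 * descriptors and add it to 'result'.
 */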
  3663. static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
  3664. int stats_fd, Error **errp)
  3665. {
  3666. struct kvm_stats_desc *kvm_stats_desc;
  3667. struct kvm_stats_header *kvm_stats_header;
  3668. StatsDescriptors *descriptors;
  3669. struct kvm_stats_desc *pdesc;
  3670. StatsSchemaValueList *stats_list = NULL;
  3671. size_t size_desc;
  3672. int i;
  3673. descriptors = find_stats_descriptors(target, stats_fd, errp);
  3674. if (!descriptors) {
  3675. return;
  3676. }
  3677. kvm_stats_header = &descriptors->kvm_stats_header;
  3678. kvm_stats_desc = descriptors->kvm_stats_desc;
  3679. size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
3680. /* Walk the descriptors and build the schema list */
  3681. for (i = 0; i < kvm_stats_header->num_desc; ++i) {
  3682. pdesc = (void *)kvm_stats_desc + i * size_desc;
  3683. stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
  3684. }
  3685. add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
  3686. }
  3687. static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
  3688. {
  3689. int stats_fd = cpu->kvm_vcpu_stats_fd;
  3690. Error *local_err = NULL;
  3691. if (stats_fd == -1) {
  3692. error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
  3693. error_propagate(kvm_stats_args->errp, local_err);
  3694. return;
  3695. }
  3696. query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
  3697. kvm_stats_args->names, stats_fd, cpu,
  3698. kvm_stats_args->errp);
  3699. }
  3700. static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
  3701. {
  3702. int stats_fd = cpu->kvm_vcpu_stats_fd;
  3703. Error *local_err = NULL;
  3704. if (stats_fd == -1) {
  3705. error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
  3706. error_propagate(kvm_stats_args->errp, local_err);
  3707. return;
  3708. }
  3709. query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
  3710. kvm_stats_args->errp);
  3711. }
  3712. static void query_stats_cb(StatsResultList **result, StatsTarget target,
  3713. strList *names, strList *targets, Error **errp)
  3714. {
  3715. KVMState *s = kvm_state;
  3716. CPUState *cpu;
  3717. int stats_fd;
  3718. switch (target) {
  3719. case STATS_TARGET_VM:
  3720. {
  3721. stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
  3722. if (stats_fd == -1) {
  3723. error_setg_errno(errp, errno, "KVM stats: ioctl failed");
  3724. return;
  3725. }
  3726. query_stats(result, target, names, stats_fd, NULL, errp);
  3727. close(stats_fd);
  3728. break;
  3729. }
  3730. case STATS_TARGET_VCPU:
  3731. {
  3732. StatsArgs stats_args;
  3733. stats_args.result.stats = result;
  3734. stats_args.names = names;
  3735. stats_args.errp = errp;
  3736. CPU_FOREACH(cpu) {
  3737. if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
  3738. continue;
  3739. }
  3740. query_stats_vcpu(cpu, &stats_args);
  3741. }
  3742. break;
  3743. }
  3744. default:
  3745. break;
  3746. }
  3747. }
  3748. void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
  3749. {
  3750. StatsArgs stats_args;
  3751. KVMState *s = kvm_state;
  3752. int stats_fd;
  3753. stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
  3754. if (stats_fd == -1) {
  3755. error_setg_errno(errp, errno, "KVM stats: ioctl failed");
  3756. return;
  3757. }
  3758. query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
  3759. close(stats_fd);
  3760. if (first_cpu) {
  3761. stats_args.result.schema = result;
  3762. stats_args.errp = errp;
  3763. query_stats_schema_vcpu(first_cpu, &stats_args);
  3764. }
  3765. }
  3766. void kvm_mark_guest_state_protected(void)
  3767. {
  3768. kvm_state->guest_state_protected = true;
  3769. }
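/*
 * Create a guest_memfd of 'size' bytes via KVM_CREATE_GUEST_MEMFD.
 * Returns the new file descriptor, or -1 with 'errp' set on failure.
 */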
  3770. int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp)
  3771. {
  3772. int fd;
  3773. struct kvm_create_guest_memfd guest_memfd = {
  3774. .size = size,
  3775. .flags = flags,
  3776. };
  3777. if (!kvm_guest_memfd_supported) {
  3778. error_setg(errp, "KVM does not support guest_memfd");
  3779. return -1;
  3780. }
  3781. fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
  3782. if (fd < 0) {
  3783. error_setg_errno(errp, errno, "Error creating KVM guest_memfd");
  3784. return -1;
  3785. }
  3786. return fd;
  3787. }