spapr.c

/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * Copyright (c) 2004-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 * Copyright (c) 2010 David Gibson, IBM Corporation.
 * Copyright (c) 2010-2024, IBM Corporation.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/datadir.h"
#include "qemu/memalign.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"
#include "qapi/qapi-events-machine.h"
#include "qapi/qapi-events-qdev.h"
#include "qapi/visitor.h"
#include "system/system.h"
#include "system/hostmem.h"
#include "system/numa.h"
#include "system/tcg.h"
#include "system/qtest.h"
#include "system/reset.h"
#include "system/runstate.h"
#include "qemu/log.h"
#include "hw/fw-path-provider.h"
#include "elf.h"
#include "net/net.h"
#include "system/device_tree.h"
#include "system/cpus.h"
#include "system/hw_accel.h"
#include "kvm_ppc.h"
#include "migration/misc.h"
#include "migration/qemu-file-types.h"
#include "migration/global_state.h"
#include "migration/register.h"
#include "migration/blocker.h"
#include "mmu-hash64.h"
#include "mmu-book3s-v3.h"
#include "cpu-models.h"
#include "hw/core/cpu.h"
#include "hw/ppc/ppc.h"
#include "hw/loader.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_nested.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/ppc/vof.h"
#include "hw/qdev-properties.h"
#include "hw/pci-host/spapr.h"
#include "hw/pci/msi.h"
#include "hw/pci/pci.h"
#include "hw/scsi/scsi.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/vhost-scsi-common.h"
#include "exec/ram_addr.h"
#include "system/confidential-guest-support.h"
#include "hw/usb.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/nmi.h"
#include "hw/intc/intc.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/mem/memory-device.h"
#include "hw/ppc/spapr_tpm_proxy.h"
#include "hw/ppc/spapr_nvdimm.h"
#include "hw/ppc/spapr_numa.h"
#include <libfdt.h>

/* SLOF memory layout:
 *
 * SLOF raw image loaded at 0, copies its romfs right below the flat
 * device-tree, then positions SLOF itself 31M below that.
 *
 * So we set FW_OVERHEAD to 40MB which should account for all of that
 * and more.
 *
 * We load our kernel at 4M, leaving space for SLOF's initial image.
 */
#define FDT_MAX_ADDR            0x80000000 /* FDT must stay below that */
#define FW_MAX_SIZE             0x400000
#define FW_FILE_NAME            "slof.bin"
#define FW_FILE_NAME_VOF        "vof.bin"
#define FW_OVERHEAD             0x2800000
#define KERNEL_LOAD_ADDR        FW_MAX_SIZE
#define MIN_RMA_SLOF            (128 * MiB)
#define PHANDLE_INTC            0x00001111

/* These two functions implement the VCPU id numbering: one to compute them
 * all and one to identify thread 0 of a VCORE. Any change to the first one
 * is likely to have an impact on the second one, so let's keep them close.
 */
static int spapr_vcpu_id(SpaprMachineState *spapr, int cpu_index)
{
    MachineState *ms = MACHINE(spapr);
    unsigned int smp_threads = ms->smp.threads;

    assert(spapr->vsmt);
    return (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads;
}
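
/*
 * Worked example (illustrative only, topology values assumed): with
 * smp.threads = 4 and vsmt = 8, guest threads occupy the first 4 of
 * every 8 VCPU ids, so cpu_index 5 (thread 1 of core 1) maps to
 * (5 / 4) * 8 + 5 % 4 = 9, and thread 0 of each VCORE is exactly the
 * id that is a multiple of vsmt (0, 8, 16, ...), which is what
 * spapr_is_thread0_in_vcore() below tests.
 */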

static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr,
                                      PowerPCCPU *cpu)
{
    assert(spapr->vsmt);
    return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0;
}

int spapr_max_server_number(SpaprMachineState *spapr)
{
    MachineState *ms = MACHINE(spapr);

    assert(spapr->vsmt);
    return DIV_ROUND_UP(ms->smp.max_cpus * spapr->vsmt, ms->smp.threads);
}

static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
                                  int smt_threads)
{
    int i, ret = 0;
    g_autofree uint32_t *servers_prop = g_new(uint32_t, smt_threads);
    g_autofree uint32_t *gservers_prop = g_new(uint32_t, smt_threads * 2);
    int index = spapr_get_vcpu_id(cpu);

    if (cpu->compat_pvr) {
        ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr);
        if (ret < 0) {
            return ret;
        }
    }

    /* Build interrupt servers and gservers properties */
    for (i = 0; i < smt_threads; i++) {
        servers_prop[i] = cpu_to_be32(index + i);
        /* Hack, direct the group queues back to cpu 0 */
        gservers_prop[i * 2] = cpu_to_be32(index + i);
        gservers_prop[i * 2 + 1] = 0;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
                      servers_prop, sizeof(*servers_prop) * smt_threads);
    if (ret < 0) {
        return ret;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
                      gservers_prop, sizeof(*gservers_prop) * smt_threads * 2);

    return ret;
}
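
/*
 * Illustrative result (values assumed): for index = 8 and
 * smt_threads = 2 the loop above encodes, in big-endian cells,
 *
 *   ibm,ppc-interrupt-server#s  = <8 9>
 *   ibm,ppc-interrupt-gserver#s = <8 0 9 0>
 *
 * i.e. one (server, 0) pair per thread in the gserver list.
 */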

static void spapr_dt_pa_features(SpaprMachineState *spapr,
                                 PowerPCCPU *cpu,
                                 void *fdt, int offset)
{
    /*
     * SSO (SAO) ordering is supported on KVM and thread=single hosts,
     * but not MTTCG, so disable it. To advertise it, a cap would have
     * to be added, or support implemented for MTTCG.
     *
     * Copy/paste is not supported by TCG, so it is not advertised. KVM
     * can execute them but it has no accelerator drivers which are usable,
     * so there isn't much need for it anyway.
     */

    /* These should be kept in sync with pnv */
    uint8_t pa_features_206[] = { 6, 0,
        0xf6, 0x1f, 0xc7, 0x00, 0x00, 0xc0 };
    uint8_t pa_features_207[] = { 24, 0,
        0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0,
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
    uint8_t pa_features_300[] = { 66, 0,
        /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
        /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */
        0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */
        /* 6: DS207 */
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
        /* 16: Vector */
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
        /* 18: Vec. Scalar, 20: Vec. XOR */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
        /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
        /* 32: LE atomic, 34: EBB + ext EBB */
        0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
        /* 40: Radix MMU */
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */
        /* 42: PM, 44: PC RA, 46: SC vec'd */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
        /* 48: SIMD, 50: QP BFP, 52: String */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
        /* 54: DecFP, 56: DecI, 58: SHA */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
        /* 60: NM atomic, 62: RNG */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
    };
    /* 3.1 removes SAO, HTM support */
    uint8_t pa_features_31[] = { 74, 0,
        /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
        /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */
        0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */
        /* 6: DS207 */
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
        /* 16: Vector */
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
        /* 18: Vec. Scalar, 20: Vec. XOR */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
        /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
        /* 32: LE atomic, 34: EBB + ext EBB */
        0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
        /* 40: Radix MMU */
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */
        /* 42: PM, 44: PC RA, 46: SC vec'd */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
        /* 48: SIMD, 50: QP BFP, 52: String */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
        /* 54: DecFP, 56: DecI, 58: SHA */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
        /* 60: NM atomic, 62: RNG, 64: DAWR1 (ISA 3.1) */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
        /* 68: DEXCR[SBHE|IBRTPDUS|SRAPD|NPHIE|PHIE] */
        0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 66 - 71 */
        /* 72: [P]HASHST/[P]HASHCHK */
        0x80, 0x00, /* 72 - 73 */
    };
    uint8_t *pa_features = NULL;
    size_t pa_size;

    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) {
        pa_features = pa_features_206;
        pa_size = sizeof(pa_features_206);
    }
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) {
        pa_features = pa_features_207;
        pa_size = sizeof(pa_features_207);
    }
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) {
        pa_features = pa_features_300;
        pa_size = sizeof(pa_features_300);
    }
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, cpu->compat_pvr)) {
        pa_features = pa_features_31;
        pa_size = sizeof(pa_features_31);
    }
    if (!pa_features) {
        return;
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
        /*
         * Note: we keep CI large pages off by default because a 64K capable
         * guest provisioned with large pages might otherwise try to map a qemu
         * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
         * even if that qemu runs on a 4k host.
         * We add this bit back here if we are confident this is not an issue.
         */
        pa_features[3] |= 0x20;
    }
    if ((spapr_get_cap(spapr, SPAPR_CAP_HTM) != 0) && pa_size > 24) {
        pa_features[24] |= 0x80; /* Transactional memory support */
    }
    if (spapr->cas_pre_isa3_guest && pa_size > 40) {
        /* Workaround for broken kernels that attempt (guest) radix
         * mode when they can't handle it, if they see the radix bit set
         * in pa-features. So hide it from them. */
        pa_features[40 + 2] &= ~0x80; /* Radix MMU */
    }
    if (spapr_get_cap(spapr, SPAPR_CAP_DAWR1)) {
        g_assert(pa_size > 66);
        pa_features[66] |= 0x80;
    }

    _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
}
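
/*
 * Indexing note (illustrative): the first two array elements of each
 * pa_features_* table are the pa-features header (descriptor length,
 * status), so ISA feature byte N lives at array index N + 2, and bit 0
 * of a byte is its most significant bit (mask 0x80). For example, the
 * Radix MMU bit (feature byte 40) is cleared above via
 * pa_features[40 + 2] &= ~0x80, and DAWR1 (feature byte 64) is set via
 * pa_features[66] |= 0x80.
 */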

static void spapr_dt_pi_features(SpaprMachineState *spapr,
                                 PowerPCCPU *cpu,
                                 void *fdt, int offset)
{
    uint8_t pi_features[] = { 1, 0,
        0x00 };

    if (kvm_enabled() && ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00,
                                          0, cpu->compat_pvr)) {
        /*
         * POWER9 and later CPUs with KVM run in LPAR-per-thread mode where
         * all threads are essentially independent CPUs, and msgsndp does not
         * work (because it is physically-addressed) and therefore is
         * emulated by KVM, so disable it here to ensure XIVE will be used.
         * This is both KVM and CPU implementation-specific behaviour so a KVM
         * cap would be cleanest, but for now this works. If KVM ever permits
         * native msgsndp execution by guests, a cap could be added at that
         * time.
         */
        pi_features[2] |= 0x08; /* 4: No msgsndp */
    }

    _FDT((fdt_setprop(fdt, offset, "ibm,pi-features", pi_features,
                      sizeof(pi_features))));
}

static hwaddr spapr_node0_size(MachineState *machine)
{
    if (machine->numa_state->num_nodes) {
        int i;
        for (i = 0; i < machine->numa_state->num_nodes; ++i) {
            if (machine->numa_state->nodes[i].node_mem) {
                return MIN(pow2floor(machine->numa_state->nodes[i].node_mem),
                           machine->ram_size);
            }
        }
    }
    return machine->ram_size;
}

static void add_str(GString *s, const gchar *s1)
{
    g_string_append_len(s, s1, strlen(s1) + 1);
}

static int spapr_dt_memory_node(SpaprMachineState *spapr, void *fdt, int nodeid,
                                hwaddr start, hwaddr size)
{
    char mem_name[32];
    uint64_t mem_reg_property[2];
    int off;

    mem_reg_property[0] = cpu_to_be64(start);
    mem_reg_property[1] = cpu_to_be64(size);

    sprintf(mem_name, "memory@%" HWADDR_PRIx, start);
    off = fdt_add_subnode(fdt, 0, mem_name);
    _FDT(off);
    _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
    _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
                      sizeof(mem_reg_property))));
    spapr_numa_write_associativity_dt(spapr, fdt, off, nodeid);
    return off;
}
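
/*
 * Illustrative result (values assumed): for start = 0 and size = 1 GiB
 * this produces a node roughly equivalent to
 *
 *   memory@0 {
 *       device_type = "memory";
 *       reg = <0x0 0x0 0x0 0x40000000>;
 *   };
 *
 * plus the ibm,associativity property written by
 * spapr_numa_write_associativity_dt().
 */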

static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr)
{
    MemoryDeviceInfoList *info;

    for (info = list; info; info = info->next) {
        MemoryDeviceInfo *value = info->value;

        if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) {
            PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data;

            if (addr >= pcdimm_info->addr &&
                addr < (pcdimm_info->addr + pcdimm_info->size)) {
                return pcdimm_info->node;
            }
        }
    }
    return -1;
}

struct sPAPRDrconfCellV2 {
    uint32_t seq_lmbs;
    uint64_t base_addr;
    uint32_t drc_index;
    uint32_t aa_index;
    uint32_t flags;
} QEMU_PACKED;

typedef struct DrconfCellQueue {
    struct sPAPRDrconfCellV2 cell;
    QSIMPLEQ_ENTRY(DrconfCellQueue) entry;
} DrconfCellQueue;

static DrconfCellQueue *
spapr_get_drconf_cell(uint32_t seq_lmbs, uint64_t base_addr,
                      uint32_t drc_index, uint32_t aa_index,
                      uint32_t flags)
{
    DrconfCellQueue *elem;

    elem = g_malloc0(sizeof(*elem));
    elem->cell.seq_lmbs = cpu_to_be32(seq_lmbs);
    elem->cell.base_addr = cpu_to_be64(base_addr);
    elem->cell.drc_index = cpu_to_be32(drc_index);
    elem->cell.aa_index = cpu_to_be32(aa_index);
    elem->cell.flags = cpu_to_be32(flags);
    return elem;
}

static int spapr_dt_dynamic_memory_v2(SpaprMachineState *spapr, void *fdt,
                                      int offset, MemoryDeviceInfoList *dimms)
{
    MachineState *machine = MACHINE(spapr);
    uint8_t *int_buf, *cur_index;
    int ret;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint64_t addr, cur_addr, size;
    uint32_t nr_boot_lmbs = (machine->device_memory->base / lmb_size);
    uint64_t mem_end = machine->device_memory->base +
                       memory_region_size(&machine->device_memory->mr);
    uint32_t node, buf_len, nr_entries = 0;
    SpaprDrc *drc;
    DrconfCellQueue *elem, *next;
    MemoryDeviceInfoList *info;
    QSIMPLEQ_HEAD(, DrconfCellQueue) drconf_queue
        = QSIMPLEQ_HEAD_INITIALIZER(drconf_queue);

    /* Entry to cover RAM and the gap area */
    elem = spapr_get_drconf_cell(nr_boot_lmbs, 0, 0, -1,
                                 SPAPR_LMB_FLAGS_RESERVED |
                                 SPAPR_LMB_FLAGS_DRC_INVALID);
    QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
    nr_entries++;

    cur_addr = machine->device_memory->base;
    for (info = dimms; info; info = info->next) {
        PCDIMMDeviceInfo *di = info->value->u.dimm.data;

        addr = di->addr;
        size = di->size;
        node = di->node;

        /*
         * The NVDIMM area is hotpluggable after the NVDIMM is unplugged. The
         * area is marked hotpluggable in the next iteration for the bigger
         * chunk including the NVDIMM occupied area.
         */
        if (info->value->type == MEMORY_DEVICE_INFO_KIND_NVDIMM) {
            continue;
        }

        /* Entry for hot-pluggable area */
        if (cur_addr < addr) {
            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
            g_assert(drc);
            elem = spapr_get_drconf_cell((addr - cur_addr) / lmb_size,
                                         cur_addr, spapr_drc_index(drc), -1, 0);
            QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
            nr_entries++;
        }

        /* Entry for DIMM */
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, addr / lmb_size);
        g_assert(drc);
        elem = spapr_get_drconf_cell(size / lmb_size, addr,
                                     spapr_drc_index(drc), node,
                                     (SPAPR_LMB_FLAGS_ASSIGNED |
                                      SPAPR_LMB_FLAGS_HOTREMOVABLE));
        QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
        nr_entries++;
        cur_addr = addr + size;
    }

    /* Entry for remaining hotpluggable area */
    if (cur_addr < mem_end) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
        g_assert(drc);
        elem = spapr_get_drconf_cell((mem_end - cur_addr) / lmb_size,
                                     cur_addr, spapr_drc_index(drc), -1, 0);
        QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
        nr_entries++;
    }

    buf_len = nr_entries * sizeof(struct sPAPRDrconfCellV2) + sizeof(uint32_t);
    int_buf = cur_index = g_malloc0(buf_len);
    *(uint32_t *)int_buf = cpu_to_be32(nr_entries);
    cur_index += sizeof(nr_entries);
    QSIMPLEQ_FOREACH_SAFE(elem, &drconf_queue, entry, next) {
        memcpy(cur_index, &elem->cell, sizeof(elem->cell));
        cur_index += sizeof(elem->cell);
        QSIMPLEQ_REMOVE(&drconf_queue, elem, DrconfCellQueue, entry);
        g_free(elem);
    }

    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory-v2", int_buf, buf_len);
    g_free(int_buf);
    if (ret < 0) {
        return -1;
    }
    return 0;
}
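
/*
 * Illustrative encoding (values assumed): with lmb_size = 256 MiB,
 * device_memory->base = 4 GiB and a single 1 GiB DIMM plugged at that
 * base, the property starts with the entry count followed by packed
 * sPAPRDrconfCellV2 cells along the lines of
 *
 *   { seq_lmbs 16, base 0,  drc 0,     aa -1,     RESERVED|DRC_INVALID }
 *   { seq_lmbs 4,  base 4G, drc <idx>, aa <node>, ASSIGNED|HOTREMOVABLE }
 *
 * plus, if the device memory region extends past the DIMM, a final
 * flags = 0 entry covering the remaining hot-pluggable LMBs. Each run
 * describes seq_lmbs consecutive LMBs with consecutive DRC indexes.
 */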

static int spapr_dt_dynamic_memory(SpaprMachineState *spapr, void *fdt,
                                   int offset, MemoryDeviceInfoList *dimms)
{
    MachineState *machine = MACHINE(spapr);
    int i, ret;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t device_lmb_start = machine->device_memory->base / lmb_size;
    uint32_t nr_lmbs = (machine->device_memory->base +
                       memory_region_size(&machine->device_memory->mr)) /
                       lmb_size;
    uint32_t *int_buf, *cur_index, buf_len;

    /*
     * Allocate enough buffer size to fit in ibm,dynamic-memory
     */
    buf_len = (nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1) * sizeof(uint32_t);
    cur_index = int_buf = g_malloc0(buf_len);
    int_buf[0] = cpu_to_be32(nr_lmbs);
    cur_index++;
    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr = i * lmb_size;
        uint32_t *dynamic_memory = cur_index;

        if (i >= device_lmb_start) {
            SpaprDrc *drc;

            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i);
            g_assert(drc);

            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr));
            if (memory_region_present(get_system_memory(), addr)) {
                dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
            } else {
                dynamic_memory[5] = cpu_to_be32(0);
            }
        } else {
            /*
             * LMB information for RMA, boot time RAM and gap b/n RAM and
             * device memory region -- all these are marked as reserved
             * and as having no valid DRC.
             */
            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(0);
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(-1);
            dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
                                            SPAPR_LMB_FLAGS_DRC_INVALID);
        }

        cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
    }
    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
    g_free(int_buf);
    if (ret < 0) {
        return -1;
    }
    return 0;
}
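
/*
 * Illustrative layout (per the code above): each of the nr_lmbs entries
 * is SPAPR_DR_LMB_LIST_ENTRY_SIZE cells wide, i.e.
 *   cells 0-1: LMB base address (hi, lo)
 *   cell  2:   DRC index (0 when invalid)
 *   cell  3:   reserved
 *   cell  4:   associativity list index (NUMA node, -1 if none)
 *   cell  5:   flags
 * Unlike the v2 encoding, v1 emits one entry per LMB rather than one
 * per run of identical LMBs, so it grows linearly with maxmem.
 */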

/*
 * Adds ibm,dynamic-reconfiguration-memory node.
 * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
 * of this device tree node.
 */
static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr,
                                                   void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int ret, offset;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t prop_lmb_size[] = {cpu_to_be32(lmb_size >> 32),
                                cpu_to_be32(lmb_size & 0xffffffff)};
    MemoryDeviceInfoList *dimms = NULL;

    /* Don't create the node if there is no device memory. */
    if (!machine->device_memory) {
        return 0;
    }

    offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");

    ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
                      sizeof(prop_lmb_size));
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
    if (ret < 0) {
        return ret;
    }

    /* ibm,dynamic-memory or ibm,dynamic-memory-v2 */
    dimms = qmp_memory_device_list();
    if (spapr_ovec_test(spapr->ov5_cas, OV5_DRMEM_V2)) {
        ret = spapr_dt_dynamic_memory_v2(spapr, fdt, offset, dimms);
    } else {
        ret = spapr_dt_dynamic_memory(spapr, fdt, offset, dimms);
    }
    qapi_free_MemoryDeviceInfoList(dimms);
    if (ret < 0) {
        return ret;
    }

    ret = spapr_numa_write_assoc_lookup_arrays(spapr, fdt, offset);
    return ret;
}

static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    hwaddr mem_start, node_size;
    int i, nb_nodes = machine->numa_state->num_nodes;
    NodeInfo *nodes = machine->numa_state->nodes;

    for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
        if (!nodes[i].node_mem) {
            continue;
        }
        if (mem_start >= machine->ram_size) {
            node_size = 0;
        } else {
            node_size = nodes[i].node_mem;
            if (node_size > machine->ram_size - mem_start) {
                node_size = machine->ram_size - mem_start;
            }
        }
        if (!mem_start) {
            /* spapr_machine_init() checks for rma_size <= node0_size
             * already */
            spapr_dt_memory_node(spapr, fdt, i, 0, spapr->rma_size);
            mem_start += spapr->rma_size;
            node_size -= spapr->rma_size;
        }
        while (node_size) {
            hwaddr sizetmp = pow2floor(node_size);

            /* mem_start != 0 here */
            if (ctzl(mem_start) < ctzl(sizetmp)) {
                sizetmp = 1ULL << ctzl(mem_start);
            }

            spapr_dt_memory_node(spapr, fdt, i, mem_start, sizetmp);
            node_size -= sizetmp;
            mem_start += sizetmp;
        }
    }

    /* Generate ibm,dynamic-reconfiguration-memory node if required */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_DRCONF_MEMORY)) {
        int ret;

        ret = spapr_dt_dynamic_reconfiguration_memory(spapr, fdt);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
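
/*
 * Worked example for the chunking loop above (values assumed): a node
 * that continues at mem_start = 1 GiB with node_size = 3 GiB first gets
 * sizetmp = pow2floor(3G) = 2G, but ctzl(1G) = 30 < ctzl(2G) = 31, so
 * the chunk shrinks to 1 GiB to keep memory@40000000 naturally aligned;
 * the second iteration then emits the remaining 2 GiB at the now
 * 2 GiB-aligned mem_start = 0x80000000.
 */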

static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset,
                         SpaprMachineState *spapr)
{
    MachineState *ms = MACHINE(spapr);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
    int index = spapr_get_vcpu_id(cpu);
    uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
                       0xffffffff, 0xffffffff};
    uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
        : SPAPR_TIMEBASE_FREQ;
    uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
    uint32_t page_sizes_prop[64];
    size_t page_sizes_prop_size;
    unsigned int smp_threads = ms->smp.threads;
    uint32_t vcpus_per_socket = smp_threads * ms->smp.cores;
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
    int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));
    SpaprDrc *drc;
    int drc_index;
    uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
    int i;

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, env->core_index);
    if (drc) {
        drc_index = spapr_drc_index(drc);
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
    }

    _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
    _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));

    _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
                           env->icache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
                           env->icache_line_size)));

    if (pcc->l1_dcache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
                               pcc->l1_dcache_size)));
    } else {
        warn_report("Unknown L1 dcache size for cpu");
    }
    if (pcc->l1_icache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
                               pcc->l1_icache_size)));
    } else {
        warn_report("Unknown L1 icache size for cpu");
    }

    _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "slb-size", cpu->hash64_opts->slb_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", cpu->hash64_opts->slb_size)));
    _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
    _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));

    if (ppc_has_spr(cpu, SPR_PURR)) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,purr", 1)));
    }
    if (ppc_has_spr(cpu, SPR_SPURR)) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,spurr", 1)));
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) {
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
                          segs, sizeof(segs))));
    }

    /* Advertise VSX (vector extensions) if available
     *   1 == VMX / Altivec available
     *   2 == VSX available
     *
     * Only CPUs for which we create core types in spapr_cpu_core.c
     * are possible, and all of those have VMX */
    if (env->insns_flags & PPC_ALTIVEC) {
        if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) {
            _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2)));
        } else {
            _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1)));
        }
    }

    /* Advertise DFP (Decimal Floating Point) if available
     *   0 / no property == no DFP
     *   1 == DFP available */
    if (spapr_get_cap(spapr, SPAPR_CAP_DFP) != 0) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
    }

    page_sizes_prop_size = ppc_create_page_sizes_prop(cpu, page_sizes_prop,
                                                      sizeof(page_sizes_prop));
    if (page_sizes_prop_size) {
        _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
                          page_sizes_prop, page_sizes_prop_size)));
    }

    spapr_dt_pa_features(spapr, cpu, fdt, offset);
    spapr_dt_pi_features(spapr, cpu, fdt, offset);

    _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
                           cs->cpu_index / vcpus_per_socket)));

    _FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
                      pft_size_prop, sizeof(pft_size_prop))));

    if (ms->numa_state->num_nodes > 1) {
        _FDT(spapr_numa_fixup_cpu_dt(spapr, fdt, offset, cpu));
    }

    _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt));

    if (pcc->radix_page_info) {
        for (i = 0; i < pcc->radix_page_info->count; i++) {
            radix_AP_encodings[i] =
                cpu_to_be32(pcc->radix_page_info->entries[i]);
        }
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings",
                          radix_AP_encodings,
                          pcc->radix_page_info->count *
                          sizeof(radix_AP_encodings[0]))));
    }

    /*
     * We set this property to let the guest know that it can use the large
     * decrementer and its width in bits.
     */
    if (spapr_get_cap(spapr, SPAPR_CAP_LARGE_DECREMENTER) != SPAPR_CAP_OFF) {
        _FDT((fdt_setprop_u32(fdt, offset, "ibm,dec-bits",
                              pcc->lrg_decr_bits)));
    }
}

static void spapr_dt_one_cpu(void *fdt, SpaprMachineState *spapr, CPUState *cs,
                             int cpus_offset)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    int index = spapr_get_vcpu_id(cpu);
    DeviceClass *dc = DEVICE_GET_CLASS(cs);
    g_autofree char *nodename = NULL;
    int offset;

    if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
        return;
    }

    nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
    offset = fdt_add_subnode(fdt, cpus_offset, nodename);
    _FDT(offset);
    spapr_dt_cpu(cs, fdt, offset, spapr);
}

static void spapr_dt_cpus(void *fdt, SpaprMachineState *spapr)
{
    CPUState **rev;
    CPUState *cs;
    int n_cpus;
    int cpus_offset;
    int i;

    cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
    _FDT(cpus_offset);
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));

    /*
     * We walk the CPUs in reverse order to ensure that CPU DT nodes
     * created by fdt_add_subnode() end up in the right order in FDT
     * for the guest kernel to enumerate the CPUs correctly.
     *
     * The CPU list cannot be traversed in reverse order, so we need
     * to do extra work.
     */
    n_cpus = 0;
    rev = NULL;
    CPU_FOREACH(cs) {
        rev = g_renew(CPUState *, rev, n_cpus + 1);
        rev[n_cpus++] = cs;
    }

    for (i = n_cpus - 1; i >= 0; i--) {
        spapr_dt_one_cpu(fdt, spapr, rev[i], cpus_offset);
    }

    g_free(rev);
}

static int spapr_dt_rng(void *fdt)
{
    int node;
    int ret;

    node = qemu_fdt_add_subnode(fdt, "/ibm,platform-facilities");
    if (node <= 0) {
        return -1;
    }
    ret = fdt_setprop_string(fdt, node, "device_type",
                             "ibm,platform-facilities");
    ret |= fdt_setprop_cell(fdt, node, "#address-cells", 0x1);
    ret |= fdt_setprop_cell(fdt, node, "#size-cells", 0x0);

    node = fdt_add_subnode(fdt, node, "ibm,random-v1");
    if (node <= 0) {
        return -1;
    }
    ret |= fdt_setprop_string(fdt, node, "compatible", "ibm,random");

    return ret ? -1 : 0;
}
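
/*
 * Illustrative result: on success the tree gains
 *
 *   ibm,platform-facilities {
 *       device_type = "ibm,platform-facilities";
 *       #address-cells = <1>;
 *       #size-cells = <0>;
 *       ibm,random-v1 {
 *           compatible = "ibm,random";
 *       };
 *   };
 *
 * i.e. the "ibm,random" node that the guest's hardware RNG driver
 * looks for.
 */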

static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt)
{
    MachineState *ms = MACHINE(spapr);
    int rtas;
    GString *hypertas = g_string_sized_new(256);
    GString *qemu_hypertas = g_string_sized_new(256);
    uint32_t lrdr_capacity[] = {
        0,
        0,
        cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE >> 32),
        cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE & 0xffffffff),
        cpu_to_be32(ms->smp.max_cpus / ms->smp.threads),
    };

    /* Do we have device memory? */
    if (MACHINE(spapr)->device_memory) {
        uint64_t max_device_addr = MACHINE(spapr)->device_memory->base +
            memory_region_size(&MACHINE(spapr)->device_memory->mr);

        lrdr_capacity[0] = cpu_to_be32(max_device_addr >> 32);
        lrdr_capacity[1] = cpu_to_be32(max_device_addr & 0xffffffff);
    }

    _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));

    /* hypertas */
    add_str(hypertas, "hcall-pft");
    add_str(hypertas, "hcall-term");
    add_str(hypertas, "hcall-dabr");
    add_str(hypertas, "hcall-interrupt");
    add_str(hypertas, "hcall-tce");
    add_str(hypertas, "hcall-vio");
    add_str(hypertas, "hcall-splpar");
    add_str(hypertas, "hcall-join");
    add_str(hypertas, "hcall-bulk");
    add_str(hypertas, "hcall-set-mode");
    add_str(hypertas, "hcall-sprg0");
    add_str(hypertas, "hcall-copy");
    add_str(hypertas, "hcall-debug");
    add_str(hypertas, "hcall-vphn");
    if (spapr_get_cap(spapr, SPAPR_CAP_RPT_INVALIDATE) == SPAPR_CAP_ON) {
        add_str(hypertas, "hcall-rpt-invalidate");
    }
    add_str(qemu_hypertas, "hcall-memop1");

    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
        add_str(hypertas, "hcall-multi-tce");
    }

    if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
        add_str(hypertas, "hcall-hpt-resize");
    }

    add_str(hypertas, "hcall-watchdog");

    _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
                     hypertas->str, hypertas->len));
    g_string_free(hypertas, TRUE);
    _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
                     qemu_hypertas->str, qemu_hypertas->len));
    g_string_free(qemu_hypertas, TRUE);

    spapr_numa_write_rtas_dt(spapr, fdt, rtas);

    /*
     * FWNMI reserves RTAS_ERROR_LOG_MAX for the machine check error log,
     * and 16 bytes per CPU for the system reset error log, plus an extra
     * 8 bytes.
     *
     * The system reset requirements are driven by the existing Linux and
     * PowerVM implementation which (contrary to PAPR) saves r3 in the
     * error log structure like machine check, so Linux expects to find
     * the saved r3 value at the address in r3 upon FWNMI-enabled sreset
     * interrupt (and does not look at the error value).
     *
     * System reset interrupts are not subject to interlock like machine
     * check, so this memory area could be corrupted if the sreset is
     * interrupted by a machine check (or vice versa) if it was shared. To
     * prevent this, system reset uses per-CPU areas for the sreset save
     * area. A system reset that interrupts a system reset handler could
     * still overwrite this area, but Linux doesn't try to recover in that
     * case anyway.
     *
     * The extra 8 bytes is required because Linux's FWNMI error log check
     * is off-by-one.
     *
     * RTAS_MIN_SIZE is required for the RTAS blob itself.
     */
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-size", RTAS_MIN_SIZE +
                          RTAS_ERROR_LOG_MAX +
                          ms->smp.max_cpus * sizeof(uint64_t) * 2 +
                          sizeof(uint64_t)));
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
                          RTAS_ERROR_LOG_MAX));
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
                          RTAS_EVENT_SCAN_RATE));

    g_assert(msi_nonbroken);
    _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));

    /*
     * According to PAPR, the rtas ibm,os-term call does not guarantee a
     * return back to the guest cpu. An additional ibm,extended-os-term
     * property indicates that a return from the rtas call will always
     * occur, so set this property.
     */
    _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));

    _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
                     lrdr_capacity, sizeof(lrdr_capacity)));

    spapr_dt_rtas_tokens(fdt, rtas);
}
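
/*
 * Illustrative breakdown of the rtas-size computation above (symbolic,
 * since the constants live in spapr.h): the RTAS blob itself
 * (RTAS_MIN_SIZE), one machine check error log (RTAS_ERROR_LOG_MAX),
 * a 16-byte per-CPU sreset save area (max_cpus * sizeof(uint64_t) * 2),
 * and the final sizeof(uint64_t) that papers over Linux's off-by-one
 * log-bounds check.
 */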

/*
 * Prepare ibm,arch-vec-5-platform-support, which indicates the MMU
 * and the XIVE features that the guest may request and thus the valid
 * values for bytes 23..26 of option vector 5:
 */
static void spapr_dt_ov5_platform_support(SpaprMachineState *spapr, void *fdt,
                                          int chosen)
{
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
    char val[2 * 4] = {
        23, 0x00, /* XICS / XIVE mode */
        24, 0x00, /* Hash/Radix, filled in below. */
        25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
        26, 0x40, /* Radix options: GTSE == yes. */
    };

    if (spapr->irq->xics && spapr->irq->xive) {
        val[1] = SPAPR_OV5_XIVE_BOTH;
    } else if (spapr->irq->xive) {
        val[1] = SPAPR_OV5_XIVE_EXPLOIT;
    } else {
        assert(spapr->irq->xics);
        val[1] = SPAPR_OV5_XIVE_LEGACY;
    }

    if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                          first_ppc_cpu->compat_pvr)) {
        /*
         * If we're in a pre POWER9 compat mode then the guest should
         * do hash and use the legacy interrupt mode
         */
        val[1] = SPAPR_OV5_XIVE_LEGACY; /* XICS */
        val[3] = 0x00; /* Hash */
        spapr_check_mmu_mode(false);
    } else if (kvm_enabled()) {
        if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
            val[3] = 0x80; /* OV5_MMU_BOTH */
        } else if (kvmppc_has_cap_mmu_radix()) {
            val[3] = 0x40; /* OV5_MMU_RADIX_300 */
        } else {
            val[3] = 0x00; /* Hash */
        }
    } else {
        /* V3 MMU supports both hash and radix in tcg (with dynamic switching) */
        val[3] = 0xC0;
    }
    _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
                     val, sizeof(val)));
}

static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset)
{
    MachineState *machine = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    int chosen;

    _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));

    if (reset) {
        const char *boot_device = spapr->boot_device;
        g_autofree char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
        size_t cb = 0;
        g_autofree char *bootlist = get_boot_devices_list(&cb);

        if (machine->kernel_cmdline && machine->kernel_cmdline[0]) {
            _FDT(fdt_setprop_string(fdt, chosen, "bootargs",
                                    machine->kernel_cmdline));
        }

        if (spapr->initrd_size) {
            _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
                                  spapr->initrd_base));
            _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
                                  spapr->initrd_base + spapr->initrd_size));
        }

        if (spapr->kernel_size) {
            uint64_t kprop[2] = { cpu_to_be64(spapr->kernel_addr),
                                  cpu_to_be64(spapr->kernel_size) };

            _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
                             &kprop, sizeof(kprop)));
            if (spapr->kernel_le) {
                _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
            }
        }
        if (machine->boot_config.has_menu && machine->boot_config.menu) {
            _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", true)));
        }
        _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
        _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
        _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));

        if (cb && bootlist) {
            int i;

            for (i = 0; i < cb; i++) {
                if (bootlist[i] == '\n') {
                    bootlist[i] = ' ';
                }
            }
            _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
        }

        if (boot_device && strlen(boot_device)) {
            _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
        }

        if (spapr->want_stdout_path && stdout_path) {
            /*
             * The "linux,stdout-path" and "stdout" properties are
             * deprecated by the Linux kernel. New platforms should only
             * use the "stdout-path" property. Set the new property and
             * continue using the older property to remain compatible
             * with existing firmware.
             */
            _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
            _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path));
        }

        /*
         * We can deal with BAR reallocation just fine, advertise it
         * to the guest.
         */
        if (smc->linux_pci_probe) {
            _FDT(fdt_setprop_cell(fdt, chosen, "linux,pci-probe-only", 0));
        }

        spapr_dt_ov5_platform_support(spapr, fdt, chosen);
    }

    _FDT(fdt_setprop(fdt, chosen, "rng-seed", spapr->fdt_rng_seed, 32));

    _FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5"));
}

static void spapr_dt_hypervisor(SpaprMachineState *spapr, void *fdt)
{
    /*
     * The /hypervisor node isn't in PAPR - this is a hack to allow PR
     * KVM to work under pHyp with some guest co-operation.
     */
    int hypervisor;
    uint8_t hypercall[16];

    _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor"));
    /* indicate KVM hypercall interface */
    _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm"));
    if (kvmppc_has_cap_fixup_hcalls()) {
        /*
         * Older KVM versions with older guest kernels were broken
         * with the magic page, don't allow the guest to map it.
         */
        if (!kvmppc_get_hypercall(cpu_env(first_cpu), hypercall,
                                  sizeof(hypercall))) {
            _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
                             hypercall, sizeof(hypercall)));
        }
    }
}

void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space)
{
    MachineState *machine = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    uint32_t root_drc_type_mask = 0;
    int ret;
    void *fdt;
    SpaprPhbState *phb;
    char *buf;

    fdt = g_malloc0(space);

    _FDT((fdt_create_empty_tree(fdt, space)));

    /* Root node */
    _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp"));
    _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
    _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));

    /* Guest UUID & Name */
    buf = qemu_uuid_unparse_strdup(&qemu_uuid);
    _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
    if (qemu_uuid_set) {
        _FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
    }
    g_free(buf);

    if (qemu_get_vm_name()) {
        _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name",
                                qemu_get_vm_name()));
    }

    /* Host Model & Serial Number */
    if (spapr->host_model) {
        _FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model));
    } else if (smc->broken_host_serial_model && kvmppc_get_host_model(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
        g_free(buf);
    }

    if (spapr->host_serial) {
        _FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial));
    } else if (smc->broken_host_serial_model && kvmppc_get_host_serial(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
        g_free(buf);
    }

    _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
    _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));

    /* /interrupt controller */
    spapr_irq_dt(spapr, spapr_max_server_number(spapr), fdt, PHANDLE_INTC);

    ret = spapr_dt_memory(spapr, fdt);
    if (ret < 0) {
        error_report("couldn't set up memory nodes in fdt");
        exit(1);
    }

    /* /vdevice */
    spapr_dt_vdevice(spapr->vio_bus, fdt);

    if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
        ret = spapr_dt_rng(fdt);
        if (ret < 0) {
            error_report("could not set up rng device in the fdt");
            exit(1);
        }
    }

    QLIST_FOREACH(phb, &spapr->phbs, list) {
        ret = spapr_dt_phb(spapr, phb, PHANDLE_INTC, fdt, NULL);
        if (ret < 0) {
            error_report("couldn't set up PCI devices in fdt");
            exit(1);
        }
    }

    spapr_dt_cpus(fdt, spapr);

    /* ibm,drc-indexes and friends */
    root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB;
    if (smc->dr_phb_enabled) {
        root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PHB;
    }
    if (mc->nvdimm_supported) {
        root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PMEM;
    }
    if (root_drc_type_mask) {
        _FDT(spapr_dt_drc(fdt, 0, NULL, root_drc_type_mask));
    }

    if (mc->has_hotpluggable_cpus) {
        int offset = fdt_path_offset(fdt, "/cpus");
        ret = spapr_dt_drc(fdt, offset, NULL, SPAPR_DR_CONNECTOR_TYPE_CPU);
        if (ret < 0) {
            error_report("Couldn't set up CPU DR device tree properties");
            exit(1);
        }
    }

    /* /event-sources */
    spapr_dt_events(spapr, fdt);

    /* /rtas */
    spapr_dt_rtas(spapr, fdt);

    /* /chosen */
    spapr_dt_chosen(spapr, fdt, reset);

    /* /hypervisor */
    if (kvm_enabled()) {
        spapr_dt_hypervisor(spapr, fdt);
    }

    /* Build memory reserve map */
    if (reset) {
        if (spapr->kernel_size) {
            _FDT((fdt_add_mem_rsv(fdt, spapr->kernel_addr,
                                  spapr->kernel_size)));
        }
        if (spapr->initrd_size) {
            _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base,
                                  spapr->initrd_size)));
        }
    }

    /* NVDIMM devices */
    if (mc->nvdimm_supported) {
        spapr_dt_persistent_memory(spapr, fdt);
    }

    return fdt;
}
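
/*
 * Example (illustrative): a kernel segment destined for 0xc0400000 has its
 * top nibble masked off by 0x0fffffff, so it lands at
 * spapr->kernel_addr + 0x00400000 in guest RAM; the mask keeps only the
 * low 256 MiB offset of the load address.
 */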
static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
{
    SpaprMachineState *spapr = opaque;

    return (addr & 0x0fffffff) + spapr->kernel_addr;
}

static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
                                    PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* The TCG path should also be holding the BQL at this point */
    g_assert(bql_locked());

    g_assert(!vhyp_cpu_in_nested(cpu));
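
    /*
     * PAPR hcall calling convention: the hypercall number arrives in r3
     * with its arguments in r4 onwards, and the return status goes back
     * in r3, which is why gpr[3] is both read and written below.
     */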
    if (FIELD_EX64(env->msr, MSR, PR)) {
        hcall_dprintf("Hypercall made with MSR[PR]=1\n");
        env->gpr[3] = H_PRIVILEGE;
    } else {
        env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
    }
}

struct LPCRSyncState {
    target_ulong value;
    target_ulong mask;
};

static void do_lpcr_sync(CPUState *cs, run_on_cpu_data arg)
{
    struct LPCRSyncState *s = arg.host_ptr;
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    target_ulong lpcr;

    cpu_synchronize_state(cs);
    lpcr = env->spr[SPR_LPCR];
    lpcr &= ~s->mask;
    lpcr |= s->value;
    ppc_store_lpcr(cpu, lpcr);
}

void spapr_set_all_lpcrs(target_ulong value, target_ulong mask)
{
    CPUState *cs;
    struct LPCRSyncState s = {
        .value = value,
        .mask = mask
    };

    CPU_FOREACH(cs) {
        run_on_cpu(cs, do_lpcr_sync, RUN_ON_CPU_HOST_PTR(&s));
    }
}

/* May be used when the machine is not running */
void spapr_init_all_lpcrs(target_ulong value, target_ulong mask)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *env = &cpu->env;
        target_ulong lpcr;

        lpcr = env->spr[SPR_LPCR];
        /* Apply the caller's mask and value, as in spapr_set_all_lpcrs() */
        lpcr &= ~mask;
        lpcr |= value;
        ppc_store_lpcr(cpu, lpcr);
    }
}

static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu,
                           target_ulong lpid, ppc_v3_pate_t *entry)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (!spapr_cpu->in_nested) {
        assert(lpid == 0);

        /* Copy PATE1:GR into PATE0:HR */
        entry->dw0 = spapr->patb_entry & PATE0_HR;
        entry->dw1 = spapr->patb_entry;
        return true;
    } else {
        if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) {
            return spapr_get_pate_nested_hv(spapr, cpu, lpid, entry);
        } else if (spapr_nested_api(spapr) == NESTED_API_PAPR) {
            return spapr_get_pate_nested_papr(spapr, cpu, lpid, entry);
        } else {
            g_assert_not_reached();
        }
    }
}
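
/*
 * The HPT accessors below rely on each HPTE being a pair of big-endian
 * 64-bit doublewords (16 bytes, HASH_PTE_SIZE_64), so slot 'index' lives
 * at htab[2 * index]. The HPTE64_V_* flags tested here sit in the first
 * (valid) doubleword.
 */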
static uint64_t *hpte_get_ptr(SpaprMachineState *s, unsigned index)
{
    uint64_t *table = s->htab;

    return &table[2 * index];
}

static bool hpte_is_valid(SpaprMachineState *s, unsigned index)
{
    return ldq_be_p(hpte_get_ptr(s, index)) & HPTE64_V_VALID;
}

static bool hpte_is_dirty(SpaprMachineState *s, unsigned index)
{
    return ldq_be_p(hpte_get_ptr(s, index)) & HPTE64_V_HPTE_DIRTY;
}

static void hpte_set_clean(SpaprMachineState *s, unsigned index)
{
    stq_be_p(hpte_get_ptr(s, index),
             ldq_be_p(hpte_get_ptr(s, index)) & ~HPTE64_V_HPTE_DIRTY);
}

static void hpte_set_dirty(SpaprMachineState *s, unsigned index)
{
    stq_be_p(hpte_get_ptr(s, index),
             ldq_be_p(hpte_get_ptr(s, index)) | HPTE64_V_HPTE_DIRTY);
}

/*
 * Get the fd to access the kernel htab, re-opening it if necessary
 */
static int get_htab_fd(SpaprMachineState *spapr)
{
    Error *local_err = NULL;

    if (spapr->htab_fd >= 0) {
        return spapr->htab_fd;
    }

    spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err);
    if (spapr->htab_fd < 0) {
        error_report_err(local_err);
    }

    return spapr->htab_fd;
}

void close_htab_fd(SpaprMachineState *spapr)
{
    if (spapr->htab_fd >= 0) {
        close(spapr->htab_fd);
    }
    spapr->htab_fd = -1;
}

static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
}
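
/*
 * KVM PR takes the HPT in an SDR1-style encoding: the table's host address
 * in the high bits, with the low bits holding htab_shift - 18 (2^18 being
 * the minimum architected HPT size). This works because the table is
 * allocated size-aligned below, leaving the low bits free.
 */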
static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    assert(kvm_enabled());

    if (!spapr->htab) {
        return 0;
    }

    return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18);
}

static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
                                               hwaddr ptex, int n)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;

    if (!spapr->htab) {
        /*
         * HTAB is controlled by KVM. Fetch into temporary buffer
         */
        ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
        kvmppc_read_hptes(hptes, ptex, n);
        return hptes;
    }

    /*
     * HTAB is controlled by QEMU. Just point to the internally
     * accessible PTEG.
     */
    return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
}

static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
                              const ppc_hash_pte64_t *hptes,
                              hwaddr ptex, int n)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    if (!spapr->htab) {
        g_free((void *)hptes);
    }

    /* Nothing to do for qemu managed HPT */
}

void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
                      uint64_t pte0, uint64_t pte1)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp);
    hwaddr offset = ptex * HASH_PTE_SIZE_64;

    if (!spapr->htab) {
        kvmppc_write_hpte(ptex, pte0, pte1);
    } else {
        if (pte0 & HPTE64_V_VALID) {
            stq_p(spapr->htab + offset + HPTE64_DW1, pte1);
            /*
             * When setting valid, we write PTE1 first. This ensures
             * proper synchronization with the reading code in
             * ppc_hash64_pteg_search()
             */
            smp_wmb();
            stq_p(spapr->htab + offset, pte0);
        } else {
            stq_p(spapr->htab + offset, pte0);
            /*
             * When clearing it we set PTE0 first. This ensures proper
             * synchronization with the reading code in
             * ppc_hash64_pteg_search()
             */
            smp_wmb();
            stq_p(spapr->htab + offset + HPTE64_DW1, pte1);
        }
    }
}

static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                             uint64_t pte1)
{
    hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C;
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    if (!spapr->htab) {
        /* There should always be a hash table when this is called */
        error_report("spapr_hpte_set_c called with no hash table!");
        return;
    }

    /* The HW performs a non-atomic byte update */
    stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80);
}

static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                             uint64_t pte1)
{
    hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R;
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    if (!spapr->htab) {
        /* There should always be a hash table when this is called */
        error_report("spapr_hpte_set_r called with no hash table!");
        return;
    }

    /* The HW performs a non-atomic byte update */
    stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01);
}
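
/*
 * Worked example: for 16 GiB of RAM, pow2ceil() gives 2^34, ctz64() yields
 * 34, and 34 - 7 = 27, i.e. a 128 MiB HPT (1/128 of RAM). The clamps below
 * then keep the result within the architected 2^18..2^46 range.
 */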
int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
{
    int shift;

    /*
     * We aim for a hash table of size 1/128 the size of RAM (rounded
     * up). The PAPR recommendation is actually 1/64 of RAM size, but
     * that's much more than is needed for Linux guests.
     */
    shift = ctz64(pow2ceil(ramsize)) - 7;
    shift = MAX(shift, 18); /* Minimum architected size */
    shift = MIN(shift, 46); /* Maximum architected size */

    return shift;
}

void spapr_free_hpt(SpaprMachineState *spapr)
{
    qemu_vfree(spapr->htab);
    spapr->htab = NULL;
    spapr->htab_shift = 0;
    close_htab_fd(spapr);
}

int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp)
{
    ERRP_GUARD();
    long rc;

    /* Clean up any HPT info from a previous boot */
    spapr_free_hpt(spapr);

    rc = kvmppc_reset_htab(shift);

    if (rc == -EOPNOTSUPP) {
        error_setg(errp, "HPT not supported in nested guests");
        return -EOPNOTSUPP;
    }

    if (rc < 0) {
        /* kernel-side HPT needed, but couldn't allocate one */
        error_setg_errno(errp, errno, "Failed to allocate KVM HPT of order %d",
                         shift);
        error_append_hint(errp, "Try smaller maxmem?\n");
        return -errno;
    } else if (rc > 0) {
        /* kernel-side HPT allocated */
        if (rc != shift) {
            error_setg(errp,
                       "Requested order %d HPT, but kernel allocated order %ld",
                       shift, rc);
            error_append_hint(errp, "Try smaller maxmem?\n");
            return -ENOSPC;
        }

        spapr->htab_shift = shift;
        spapr->htab = NULL;
    } else {
        /* kernel-side HPT not needed, allocate in userspace instead */
        size_t size = 1ULL << shift;
        int i;

        spapr->htab = qemu_memalign(size, size);
        memset(spapr->htab, 0, size);
        spapr->htab_shift = shift;

        for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
            hpte_set_dirty(spapr, i);
        }
    }
    /* We're setting up a hash table, so that means we're not radix */
    spapr->patb_entry = 0;
    spapr_init_all_lpcrs(0, LPCR_HR | LPCR_UPRT);
    return 0;
}

void spapr_setup_hpt(SpaprMachineState *spapr)
{
    int hpt_shift;

    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
        hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);
    } else {
        uint64_t current_ram_size;

        current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size();
        hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size);
    }
    spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal);

    if (kvm_enabled()) {
        hwaddr vrma_limit = kvmppc_vrma_limit(spapr->htab_shift);

        /* Check our RMA fits in the possible VRMA */
        if (vrma_limit < spapr->rma_size) {
            error_report("Unable to create %" HWADDR_PRIu
                         "MiB RMA (VRMA only allows %" HWADDR_PRIu "MiB)",
                         spapr->rma_size / MiB, vrma_limit / MiB);
            exit(EXIT_FAILURE);
        }
    }
}

void spapr_check_mmu_mode(bool guest_radix)
{
    if (guest_radix) {
        if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) {
            error_report("Guest requested unavailable MMU mode (radix).");
            exit(EXIT_FAILURE);
        }
    } else {
        if (kvm_enabled() && kvmppc_has_cap_mmu_radix()
            && !kvmppc_has_cap_mmu_hash_v3()) {
            error_report("Guest requested unavailable MMU mode (hash).");
            exit(EXIT_FAILURE);
        }
    }
}

static void spapr_machine_reset(MachineState *machine, ResetType type)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(machine);
    PowerPCCPU *first_ppc_cpu;
    hwaddr fdt_addr;
    void *fdt;
    int rc;

    if (type != RESET_TYPE_SNAPSHOT_LOAD) {
        /*
         * Record-replay snapshot load must not consume random, this was
         * already replayed from initial machine reset.
         */
        qemu_guest_getrandom_nofail(spapr->fdt_rng_seed, 32);
    }

    if (machine->cgs) {
        confidential_guest_kvm_reset(machine->cgs, &error_fatal);
    }
    spapr_caps_apply(spapr);
    spapr_nested_reset(spapr);

    first_ppc_cpu = POWERPC_CPU(first_cpu);
    if (kvm_enabled() && kvmppc_has_cap_mmu_radix() &&
        ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
                              spapr->max_compat_pvr)) {
        /*
         * If using KVM with radix mode available, VCPUs can be started
         * without a HPT because KVM will start them in radix mode.
         * Set the GR bit in PATE so that we know there is no HPT.
         */
        spapr->patb_entry = PATE1_GR;
        spapr_set_all_lpcrs(LPCR_HR | LPCR_UPRT, LPCR_HR | LPCR_UPRT);
    } else {
        spapr_setup_hpt(spapr);
    }

    qemu_devices_reset(type);

    spapr_ovec_cleanup(spapr->ov5_cas);
    spapr->ov5_cas = spapr_ovec_new();

    ppc_init_compat_all(spapr->max_compat_pvr, &error_fatal);

    /*
     * This is fixing some of the default configuration of the XIVE
     * devices. To be called after the reset of the machine devices.
     */
    spapr_irq_reset(spapr, &error_fatal);

    /*
     * There is no CAS under qtest. Simulate one to please the code that
     * depends on spapr->ov5_cas. This is especially needed to test device
     * unplug, so we do that before resetting the DRCs.
     */
    if (qtest_enabled()) {
        spapr_ovec_cleanup(spapr->ov5_cas);
        spapr->ov5_cas = spapr_ovec_clone(spapr->ov5);
    }

    spapr_nvdimm_finish_flushes();

    /*
     * DRC reset may cause a device to be unplugged. This will cause troubles
     * if this device is used by another device (eg, a running vhost backend
     * will crash QEMU if the DIMM holding the vring goes away). To avoid such
     * situations, we reset DRCs after all devices have been reset.
     */
    spapr_drc_reset_all(spapr);

    spapr_clear_pending_events(spapr);

    /*
     * We place the device tree just below either the top of the RMA,
     * or just below 2GB, whichever is lower, so that it can be
     * processed with 32-bit real mode code if necessary.
     */
    fdt_addr = MIN(spapr->rma_size, FDT_MAX_ADDR) - FDT_MAX_SIZE;

    fdt = spapr_build_fdt(spapr, true, FDT_MAX_SIZE);
    if (spapr->vof) {
        spapr_vof_reset(spapr, fdt, &error_fatal);
        /*
         * Do not pack the FDT as the client may change properties.
         * VOF client does not expect the FDT so we do not load it to the VM.
         */
    } else {
        rc = fdt_pack(fdt);

        /* Should only fail if we've built a corrupted tree */
        assert(rc == 0);

        spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT,
                                  0, fdt_addr, 0);
        cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
    }

    g_free(spapr->fdt_blob);
    spapr->fdt_size = fdt_totalsize(fdt);
    spapr->fdt_initial_size = spapr->fdt_size;
    spapr->fdt_blob = fdt;

    /* Set machine->fdt for 'dumpdtb' QMP/HMP command */
    machine->fdt = fdt;

    /* Set up the entry state */
    first_ppc_cpu->env.gpr[5] = 0;

    spapr->fwnmi_system_reset_addr = -1;
    spapr->fwnmi_machine_check_addr = -1;
    spapr->fwnmi_machine_check_interlock = -1;

    /* Signal all vCPUs waiting on this condition */
    qemu_cond_broadcast(&spapr->fwnmi_machine_check_interlock_cond);

    migrate_del_blocker(&spapr->fwnmi_migration_blocker);
}

static void spapr_create_nvram(SpaprMachineState *spapr)
{
    DeviceState *dev = qdev_new("spapr-nvram");
    DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);

    if (dinfo) {
        qdev_prop_set_drive_err(dev, "drive", blk_by_legacy_dinfo(dinfo),
                                &error_fatal);
    }

    qdev_realize_and_unref(dev, &spapr->vio_bus->bus, &error_fatal);

    spapr->nvram = (struct SpaprNvram *)dev;
}

static void spapr_rtc_create(SpaprMachineState *spapr)
{
    object_initialize_child_with_props(OBJECT(spapr), "rtc", &spapr->rtc,
                                       sizeof(spapr->rtc), TYPE_SPAPR_RTC,
                                       &error_fatal, NULL);
    qdev_realize(DEVICE(&spapr->rtc), NULL, &error_fatal);
    object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc),
                              "date");
}

/* Returns whether we want to use VGA or not */
static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
{
    vga_interface_created = true;
    switch (vga_interface_type) {
    case VGA_NONE:
        return false;
    case VGA_DEVICE:
        return true;
    case VGA_STD:
    case VGA_VIRTIO:
    case VGA_CIRRUS:
        return pci_vga_init(pci_bus) != NULL;
    default:
        error_setg(errp,
                   "Unsupported VGA mode, only -vga std or -vga virtio is supported");
        return false;
    }
}

static int spapr_pre_load(void *opaque)
{
    int rc;

    rc = spapr_caps_pre_load(opaque);
    if (rc) {
        return rc;
    }

    return 0;
}

static int spapr_post_load(void *opaque, int version_id)
{
    SpaprMachineState *spapr = (SpaprMachineState *)opaque;
    int err = 0;

    err = spapr_caps_post_migration(spapr);
    if (err) {
        return err;
    }

    /*
     * In earlier versions, there was no separate qdev for the PAPR
     * RTC, so the RTC offset was stored directly in sPAPREnvironment.
     * So when migrating from those versions, poke the incoming offset
     * value into the RTC device.
     */
    if (version_id < 3) {
        err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
        if (err) {
            return err;
        }
    }

    if (kvm_enabled() && spapr->patb_entry) {
        PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
        bool radix = !!(spapr->patb_entry & PATE1_GR);
        bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE);

        /*
         * Update LPCR:HR and UPRT as they may not be set properly in
         * the stream.
         */
        spapr_set_all_lpcrs(radix ? (LPCR_HR | LPCR_UPRT) : 0,
                            LPCR_HR | LPCR_UPRT);

        err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
        if (err) {
            error_report("Process table config unsupported by the host");
            return -EINVAL;
        }
    }

    err = spapr_irq_post_load(spapr, version_id);
    if (err) {
        return err;
    }

    return err;
}

static int spapr_pre_save(void *opaque)
{
    int rc;

    rc = spapr_caps_pre_save(opaque);
    if (rc) {
        return rc;
    }

    return 0;
}

static bool version_before_3(void *opaque, int version_id)
{
    return version_id < 3;
}

static bool spapr_pending_events_needed(void *opaque)
{
    SpaprMachineState *spapr = (SpaprMachineState *)opaque;

    return !QTAILQ_EMPTY(&spapr->pending_events);
}

static const VMStateDescription vmstate_spapr_event_entry = {
    .name = "spapr_event_log_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(summary, SpaprEventLogEntry),
        VMSTATE_UINT32(extended_length, SpaprEventLogEntry),
        VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, SpaprEventLogEntry, 0,
                                     NULL, extended_length),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_pending_events = {
    .name = "spapr_pending_events",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_pending_events_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_QTAILQ_V(pending_events, SpaprMachineState, 1,
                         vmstate_spapr_event_entry, SpaprEventLogEntry, next),
        VMSTATE_END_OF_LIST()
    },
};

static bool spapr_ov5_cas_needed(void *opaque)
{
    SpaprMachineState *spapr = opaque;
    SpaprOptionVector *ov5_mask = spapr_ovec_new();
    bool cas_needed;

    /*
     * Prior to the introduction of SpaprOptionVector, we had two option
     * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
     * Both of these options encode machine topology into the device-tree
     * in such a way that the now-booted OS should still be able to interact
     * appropriately with QEMU regardless of what options were actually
     * negotiated on the source side.
     *
     * As such, we can avoid migrating the CAS-negotiated options if these
     * are the only options available on the current machine/platform.
     * Since these are the only options available for pseries-2.7 and
     * earlier, this allows us to maintain old->new/new->old migration
     * compatibility.
     *
     * For QEMU 2.8+, there are additional CAS-negotiable options available
     * via default pseries-2.8 machines and explicit command-line parameters.
     * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
     * of the actual CAS-negotiated values to continue working properly. For
     * example, availability of memory unplug depends on knowing whether
     * OV5_HP_EVT was negotiated via CAS.
     *
     * Thus, for any cases where the set of available CAS-negotiable
     * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
     * include the CAS-negotiated options in the migration stream, unless
     * they only affect boot-time behaviour.
     */
    spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
    spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);
    spapr_ovec_set(ov5_mask, OV5_DRMEM_V2);

    /*
     * We need extra information if we have any bits outside the mask
     * defined above.
     */
    cas_needed = !spapr_ovec_subset(spapr->ov5, ov5_mask);

    spapr_ovec_cleanup(ov5_mask);

    return cas_needed;
}

static const VMStateDescription vmstate_spapr_ov5_cas = {
    .name = "spapr_option_vector_ov5_cas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_ov5_cas_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1,
                                 vmstate_spapr_ovec, SpaprOptionVector),
        VMSTATE_END_OF_LIST()
    },
};

static bool spapr_patb_entry_needed(void *opaque)
{
    SpaprMachineState *spapr = opaque;

    return !!spapr->patb_entry;
}

static const VMStateDescription vmstate_spapr_patb_entry = {
    .name = "spapr_patb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_patb_entry_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(patb_entry, SpaprMachineState),
        VMSTATE_END_OF_LIST()
    },
};

static bool spapr_irq_map_needed(void *opaque)
{
    SpaprMachineState *spapr = opaque;

    return spapr->irq_map && !bitmap_empty(spapr->irq_map, spapr->irq_map_nr);
}

static const VMStateDescription vmstate_spapr_irq_map = {
    .name = "spapr_irq_map",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_irq_map_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_BITMAP(irq_map, SpaprMachineState, 0, irq_map_nr),
        VMSTATE_END_OF_LIST()
    },
};

static bool spapr_dtb_needed(void *opaque)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(opaque);

    return smc->update_dt_enabled;
}

static int spapr_dtb_pre_load(void *opaque)
{
    SpaprMachineState *spapr = (SpaprMachineState *)opaque;

    g_free(spapr->fdt_blob);
    spapr->fdt_blob = NULL;
    spapr->fdt_size = 0;

    return 0;
}

static const VMStateDescription vmstate_spapr_dtb = {
    .name = "spapr_dtb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_dtb_needed,
    .pre_load = spapr_dtb_pre_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(fdt_initial_size, SpaprMachineState),
        VMSTATE_UINT32(fdt_size, SpaprMachineState),
        VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, SpaprMachineState, 0, NULL,
                                     fdt_size),
        VMSTATE_END_OF_LIST()
    },
};

static bool spapr_fwnmi_needed(void *opaque)
{
    SpaprMachineState *spapr = (SpaprMachineState *)opaque;

    return spapr->fwnmi_machine_check_addr != -1;
}

static int spapr_fwnmi_pre_save(void *opaque)
{
    SpaprMachineState *spapr = (SpaprMachineState *)opaque;

    /*
     * Check if machine check handling is in progress and print a
     * warning message.
     */
    if (spapr->fwnmi_machine_check_interlock != -1) {
        warn_report("A machine check is being handled during migration. The "
                    "handler may run and log a hardware error on the "
                    "destination");
    }

    return 0;
}

static const VMStateDescription vmstate_spapr_fwnmi = {
    .name = "spapr_fwnmi",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_fwnmi_needed,
    .pre_save = spapr_fwnmi_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(fwnmi_system_reset_addr, SpaprMachineState),
        VMSTATE_UINT64(fwnmi_machine_check_addr, SpaprMachineState),
        VMSTATE_INT32(fwnmi_machine_check_interlock, SpaprMachineState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr = {
    .name = "spapr",
    .version_id = 3,
    .minimum_version_id = 1,
    .pre_load = spapr_pre_load,
    .post_load = spapr_post_load,
    .pre_save = spapr_pre_save,
    .fields = (const VMStateField[]) {
        /* used to be @next_irq */
        VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),

        /* RTC offset */
        VMSTATE_UINT64_TEST(rtc_offset, SpaprMachineState, version_before_3),

        VMSTATE_PPC_TIMEBASE_V(tb, SpaprMachineState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_spapr_ov5_cas,
        &vmstate_spapr_patb_entry,
        &vmstate_spapr_pending_events,
        &vmstate_spapr_cap_htm,
        &vmstate_spapr_cap_vsx,
        &vmstate_spapr_cap_dfp,
        &vmstate_spapr_cap_cfpc,
        &vmstate_spapr_cap_sbbc,
        &vmstate_spapr_cap_ibs,
        &vmstate_spapr_cap_hpt_maxpagesize,
        &vmstate_spapr_irq_map,
        &vmstate_spapr_cap_nested_kvm_hv,
        &vmstate_spapr_dtb,
        &vmstate_spapr_cap_large_decr,
        &vmstate_spapr_cap_ccf_assist,
        &vmstate_spapr_cap_fwnmi,
        &vmstate_spapr_fwnmi,
        &vmstate_spapr_cap_rpt_invalidate,
        &vmstate_spapr_cap_ail_mode_3,
        &vmstate_spapr_cap_nested_papr,
        &vmstate_spapr_cap_dawr1,
        NULL
    }
};

static int htab_save_setup(QEMUFile *f, void *opaque, Error **errp)
{
    SpaprMachineState *spapr = opaque;

    /* "Iteration" header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
    } else {
        qemu_put_be32(f, spapr->htab_shift);
    }

    if (spapr->htab) {
        spapr->htab_save_index = 0;
        spapr->htab_first_pass = true;
    } else {
        if (spapr->htab_shift) {
            assert(kvm_enabled());
        }
    }

    return 0;
}
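
/*
 * Each chunk on the wire is: a be32 starting slot index, a be16 count of
 * valid HPTEs, a be16 count of invalid HPTEs, then the raw bytes of the
 * valid entries only (invalid entries become zeroes on the destination).
 * A (0, 0, 0) triple marks the end of the section.
 */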
static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr,
                            int chunkstart, int n_valid, int n_invalid)
{
    qemu_put_be32(f, chunkstart);
    qemu_put_be16(f, n_valid);
    qemu_put_be16(f, n_invalid);
    qemu_put_buffer(f, (void *)hpte_get_ptr(spapr, chunkstart),
                    HASH_PTE_SIZE_64 * n_valid);
}

static void htab_save_end_marker(QEMUFile *f)
{
    qemu_put_be32(f, 0);
    qemu_put_be16(f, 0);
    qemu_put_be16(f, 0);
}

static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr,
                                 int64_t max_ns)
{
    bool has_timeout = max_ns != -1;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(spapr->htab_first_pass);

    do {
        int chunkstart;

        /* Consume invalid HPTEs */
        while ((index < htabslots)
               && !hpte_is_valid(spapr, index)) {
            hpte_set_clean(spapr, index);
            index++;
        }

        /* Consume valid HPTEs */
        chunkstart = index;
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && hpte_is_valid(spapr, index)) {
            hpte_set_clean(spapr, index);
            index++;
        }

        if (index > chunkstart) {
            int n_valid = index - chunkstart;

            htab_save_chunk(f, spapr, chunkstart, n_valid, 0);

            if (has_timeout &&
                (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }
    } while ((index < htabslots) && !migration_rate_exceeded(f));

    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
        spapr->htab_first_pass = false;
    }
    spapr->htab_save_index = index;
}

static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
                                int64_t max_ns)
{
    bool final = max_ns < 0;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int examined = 0, sent = 0;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(!spapr->htab_first_pass);

    do {
        int chunkstart, invalidstart;

        /* Consume non-dirty HPTEs */
        while ((index < htabslots)
               && !hpte_is_dirty(spapr, index)) {
            index++;
            examined++;
        }

        chunkstart = index;
        /* Consume valid dirty HPTEs */
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && hpte_is_dirty(spapr, index)
               && hpte_is_valid(spapr, index)) {
            hpte_set_clean(spapr, index);
            index++;
            examined++;
        }

        invalidstart = index;
        /* Consume invalid dirty HPTEs */
        while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
               && hpte_is_dirty(spapr, index)
               && !hpte_is_valid(spapr, index)) {
            hpte_set_clean(spapr, index);
            index++;
            examined++;
        }

        if (index > chunkstart) {
            int n_valid = invalidstart - chunkstart;
            int n_invalid = index - invalidstart;

            htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid);
            sent += index - chunkstart;

            if (!final &&
                (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }

        if (examined >= htabslots) {
            break;
        }

        if (index >= htabslots) {
            assert(index == htabslots);
            index = 0;
        }
    } while ((examined < htabslots) && (!migration_rate_exceeded(f) || final));

    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
    }
    spapr->htab_save_index = index;
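
    /*
     * Report completion (1) only once every slot has been examined and this
     * pass sent nothing, i.e. no HPTE was dirtied while we were scanning.
     */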
    return (examined >= htabslots) && (sent == 0) ? 1 : 0;
}

#define MAX_ITERATION_NS 5000000 /* 5 ms */
#define MAX_KVM_BUF_SIZE 2048

static int htab_save_iterate(QEMUFile *f, void *opaque)
{
    SpaprMachineState *spapr = opaque;
    int fd;
    int rc = 0;

    /* Iteration header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
        return 1;
    } else {
        qemu_put_be32(f, 0);
    }

    if (!spapr->htab) {
        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
        if (rc < 0) {
            return rc;
        }
    } else if (spapr->htab_first_pass) {
        htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
    } else {
        rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
    }

    htab_save_end_marker(f);

    return rc;
}

static int htab_save_complete(QEMUFile *f, void *opaque)
{
    SpaprMachineState *spapr = opaque;
    int fd;

    /* Iteration header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
        return 0;
    } else {
        qemu_put_be32(f, 0);
    }

    if (!spapr->htab) {
        int rc;

        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
        if (rc < 0) {
            return rc;
        }
    } else {
        if (spapr->htab_first_pass) {
            htab_save_first_pass(f, spapr, -1);
        }
        htab_save_later_pass(f, spapr, -1);
    }

    /* End marker */
    htab_save_end_marker(f);

    return 0;
}

static int htab_load(QEMUFile *f, void *opaque, int version_id)
{
    SpaprMachineState *spapr = opaque;
    uint32_t section_hdr;
    int fd = -1;
    Error *local_err = NULL;

    if (version_id < 1 || version_id > 1) {
        error_report("htab_load() bad version");
        return -EINVAL;
    }

    section_hdr = qemu_get_be32(f);

    if (section_hdr == -1) {
        spapr_free_hpt(spapr);
        return 0;
    }

    if (section_hdr) {
        int ret;

        /* First section gives the htab size */
        ret = spapr_reallocate_hpt(spapr, section_hdr, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
        return 0;
    }

    if (!spapr->htab) {
        assert(kvm_enabled());

        fd = kvmppc_get_htab_fd(true, 0, &local_err);
        if (fd < 0) {
            error_report_err(local_err);
            return fd;
        }
    }

    while (true) {
        uint32_t index;
        uint16_t n_valid, n_invalid;

        index = qemu_get_be32(f);
        n_valid = qemu_get_be16(f);
        n_invalid = qemu_get_be16(f);

        if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
            /* End of Stream */
            break;
        }

        if ((index + n_valid + n_invalid) >
            (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
            /* Bad index in stream */
            error_report(
                "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
                index, n_valid, n_invalid, spapr->htab_shift);
            return -EINVAL;
        }

        if (spapr->htab) {
            if (n_valid) {
                qemu_get_buffer(f, (void *)hpte_get_ptr(spapr, index),
                                HASH_PTE_SIZE_64 * n_valid);
            }
            if (n_invalid) {
                memset(hpte_get_ptr(spapr, index + n_valid), 0,
                       HASH_PTE_SIZE_64 * n_invalid);
            }
        } else {
            int rc;

            assert(fd >= 0);

            rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid,
                                        &local_err);
            if (rc < 0) {
                error_report_err(local_err);
                return rc;
            }
        }
    }

    if (!spapr->htab) {
        assert(fd >= 0);
        close(fd);
    }

    return 0;
}

static void htab_save_cleanup(void *opaque)
{
    SpaprMachineState *spapr = opaque;

    close_htab_fd(spapr);
}

static SaveVMHandlers savevm_htab_handlers = {
    .save_setup = htab_save_setup,
    .save_live_iterate = htab_save_iterate,
    .save_live_complete_precopy = htab_save_complete,
    .save_cleanup = htab_save_cleanup,
    .load_state = htab_load,
};

static void spapr_boot_set(void *opaque, const char *boot_device,
                           Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(opaque);

    g_free(spapr->boot_device);
    spapr->boot_device = g_strdup(boot_device);
}
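
/*
 * Example (illustrative): with ram_size = 4 GiB, maxram_size = 8 GiB and
 * the 256 MiB SPAPR_MEMORY_BLOCK_SIZE, nr_lmbs below is 16 and each DRC
 * gets id (device_memory->base + i * 256 MiB) / 256 MiB, one per
 * hot-pluggable LMB above the base RAM.
 */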
static void spapr_create_lmb_dr_connectors(SpaprMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size) / lmb_size;
    int i;

    g_assert(!nr_lmbs || machine->device_memory);
    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr;

        addr = i * lmb_size + machine->device_memory->base;
        spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
                               addr / lmb_size);
    }
}

/*
 * If RAM size, maxmem size and individual node mem sizes aren't aligned
 * to SPAPR_MEMORY_BLOCK_SIZE (256MB), then refuse to start the guest
 * since we can't support such unaligned sizes with DRCONF_MEMORY.
 */
static void spapr_validate_node_memory(MachineState *machine, Error **errp)
{
    int i;

    if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
                   " is not aligned to %" PRIu64 " MiB",
                   machine->ram_size,
                   SPAPR_MEMORY_BLOCK_SIZE / MiB);
        return;
    }

    if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
                   " is not aligned to %" PRIu64 " MiB",
                   machine->maxram_size,
                   SPAPR_MEMORY_BLOCK_SIZE / MiB);
        return;
    }

    for (i = 0; i < machine->numa_state->num_nodes; i++) {
        if (machine->numa_state->nodes[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
            error_setg(errp,
                       "Node %d memory size 0x%" PRIx64
                       " is not aligned to %" PRIu64 " MiB",
                       i, machine->numa_state->nodes[i].node_mem,
                       SPAPR_MEMORY_BLOCK_SIZE / MiB);
            return;
        }
    }
}

/* find cpu slot in machine->possible_cpus by core_id */
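/*
 * e.g. with smp.threads = 8, core_id 16 maps to possible_cpus index 2;
 * any thread id within that core (16..23) lands in the same slot.
 */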
static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
{
    int index = id / ms->smp.threads;

    if (index >= ms->possible_cpus->len) {
        return NULL;
    }
    if (idx) {
        *idx = index;
    }
    return &ms->possible_cpus->cpus[index];
}

static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp)
{
    MachineState *ms = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    Error *local_err = NULL;
    bool vsmt_user = !!spapr->vsmt;
    int kvm_smt = kvmppc_smt_threads();
    int ret;
    unsigned int smp_threads = ms->smp.threads;

    if (tcg_enabled()) {
        if (smp_threads > 1 &&
            !ppc_type_check_compat(ms->cpu_type, CPU_POWERPC_LOGICAL_2_07, 0,
                                   spapr->max_compat_pvr)) {
            error_setg(errp, "TCG only supports SMT on POWER8 or newer CPUs");
            return;
        }

        if (smp_threads > 8) {
            error_setg(errp, "TCG cannot support more than 8 threads/core "
                       "on a pseries machine");
            return;
        }
    }
    if (!is_power_of_2(smp_threads)) {
        error_setg(errp, "Cannot support %d threads/core on a pseries "
                   "machine because it must be a power of 2", smp_threads);
        return;
    }

    /* Determine the VSMT mode to use: */
    if (vsmt_user) {
        if (spapr->vsmt < smp_threads) {
            error_setg(errp, "Cannot support VSMT mode %d"
                       " because it must be >= threads/core (%d)",
                       spapr->vsmt, smp_threads);
            return;
        }
        /* In this case, spapr->vsmt has been set by the command line */
    } else if (!smc->smp_threads_vsmt) {
        /*
         * Default VSMT value is tricky, because we need it to be as
         * consistent as possible (for migration), but this requires
         * changing it for at least some existing cases. We pick 8 as
         * the value that we'd get with KVM on POWER8, the
         * overwhelmingly common case in production systems.
         */
        spapr->vsmt = MAX(8, smp_threads);
    } else {
        spapr->vsmt = smp_threads;
    }

    /* KVM: If necessary, set the SMT mode: */
    if (kvm_enabled() && (spapr->vsmt != kvm_smt)) {
        ret = kvmppc_set_smt_threads(spapr->vsmt);
        if (ret) {
            /* Looks like KVM isn't able to change VSMT mode */
            error_setg(&local_err,
                       "Failed to set KVM's VSMT mode to %d (errno %d)",
                       spapr->vsmt, ret);
            /*
             * We can live with that if the default one is big enough
             * for the number of threads, and a submultiple of the one
             * we want. In this case we'll waste some vcpu ids, but
             * behaviour will be correct.
             */
            if ((kvm_smt >= smp_threads) && ((spapr->vsmt % kvm_smt) == 0)) {
                warn_report_err(local_err);
            } else {
                if (!vsmt_user) {
                    error_append_hint(&local_err,
                                      "On PPC, a VM with %d threads/core"
                                      " on a host with %d threads/core"
                                      " requires the use of VSMT mode %d.\n",
                                      smp_threads, kvm_smt, spapr->vsmt);
                }
                kvmppc_error_append_smt_possible_hint(&local_err);
                error_propagate(errp, local_err);
            }
        }
    }
    /* else TCG: nothing to do currently */
}

static void spapr_init_cpus(SpaprMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    const char *type = spapr_get_cpu_core_type(machine->cpu_type);
    const CPUArchIdList *possible_cpus;
    unsigned int smp_cpus = machine->smp.cpus;
    unsigned int smp_threads = machine->smp.threads;
    unsigned int max_cpus = machine->smp.max_cpus;
    int boot_cores_nr = smp_cpus / smp_threads;
    int i;

    possible_cpus = mc->possible_cpu_arch_ids(machine);
    if (mc->has_hotpluggable_cpus) {
        if (smp_cpus % smp_threads) {
            error_report("smp_cpus (%u) must be multiple of threads (%u)",
                         smp_cpus, smp_threads);
            exit(1);
        }
        if (max_cpus % smp_threads) {
            error_report("max_cpus (%u) must be multiple of threads (%u)",
                         max_cpus, smp_threads);
            exit(1);
        }
    } else {
        if (max_cpus != smp_cpus) {
            error_report("This machine version does not support CPU hotplug");
            exit(1);
        }
        boot_cores_nr = possible_cpus->len;
    }

    for (i = 0; i < possible_cpus->len; i++) {
        int core_id = i * smp_threads;

        if (mc->has_hotpluggable_cpus) {
            spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
                                   spapr_vcpu_id(spapr, core_id));
        }

        if (i < boot_cores_nr) {
            Object *core = object_new(type);
            int nr_threads = smp_threads;

            /* Handle the partially filled core for older machine types */
            if ((i + 1) * smp_threads >= smp_cpus) {
                nr_threads = smp_cpus - i * smp_threads;
            }

            object_property_set_int(core, "nr-threads", nr_threads,
                                    &error_fatal);
            object_property_set_int(core, CPU_CORE_PROP_CORE_ID, core_id,
                                    &error_fatal);
            qdev_realize(DEVICE(core), NULL, &error_fatal);

            object_unref(core);
        }
    }
}

static PCIHostState *spapr_create_default_phb(void)
{
    DeviceState *dev;

    dev = qdev_new(TYPE_SPAPR_PCI_HOST_BRIDGE);
    qdev_prop_set_uint32(dev, "index", 0);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    return PCI_HOST_BRIDGE(dev);
}

static hwaddr spapr_rma_size(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    hwaddr rma_size = machine->ram_size;
    hwaddr node0_size = spapr_node0_size(machine);

    /* RMA has to fit in the first NUMA node */
    rma_size = MIN(rma_size, node0_size);

    /*
     * VRMA access is via a special 1TiB SLB mapping, so the RMA can
     * never exceed that.
     */
    rma_size = MIN(rma_size, 1 * TiB);

    /*
     * Clamp the RMA size based on machine type. This is for
     * migration compatibility with older qemu versions, which limited
     * the RMA size for complicated and mostly bad reasons.
     */
    if (smc->rma_limit) {
        rma_size = MIN(rma_size, smc->rma_limit);
    }

    if (rma_size < MIN_RMA_SLOF) {
        error_setg(errp,
                   "pSeries SLOF firmware requires >= %" HWADDR_PRIx
                   "MiB guest RMA (Real Mode Area memory)",
                   MIN_RMA_SLOF / MiB);
        return 0;
    }

    return rma_size;
}

static void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    int i;

    for (i = 0; i < machine->ram_slots; i++) {
        spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_PMEM, i);
    }
}
  2398. /* pSeries LPAR / sPAPR hardware init */
  2399. static void spapr_machine_init(MachineState *machine)
  2400. {
  2401. SpaprMachineState *spapr = SPAPR_MACHINE(machine);
  2402. SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
  2403. MachineClass *mc = MACHINE_GET_CLASS(machine);
  2404. const char *bios_default = spapr->vof ? FW_FILE_NAME_VOF : FW_FILE_NAME;
  2405. const char *bios_name = machine->firmware ?: bios_default;
  2406. g_autofree char *filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
  2407. const char *kernel_filename = machine->kernel_filename;
  2408. const char *initrd_filename = machine->initrd_filename;
  2409. PCIHostState *phb;
  2410. bool has_vga;
  2411. int i;
  2412. MemoryRegion *sysmem = get_system_memory();
  2413. long load_limit, fw_size;
  2414. Error *resize_hpt_err = NULL;
  2415. NICInfo *nd;
  2416. if (!filename) {
  2417. error_report("Could not find LPAR firmware '%s'", bios_name);
  2418. exit(1);
  2419. }
  2420. fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
  2421. if (fw_size <= 0) {
  2422. error_report("Could not load LPAR firmware '%s'", filename);
  2423. exit(1);
  2424. }
  2425. /*
  2426. * if Secure VM (PEF) support is configured, then initialize it
  2427. */
  2428. if (machine->cgs) {
  2429. confidential_guest_kvm_init(machine->cgs, &error_fatal);
  2430. }
  2431. msi_nonbroken = true;
  2432. QLIST_INIT(&spapr->phbs);
  2433. QTAILQ_INIT(&spapr->pending_dimm_unplugs);
  2434. /* Determine capabilities to run with */
  2435. spapr_caps_init(spapr);

    kvmppc_check_papr_resize_hpt(&resize_hpt_err);
    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) {
        /*
         * If the user explicitly requested a mode we should either
         * supply it, or fail completely (which we do below). But if
         * it's not set explicitly, we reset our mode to something
         * that works
         */
        if (resize_hpt_err) {
            spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
            error_free(resize_hpt_err);
            resize_hpt_err = NULL;
        } else {
            spapr->resize_hpt = smc->resize_hpt_default;
        }
    }

    assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT);

    if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) {
        /*
         * User requested HPT resize, but this host can't supply it. Bail out
         */
        error_report_err(resize_hpt_err);
        exit(1);
    }
    error_free(resize_hpt_err);

    spapr->rma_size = spapr_rma_size(spapr, &error_fatal);

    /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */
    load_limit = MIN(spapr->rma_size, FDT_MAX_ADDR) - FW_OVERHEAD;

    /*
     * VSMT must be set in order to be able to compute VCPU ids, i.e. to
     * call spapr_max_server_number() or spapr_vcpu_id().
     */
    spapr_set_vsmt_mode(spapr, &error_fatal);

    /* Set up Interrupt Controller before we create the VCPUs */
    spapr_irq_init(spapr, &error_fatal);

    /*
     * Set up containers for ibm,client-architecture-support negotiated
     * options
     */
    spapr->ov5 = spapr_ovec_new();
    spapr->ov5_cas = spapr_ovec_new();
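
    /*
     * Note: spapr->ov5 accumulates the option vectors this machine
     * advertises, while ov5_cas later records the subset the guest
     * accepted during CAS (client-architecture-support) negotiation.
     */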

    spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
    spapr_validate_node_memory(machine, &error_fatal);

    spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);

    /* Do not advertise FORM2 NUMA support for pseries-6.1 and older */
    if (!smc->pre_6_2_numa_affinity) {
        spapr_ovec_set(spapr->ov5, OV5_FORM2_AFFINITY);
    }

    /* advertise support for dedicated HP event source to guests */
    if (spapr->use_hotplug_event_source) {
        spapr_ovec_set(spapr->ov5, OV5_HP_EVT);
    }

    /* advertise support for HPT resizing */
    if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
        spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE);
    }

    /* advertise support for ibm,dynamic-memory-v2 */
    spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2);

    /* advertise XIVE on POWER9 machines */
    if (spapr->irq->xive) {
        spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT);
    }

    qemu_guest_getrandom_nofail(&spapr->hashpkey_val,
                                sizeof(spapr->hashpkey_val));

    /* init CPUs */
    spapr_init_cpus(spapr);

    /* Init numa_assoc_array */
    spapr_numa_associativity_init(spapr, machine);

    if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) &&
        ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
                              spapr->max_compat_pvr)) {
        spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_300);
        /* KVM and TCG always allow GTSE with radix... */
        spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE);
    }
    /* ... but not with hash (currently). */

    if (kvm_enabled()) {
        /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
        kvmppc_enable_logical_ci_hcalls();
        kvmppc_enable_set_mode_hcall();

        /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
        kvmppc_enable_clear_ref_mod_hcalls();

        /* Enable H_PAGE_INIT */
        kvmppc_enable_h_page_init();
    }

    /* map RAM */
    memory_region_add_subregion(sysmem, 0, machine->ram);

    /* initialize hotplug memory address space */
    if (machine->ram_size < machine->maxram_size) {
        ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size;
        hwaddr device_mem_base;

        /*
         * Limit the number of hotpluggable memory slots to half the number
         * of slots that KVM supports, leaving the other half for PCI and
         * other devices. However, ensure that the number of slots doesn't
         * drop below 32.
         */
        int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 :
                           SPAPR_MAX_RAM_SLOTS;

        if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
            max_memslots = SPAPR_MAX_RAM_SLOTS;
        }
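
        /*
         * Illustrative example: if KVM reported only 40 memslots,
         * 40 / 2 = 20 would be raised back up to SPAPR_MAX_RAM_SLOTS
         * (32) by the check above.
         */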

        if (machine->ram_slots > max_memslots) {
            error_report("Specified number of memory slots %"
                         PRIu64 " exceeds max supported %d",
                         machine->ram_slots, max_memslots);
            exit(1);
        }

        device_mem_base = ROUND_UP(machine->ram_size, SPAPR_DEVICE_MEM_ALIGN);
        machine_memory_devices_init(machine, device_mem_base, device_mem_size);
    }

    spapr_create_lmb_dr_connectors(spapr);

    if (mc->nvdimm_supported) {
        spapr_create_nvdimm_dr_connectors(spapr);
    }

    /* Set up RTAS event infrastructure */
    spapr_events_init(spapr);

    /* Set up the RTC RTAS interfaces */
    spapr_rtc_create(spapr);

    /* Set up VIO bus */
    spapr->vio_bus = spapr_vio_bus_init();

    for (i = 0; serial_hd(i); i++) {
        spapr_vty_create(spapr->vio_bus, serial_hd(i));
    }

    /* We always have at least the nvram device on VIO */
    spapr_create_nvram(spapr);

    /*
     * Set up hotplug / dynamic-reconfiguration connectors. Top-level
     * connectors (described in the root DT node's "ibm,drc-types"
     * property) are pre-initialized here. Additional child connectors
     * (such as the connectors for a PHB's PCI slots) are added as
     * needed during their parent's realization.
     */
    if (smc->dr_phb_enabled) {
        for (i = 0; i < SPAPR_MAX_PHBS; i++) {
            spapr_dr_connector_new(OBJECT(machine), TYPE_SPAPR_DRC_PHB, i);
        }
    }

    /* Set up PCI */
    spapr_pci_rtas_init();

    phb = spapr_create_default_phb();

    while ((nd = qemu_find_nic_info("spapr-vlan", true, "ibmveth"))) {
        spapr_vlan_create(spapr->vio_bus, nd);
    }
    pci_init_nic_devices(phb->bus, NULL);

    for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
        spapr_vscsi_create(spapr->vio_bus);
    }

    /* Graphics */
    has_vga = spapr_vga_init(phb->bus, &error_fatal);
    if (has_vga) {
        spapr->want_stdout_path = !machine->enable_graphics;
        machine->usb |= defaults_enabled() && !machine->usb_disabled;
    } else {
        spapr->want_stdout_path = true;
    }

    if (machine->usb) {
        pci_create_simple(phb->bus, -1, "nec-usb-xhci");

        if (has_vga) {
            USBBus *usb_bus;

            usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS,
                                                              &error_abort));
            usb_create_simple(usb_bus, "usb-kbd");
            usb_create_simple(usb_bus, "usb-mouse");
        }
    }

    if (kernel_filename) {
        uint64_t loaded_addr = 0;
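
        /*
         * Try the kernel as big-endian first; on ELF_LOAD_WRONG_ENDIAN
         * retry as little-endian, recording the endianness that worked
         * in spapr->kernel_le.
         */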
        spapr->kernel_size = load_elf(kernel_filename, NULL,
                                      translate_kernel_address, spapr,
                                      NULL, &loaded_addr, NULL, NULL,
                                      ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0);
        if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
            spapr->kernel_size = load_elf(kernel_filename, NULL,
                                          translate_kernel_address, spapr,
                                          NULL, &loaded_addr, NULL, NULL,
                                          ELFDATA2LSB, PPC_ELF_MACHINE, 0, 0);
            spapr->kernel_le = spapr->kernel_size > 0;
        }
        if (spapr->kernel_size < 0) {
            error_report("error loading %s: %s", kernel_filename,
                         load_elf_strerror(spapr->kernel_size));
            exit(1);
        }

        if (spapr->kernel_addr != loaded_addr) {
            warn_report("spapr: kernel_addr changed from 0x%"PRIx64
                        " to 0x%"PRIx64,
                        spapr->kernel_addr, loaded_addr);
            spapr->kernel_addr = loaded_addr;
        }

        /* load initrd */
        if (initrd_filename) {
            /*
             * Try to locate the initrd in the gap between the kernel
             * and the firmware. Add a bit of space just in case
             */
            spapr->initrd_base = (spapr->kernel_addr + spapr->kernel_size
                                  + 0x1ffff) & ~0xffff;
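            /*
             * The arithmetic above rounds the end of the kernel up to
             * the next 64 KiB boundary while guaranteeing at least
             * 64 KiB of padding before the initrd.
             */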
            spapr->initrd_size = load_image_targphys(initrd_filename,
                                                     spapr->initrd_base,
                                                     load_limit
                                                     - spapr->initrd_base);
            if (spapr->initrd_size < 0) {
                error_report("could not load initial ram disk '%s'",
                             initrd_filename);
                exit(1);
            }
        }
    }

    /*
     * FIXME: Should register things through the MachineState's qdev
     * interface, this is a legacy from the sPAPREnvironment structure
     * which predated MachineState but had a similar function
     */
    vmstate_register(NULL, 0, &vmstate_spapr, spapr);
    register_savevm_live("spapr/htab", VMSTATE_INSTANCE_ID_ANY, 1,
                         &savevm_htab_handlers, spapr);

    qbus_set_hotplug_handler(sysbus_get_default(), OBJECT(machine));

    qemu_register_boot_set(spapr_boot_set, spapr);

    /*
     * Nothing needs to be done to resume a suspended guest because
     * suspending does not change the machine state, so no need for
     * a ->wakeup method.
     */
    qemu_register_wakeup_support();

    if (kvm_enabled()) {
        /* to stop and start vmclock */
        qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
                                         &spapr->tb);

        kvmppc_spapr_enable_inkernel_multitce();
    }

    qemu_cond_init(&spapr->fwnmi_machine_check_interlock_cond);

    if (spapr->vof) {
        spapr->vof->fw_size = fw_size; /* for claim() on itself */
        spapr_register_hypercall(KVMPPC_H_VOF_CLIENT, spapr_h_vof_client);
    }

    spapr_watchdog_init(spapr);
}

#define DEFAULT_KVM_TYPE "auto"
static int spapr_kvm_type(MachineState *machine, const char *vm_type)
{
    /*
     * The use of g_ascii_strcasecmp() for 'hv' and 'pr' is to
     * accommodate the 'HV' and 'PR' formats that exist in the
     * wild. The 'auto' mode is being introduced already as
     * lower-case, thus we don't need to bother checking for
     * "AUTO".
     */
    if (!vm_type || !strcmp(vm_type, DEFAULT_KVM_TYPE)) {
        return 0;
    }

    if (!g_ascii_strcasecmp(vm_type, "hv")) {
        return 1;
    }

    if (!g_ascii_strcasecmp(vm_type, "pr")) {
        return 2;
    }

    error_report("Unknown kvm-type specified '%s'", vm_type);
    return -1;
}
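
/*
 * The values returned above are handed to KVM as the machine type of
 * KVM_CREATE_VM; on Linux, 1 and 2 line up with KVM_VM_PPC_HV and
 * KVM_VM_PPC_PR respectively, while 0 lets KVM pick a default.
 */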

/*
 * Implementation of an interface to adjust firmware path
 * for the bootindex property handling.
 */
static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
                                   DeviceState *dev)
{
#define CAST(type, obj, name) \
    ((type *)object_dynamic_cast(OBJECT(obj), (name)))
    SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE);
    SpaprPhbState *phb = CAST(SpaprPhbState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
    VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);
    PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);

    if (d && bus) {
        void *spapr = CAST(void, bus->parent, "spapr-vscsi");
        VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
        USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);

        if (spapr) {
            /*
             * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
             * In the top 16 bits of the 64-bit LUN, we use SRP luns of the
             * form 0x8000 | (target << 8) | (bus << 5) | lun
             * (see the "Logical unit addressing format" table in SAM5)
             */
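            /*
             * Worked example: target id 1, channel 0, lun 0 gives
             * id = 0x8000 | 0x100 = 0x8100, so the path below becomes
             * "disk@8100000000000000" after the << 48 shift.
             */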
            unsigned id = 0x8000 | (d->id << 8) | (d->channel << 5) | d->lun;

            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 48);
        } else if (virtio) {
            /*
             * We use SRP luns of the form 01000000 | (target << 8) | lun
             * in the top 32 bits of the 64-bit LUN
             * Note: the quote above is from SLOF and it is wrong,
             * the actual binding is:
             * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
             */
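            /*
             * Worked example: target id 1, lun 0 gives id = 0x1010000,
             * printed as "disk@101000000000000" after the << 32 shift.
             */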
            unsigned id = 0x1000000 | (d->id << 16) | d->lun;

            if (d->lun >= 256) {
                /* Use the LUN "flat space addressing method" */
                id |= 0x4000;
            }

            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        } else if (usb) {
            /*
             * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
             * in the top 32 bits of the 64-bit LUN
             */
            unsigned usb_port = atoi(usb->port->path);
            unsigned id = 0x1000000 | (usb_port << 16) | d->lun;

            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        }
    }

    /*
     * SLOF probes the USB devices, and if it recognizes that the device is a
     * storage device, it changes its name to "storage" instead of "usb-host",
     * and additionally adds a child node for the SCSI LUN, so the correct
     * boot path in SLOF is something like .../storage@1/disk@xxx instead.
     */
    if (strcmp("usb-host", qdev_fw_name(dev)) == 0) {
        USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE);

        if (usb_device_is_scsi_storage(usbdev)) {
            return g_strdup_printf("storage@%s/disk", usbdev->port->path);
        }
    }

    if (phb) {
        /* Replace "pci" with "pci@800000020000000" */
        return g_strdup_printf("pci@%"PRIX64, phb->buid);
    }

    if (vsc) {
        /* Same logic as virtio above */
        unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun;
        return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32);
    }

    if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
        /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
        PCIDevice *pdev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
        return g_strdup_printf("pci@%x", PCI_SLOT(pdev->devfn));
    }

    if (pcidev) {
        return spapr_pci_fw_dev_name(pcidev);
    }

    return NULL;
}

static char *spapr_get_kvm_type(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    return g_strdup(spapr->kvm_type);
}

static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->kvm_type);
    spapr->kvm_type = g_strdup(value);
}

static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    return spapr->use_hotplug_event_source;
}

static void spapr_set_modern_hotplug_events(Object *obj, bool value,
                                            Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    spapr->use_hotplug_event_source = value;
}

static bool spapr_get_msix_emulation(Object *obj, Error **errp)
{
    return true;
}

static char *spapr_get_resize_hpt(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    switch (spapr->resize_hpt) {
    case SPAPR_RESIZE_HPT_DEFAULT:
        return g_strdup("default");
    case SPAPR_RESIZE_HPT_DISABLED:
        return g_strdup("disabled");
    case SPAPR_RESIZE_HPT_ENABLED:
        return g_strdup("enabled");
    case SPAPR_RESIZE_HPT_REQUIRED:
        return g_strdup("required");
    }
    g_assert_not_reached();
}

static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    if (strcmp(value, "default") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT;
    } else if (strcmp(value, "disabled") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
    } else if (strcmp(value, "enabled") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED;
    } else if (strcmp(value, "required") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED;
    } else {
        error_setg(errp, "Bad value for \"resize-hpt\" property");
    }
}

static bool spapr_get_vof(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    return spapr->vof != NULL;
}

static void spapr_set_vof(Object *obj, bool value, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    if (spapr->vof) {
        vof_cleanup(spapr->vof);
        g_free(spapr->vof);
        spapr->vof = NULL;
    }
    if (!value) {
        return;
    }
    spapr->vof = g_malloc0(sizeof(*spapr->vof));
}

static char *spapr_get_ic_mode(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    if (spapr->irq == &spapr_irq_xics_legacy) {
        return g_strdup("legacy");
    } else if (spapr->irq == &spapr_irq_xics) {
        return g_strdup("xics");
    } else if (spapr->irq == &spapr_irq_xive) {
        return g_strdup("xive");
    } else if (spapr->irq == &spapr_irq_dual) {
        return g_strdup("dual");
    }
    g_assert_not_reached();
}

static void spapr_set_ic_mode(Object *obj, const char *value, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        error_setg(errp, "This machine only uses the legacy XICS backend, don't pass ic-mode");
        return;
    }

    /* The legacy IRQ backend cannot be set */
    if (strcmp(value, "xics") == 0) {
        spapr->irq = &spapr_irq_xics;
    } else if (strcmp(value, "xive") == 0) {
        spapr->irq = &spapr_irq_xive;
    } else if (strcmp(value, "dual") == 0) {
        spapr->irq = &spapr_irq_dual;
    } else {
        error_setg(errp, "Bad value for \"ic-mode\" property");
    }
}

static char *spapr_get_host_model(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    return g_strdup(spapr->host_model);
}

static void spapr_set_host_model(Object *obj, const char *value, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->host_model);
    spapr->host_model = g_strdup(value);
}

static char *spapr_get_host_serial(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    return g_strdup(spapr->host_serial);
}

static void spapr_set_host_serial(Object *obj, const char *value, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->host_serial);
    spapr->host_serial = g_strdup(value);
}

static void spapr_instance_init(Object *obj)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    MachineState *ms = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(ms);

    /*
     * NVDIMM support went live in 5.1 without considering that, in
     * other archs, the user needs to enable NVDIMM support with the
     * 'nvdimm' machine option and the default behavior is NVDIMM
     * support disabled. It is too late to roll back to the standard
     * behavior without breaking 5.1 guests.
     */
    if (mc->nvdimm_supported) {
        ms->nvdimms_state->is_enabled = true;
    }

    spapr->htab_fd = -1;
    spapr->use_hotplug_event_source = true;
    spapr->kvm_type = g_strdup(DEFAULT_KVM_TYPE);
    object_property_add_str(obj, "kvm-type",
                            spapr_get_kvm_type, spapr_set_kvm_type);
    object_property_set_description(obj, "kvm-type",
                                    "Specifies the KVM virtualization mode (auto,"
                                    " hv, pr). Defaults to 'auto'. This mode will use"
                                    " any available KVM module loaded in the host,"
                                    " where kvm_hv takes precedence if both kvm_hv and"
                                    " kvm_pr are loaded.");
    object_property_add_bool(obj, "modern-hotplug-events",
                             spapr_get_modern_hotplug_events,
                             spapr_set_modern_hotplug_events);
    object_property_set_description(obj, "modern-hotplug-events",
                                    "Use dedicated hotplug event mechanism in"
                                    " place of standard EPOW events when possible"
                                    " (required for memory hot-unplug support)");
    ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr,
                            "Maximum permitted CPU compatibility mode");

    object_property_add_str(obj, "resize-hpt",
                            spapr_get_resize_hpt, spapr_set_resize_hpt);
    object_property_set_description(obj, "resize-hpt",
                                    "Resizing of the Hash Page Table (enabled, disabled, required)");
    object_property_add_uint32_ptr(obj, "vsmt",
                                   &spapr->vsmt, OBJ_PROP_FLAG_READWRITE);
    object_property_set_description(obj, "vsmt",
                                    "Virtual SMT: KVM behaves as if this were"
                                    " the host's SMT mode");

    object_property_add_bool(obj, "vfio-no-msix-emulation",
                             spapr_get_msix_emulation, NULL);

    object_property_add_uint64_ptr(obj, "kernel-addr",
                                   &spapr->kernel_addr, OBJ_PROP_FLAG_READWRITE);
    object_property_set_description(obj, "kernel-addr",
                                    stringify(KERNEL_LOAD_ADDR)
                                    " for -kernel is the default");
    spapr->kernel_addr = KERNEL_LOAD_ADDR;

    object_property_add_bool(obj, "x-vof", spapr_get_vof, spapr_set_vof);
    object_property_set_description(obj, "x-vof",
                                    "Enable Virtual Open Firmware (experimental)");

    /* The machine class defines the default interrupt controller mode */
    spapr->irq = smc->irq;
    object_property_add_str(obj, "ic-mode", spapr_get_ic_mode,
                            spapr_set_ic_mode);
    object_property_set_description(obj, "ic-mode",
                                    "Specifies the interrupt controller mode (xics, xive, dual)");

    object_property_add_str(obj, "host-model",
                            spapr_get_host_model, spapr_set_host_model);
    object_property_set_description(obj, "host-model",
                                    "Host model to advertise in guest device tree");
    object_property_add_str(obj, "host-serial",
                            spapr_get_host_serial, spapr_set_host_serial);
    object_property_set_description(obj, "host-serial",
                                    "Host serial number to advertise in guest device tree");
}

static void spapr_machine_finalizefn(Object *obj)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->kvm_type);
}

void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    CPUPPCState *env = cpu_env(cs);

    cpu_synchronize_state(cs);
    /* If FWNMI is inactive, addr will be -1, which will deliver to 0x100 */
    if (spapr->fwnmi_system_reset_addr != -1) {
        uint64_t rtas_addr, addr;

        /* get rtas addr from fdt */
        rtas_addr = spapr_get_rtas_addr();
        if (!rtas_addr) {
            qemu_system_guest_panicked(NULL);
            return;
        }
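
        /*
         * Each CPU gets a 16-byte scratch area after the RTAS error
         * log: the guest's r3 is saved there (followed by a zero
         * word) and r3 is then repointed at the area so the FWNMI
         * system reset handler can find the original value.
         */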
        addr = rtas_addr + RTAS_ERROR_LOG_MAX +
               cs->cpu_index * sizeof(uint64_t) * 2;
        stq_be_phys(&address_space_memory, addr, env->gpr[3]);
        stq_be_phys(&address_space_memory, addr + sizeof(uint64_t), 0);
        env->gpr[3] = addr;
    }
    ppc_cpu_do_system_reset(cs);
    if (spapr->fwnmi_system_reset_addr != -1) {
        env->nip = spapr->fwnmi_system_reset_addr;
    }
}

static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
    }
}

int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
                          void *fdt, int *fdt_start_offset, Error **errp)
{
    uint64_t addr;
    uint32_t node;

    addr = spapr_drc_index(drc) * SPAPR_MEMORY_BLOCK_SIZE;
    node = object_property_get_uint(OBJECT(drc->dev), PC_DIMM_NODE_PROP,
                                    &error_abort);
    *fdt_start_offset = spapr_dt_memory_node(spapr, fdt, node, addr,
                                             SPAPR_MEMORY_BLOCK_SIZE);
    return 0;
}
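
/*
 * An LMB (Logical Memory Block) is the PAPR unit of memory hotplug:
 * each LMB DRC covers one SPAPR_MEMORY_BLOCK_SIZE block (256 MiB in
 * current QEMU), and a DRC's index doubles as its block number, so a
 * DIMM is attached below as size / block-size consecutive LMBs
 * starting at its base address.
 */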
static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
                           bool dedicated_hp_event_source)
{
    SpaprDrc *drc;
    uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
    int i;
    uint64_t addr = addr_start;
    bool hotplugged = spapr_drc_hotplugged(dev);

    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);

        /*
         * memory_device_get_free_addr() provided a range of free addresses
         * that doesn't overlap with any existing mapping at pre-plug. The
         * corresponding LMB DRCs are thus assumed to be all attachable.
         */
        spapr_drc_attach(drc, dev);
        if (!hotplugged) {
            spapr_drc_reset(drc);
        }
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }
    /*
     * Send hotplug notification to the guest only in case of hotplugged
     * memory.
     */
    if (hotplugged) {
        if (dedicated_hp_event_source) {
            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                                  addr_start / SPAPR_MEMORY_BLOCK_SIZE);
            g_assert(drc);
            spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                                   nr_lmbs,
                                                   spapr_drc_index(drc));
        } else {
            spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                           nr_lmbs);
        }
    }
}

static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev);
    PCDIMMDevice *dimm = PC_DIMM(dev);
    uint64_t size, addr;
    int64_t slot;
    bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);

    size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort);

    pc_dimm_plug(dimm, MACHINE(ms));

    if (!is_nvdimm) {
        addr = object_property_get_uint(OBJECT(dimm),
                                        PC_DIMM_ADDR_PROP, &error_abort);
        spapr_add_lmbs(dev, addr, size,
                       spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT));
    } else {
        slot = object_property_get_int(OBJECT(dimm),
                                       PC_DIMM_SLOT_PROP, &error_abort);
        /* We should have valid slot number at this point */
        g_assert(slot >= 0);
        spapr_add_nvdimm(dev, slot);
    }
}

static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
    bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
    PCDIMMDevice *dimm = PC_DIMM(dev);
    Error *local_err = NULL;
    uint64_t size;
    Object *memdev;
    hwaddr pagesize;

    size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (is_nvdimm) {
        if (!spapr_nvdimm_validate(hotplug_dev, NVDIMM(dev), size, errp)) {
            return;
        }
    } else if (size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Hotplugged memory size must be a multiple of "
                   "%" PRIu64 " MiB", SPAPR_MEMORY_BLOCK_SIZE / MiB);
        return;
    }

    memdev = object_property_get_link(OBJECT(dimm), PC_DIMM_MEMDEV_PROP,
                                      &error_abort);
    pagesize = host_memory_backend_pagesize(MEMORY_BACKEND(memdev));
    if (!spapr_check_pagesize(spapr, pagesize, errp)) {
        return;
    }

    pc_dimm_pre_plug(dimm, MACHINE(hotplug_dev), errp);
}

struct SpaprDimmState {
    PCDIMMDevice *dimm;
    uint32_t nr_lmbs;
    QTAILQ_ENTRY(SpaprDimmState) next;
};

static SpaprDimmState *spapr_pending_dimm_unplugs_find(SpaprMachineState *s,
                                                       PCDIMMDevice *dimm)
{
    SpaprDimmState *dimm_state = NULL;

    QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) {
        if (dimm_state->dimm == dimm) {
            break;
        }
    }
    return dimm_state;
}

static SpaprDimmState *spapr_pending_dimm_unplugs_add(SpaprMachineState *spapr,
                                                      uint32_t nr_lmbs,
                                                      PCDIMMDevice *dimm)
{
    SpaprDimmState *ds = NULL;

    /*
     * If this request is for a DIMM whose removal had failed earlier
     * (due to guest's refusal to remove the LMBs), we would have this
     * dimm already in the pending_dimm_unplugs list. In that
     * case don't add again.
     */
    ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
    if (!ds) {
        ds = g_new0(SpaprDimmState, 1);
        ds->nr_lmbs = nr_lmbs;
        ds->dimm = dimm;
        QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next);
    }
    return ds;
}

static void spapr_pending_dimm_unplugs_remove(SpaprMachineState *spapr,
                                              SpaprDimmState *dimm_state)
{
    QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next);
    g_free(dimm_state);
}

static SpaprDimmState *spapr_recover_pending_dimm_state(SpaprMachineState *ms,
                                                        PCDIMMDevice *dimm)
{
    SpaprDrc *drc;
    uint64_t size = memory_device_get_region_size(MEMORY_DEVICE(dimm),
                                                  &error_abort);
    uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t avail_lmbs = 0;
    uint64_t addr_start, addr;
    int i;

    addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                          &error_abort);

    addr = addr_start;
    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);
        if (drc->dev) {
            avail_lmbs++;
        }
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }

    return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm);
}

void spapr_memory_unplug_rollback(SpaprMachineState *spapr, DeviceState *dev)
{
    SpaprDimmState *ds;
    PCDIMMDevice *dimm;
    SpaprDrc *drc;
    uint32_t nr_lmbs;
    uint64_t size, addr_start, addr;
    int i;

    if (!dev) {
        return;
    }

    dimm = PC_DIMM(dev);
    ds = spapr_pending_dimm_unplugs_find(spapr, dimm);

    /*
     * 'ds == NULL' would mean that the DIMM doesn't have a pending
     * unplug state, but one of its DRCs is marked as unplug_requested.
     * This is bad and weird enough to g_assert() out.
     */
    g_assert(ds);

    spapr_pending_dimm_unplugs_remove(spapr, ds);

    size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort);
    nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;

    addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                          &error_abort);

    addr = addr_start;
    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);

        drc->unplug_requested = false;
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }

    /*
     * Tell QAPI that something happened and the memory
     * hotunplug wasn't successful.
     */
    qapi_event_send_device_unplug_guest_error(dev->id,
                                              dev->canonical_path);
}

/* Callback to be called during DRC release. */
void spapr_lmb_release(DeviceState *dev)
{
    HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
    SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_ctrl);
    SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));

    /*
     * This information will get lost if a migration occurs
     * during the unplug process. In this case recover it.
     */
    if (ds == NULL) {
        ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev));
        g_assert(ds);
        /* The DRC being examined by the caller at least must be counted */
        g_assert(ds->nr_lmbs);
    }

    if (--ds->nr_lmbs) {
        return;
    }

    /*
     * Now that all the LMBs have been removed by the guest, call the
     * unplug handler chain. This can never fail.
     */
    hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
    object_unparent(OBJECT(dev));
}

static void spapr_memory_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
    SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));

    /* We really shouldn't get this far without anything to unplug */
    g_assert(ds);

    pc_dimm_unplug(PC_DIMM(dev), MACHINE(hotplug_dev));
    qdev_unrealize(dev);
    spapr_pending_dimm_unplugs_remove(spapr, ds);
}

static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
                                        DeviceState *dev, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
    PCDIMMDevice *dimm = PC_DIMM(dev);
    uint32_t nr_lmbs;
    uint64_t size, addr_start, addr;
    int i;
    SpaprDrc *drc;

    if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
        error_setg(errp, "nvdimm device hot unplug is not supported yet.");
        return;
    }

    size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort);
    nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;

    addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                          &error_abort);

    /*
     * An existing pending dimm state for this DIMM means that there is an
     * unplug operation in progress, waiting for the spapr_lmb_release
     * callback to complete the job (BQL can't cover that far). In this case,
     * bail out to avoid detaching DRCs that were already released.
     */
    if (spapr_pending_dimm_unplugs_find(spapr, dimm)) {
        error_setg(errp, "Memory unplug already in progress for device %s",
                   dev->id);
        return;
    }

    spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm);

    addr = addr_start;
    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);

        spapr_drc_unplug_request(drc);
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                          addr_start / SPAPR_MEMORY_BLOCK_SIZE);
    spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                              nr_lmbs, spapr_drc_index(drc));
}

/* Callback to be called during DRC release. */
void spapr_core_release(DeviceState *dev)
{
    HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);

    /* Call the unplug handler chain. This can never fail. */
    hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
    object_unparent(OBJECT(dev));
}

static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    MachineState *ms = MACHINE(hotplug_dev);
    CPUCore *cc = CPU_CORE(dev);
    CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);

    assert(core_slot);
    core_slot->cpu = NULL;
    qdev_unrealize(dev);
}

static
void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
                               Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    int index;
    SpaprDrc *drc;
    CPUCore *cc = CPU_CORE(dev);

    if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
        error_setg(errp, "Unable to find CPU core with core-id: %d",
                   cc->core_id);
        return;
    }

    if (index == 0) {
        error_setg(errp, "Boot CPU core may not be unplugged");
        return;
    }

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
                          spapr_vcpu_id(spapr, cc->core_id));
    g_assert(drc);

    if (!spapr_drc_unplug_requested(drc)) {
        spapr_drc_unplug_request(drc);
    }

    /*
     * spapr_hotplug_req_remove_by_index is left unguarded, out of the
     * "!spapr_drc_unplug_requested" check, to allow for multiple IRQ
     * pulses removing the same CPU. Otherwise, in a failed hotunplug
     * attempt (e.g. the kernel will refuse to remove the last online
     * CPU), we will never attempt it again because unplug_requested
     * will still be 'true' in that case.
     */
    spapr_hotplug_req_remove_by_index(drc);
}

int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
                           void *fdt, int *fdt_start_offset, Error **errp)
{
    SpaprCpuCore *core = SPAPR_CPU_CORE(drc->dev);
    CPUState *cs = CPU(core->threads[0]);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    DeviceClass *dc = DEVICE_GET_CLASS(cs);
    int id = spapr_get_vcpu_id(cpu);
    g_autofree char *nodename = NULL;
    int offset;

    nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
    offset = fdt_add_subnode(fdt, 0, nodename);

    spapr_dt_cpu(cs, fdt, offset, spapr);

    /*
     * spapr_dt_cpu() does not fill the 'name' property in the
     * CPU node. The function is called during the boot process, before
     * and after CAS, and overwriting the 'name' property written
     * by SLOF is not allowed.
     *
     * Write it manually after spapr_dt_cpu(). This makes the hotplugged
     * CPUs more compatible with the coldplugged ones, which have
     * the 'name' property. The Linux kernel also relies on this
     * property to identify CPU nodes.
     */
    _FDT((fdt_setprop_string(fdt, offset, "name", nodename)));

    *fdt_start_offset = offset;
    return 0;
}

static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(spapr);
    SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev));
    CPUCore *cc = CPU_CORE(dev);
    SpaprDrc *drc;
    CPUArchId *core_slot;
    int index;
    bool hotplugged = spapr_drc_hotplugged(dev);
    int i;

    core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
    g_assert(core_slot); /* Already checked in spapr_core_pre_plug() */

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
                          spapr_vcpu_id(spapr, cc->core_id));

    g_assert(drc || !mc->has_hotpluggable_cpus);

    if (drc) {
        /*
         * spapr_core_pre_plug() already buys us that this is a brand
         * new core being plugged into a free slot. Nothing should
         * already be attached to the corresponding DRC.
         */
        spapr_drc_attach(drc, dev);

        if (hotplugged) {
            /*
             * Send hotplug notification interrupt to the guest only
             * in case of hotplugged CPUs.
             */
            spapr_hotplug_req_add_by_index(drc);
        } else {
            spapr_drc_reset(drc);
        }
    }

    core_slot->cpu = CPU(dev);

    /*
     * Set compatibility mode to match the boot CPU, which was either set
     * by the machine reset code or by CAS. This really shouldn't fail at
     * this point.
     */
    if (hotplugged) {
        for (i = 0; i < cc->nr_threads; i++) {
            ppc_set_compat(core->threads[i], POWERPC_CPU(first_cpu)->compat_pvr,
                           &error_abort);
        }
    }
}

static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    MachineState *machine = MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
    CPUCore *cc = CPU_CORE(dev);
    const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type);
    const char *type = object_get_typename(OBJECT(dev));
    CPUArchId *core_slot;
    int index;
    unsigned int smp_threads = machine->smp.threads;

    if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
        error_setg(errp, "CPU hotplug not supported for this machine");
        return;
    }

    if (strcmp(base_core_type, type)) {
        error_setg(errp, "CPU core type should be %s", base_core_type);
        return;
    }

    if (cc->core_id % smp_threads) {
        error_setg(errp, "invalid core id %d", cc->core_id);
        return;
    }

    /*
     * In general we should have homogeneous threads-per-core, but old
     * (pre hotplug support) machine types allow the last core to have
     * reduced threads as a compatibility hack for when we allowed
     * total vcpus not a multiple of threads-per-core.
     */
    if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) {
        error_setg(errp, "invalid nr-threads %d, must be %d", cc->nr_threads,
                   smp_threads);
        return;
    }

    core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
    if (!core_slot) {
        error_setg(errp, "core id %d out of range", cc->core_id);
        return;
    }

    if (core_slot->cpu) {
        error_setg(errp, "core %d already populated", cc->core_id);
        return;
    }

    numa_cpu_pre_plug(core_slot, dev, errp);
}

int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
                          void *fdt, int *fdt_start_offset, Error **errp)
{
    SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(drc->dev);
    int intc_phandle;

    intc_phandle = spapr_irq_get_phandle(spapr, spapr->fdt_blob, errp);
    if (intc_phandle <= 0) {
        return -1;
    }

    if (spapr_dt_phb(spapr, sphb, intc_phandle, fdt, fdt_start_offset)) {
        error_setg(errp, "unable to create FDT node for PHB %d", sphb->index);
        return -1;
    }

    /* generally SLOF creates these, for hotplug it's up to QEMU */
    _FDT(fdt_setprop_string(fdt, *fdt_start_offset, "name", "pci"));

    return 0;
}

static bool spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                               Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    const unsigned windows_supported = spapr_phb_windows_supported(sphb);
    SpaprDrc *drc;

    if (dev->hotplugged && !smc->dr_phb_enabled) {
        error_setg(errp, "PHB hotplug not supported for this machine");
        return false;
    }

    if (sphb->index == (uint32_t)-1) {
        error_setg(errp, "\"index\" for PAPR PHB is mandatory");
        return false;
    }

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
    if (drc && drc->dev) {
        error_setg(errp, "PHB %d already attached", sphb->index);
        return false;
    }

    /*
     * This will check that sphb->index doesn't exceed the maximum number of
     * PHBs for the current machine type.
     */
    return smc->phb_placement(spapr, sphb->index,
                              &sphb->buid, &sphb->io_win_addr,
                              &sphb->mem_win_addr, &sphb->mem64_win_addr,
                              windows_supported, sphb->dma_liobn,
                              errp);
}

static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
    SpaprDrc *drc;
    bool hotplugged = spapr_drc_hotplugged(dev);

    if (!smc->dr_phb_enabled) {
        return;
    }

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
    /* hotplug hooks should check it's enabled before getting this far */
    assert(drc);

    /* spapr_phb_pre_plug() already checked the DRC is attachable */
    spapr_drc_attach(drc, dev);

    if (hotplugged) {
        spapr_hotplug_req_add_by_index(drc);
    } else {
        spapr_drc_reset(drc);
    }
}

void spapr_phb_release(DeviceState *dev)
{
    HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);

    hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
    object_unparent(OBJECT(dev));
}

static void spapr_phb_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    qdev_unrealize(dev);
}

static void spapr_phb_unplug_request(HotplugHandler *hotplug_dev,
                                     DeviceState *dev, Error **errp)
{
    SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
    SpaprDrc *drc;

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
    assert(drc);

    if (!spapr_drc_unplug_requested(drc)) {
        spapr_drc_unplug_request(drc);
        spapr_hotplug_req_remove_by_index(drc);
    } else {
        error_setg(errp,
                   "PCI Host Bridge unplug already in progress for device %s",
                   dev->id);
    }
}

static
bool spapr_tpm_proxy_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));

    if (spapr->tpm_proxy != NULL) {
        error_setg(errp, "Only one TPM proxy can be specified for this machine");
        return false;
    }

    return true;
}

static void spapr_tpm_proxy_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(dev);

    /* Already checked in spapr_tpm_proxy_pre_plug() */
    g_assert(spapr->tpm_proxy == NULL);

    spapr->tpm_proxy = tpm_proxy;
}

static void spapr_tpm_proxy_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));

    qdev_unrealize(dev);
    object_unparent(OBJECT(dev));
    spapr->tpm_proxy = NULL;
}

static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
                                      DeviceState *dev, Error **errp)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        spapr_memory_plug(hotplug_dev, dev);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        spapr_core_plug(hotplug_dev, dev);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
        spapr_phb_plug(hotplug_dev, dev);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
        spapr_tpm_proxy_plug(hotplug_dev, dev);
    }
}

static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
                                        DeviceState *dev, Error **errp)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        spapr_memory_unplug(hotplug_dev, dev);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        spapr_core_unplug(hotplug_dev, dev);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
        spapr_phb_unplug(hotplug_dev, dev);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
        spapr_tpm_proxy_unplug(hotplug_dev, dev);
    }
}

bool spapr_memory_hot_unplug_supported(SpaprMachineState *spapr)
{
    return spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT) ||
        /*
         * CAS will process all pending unplug requests.
         *
         * HACK: a guest could theoretically have cleared all bits in OV5,
         * but none of the guests we care for do.
         */
        spapr_ovec_empty(spapr->ov5_cas);
}

static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
                                                DeviceState *dev, Error **errp)
{
    SpaprMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(sms);
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        if (spapr_memory_hot_unplug_supported(sms)) {
            spapr_memory_unplug_request(hotplug_dev, dev, errp);
        } else {
            error_setg(errp, "Memory hot unplug not supported for this guest");
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        if (!mc->has_hotpluggable_cpus) {
            error_setg(errp, "CPU hot unplug not supported on this machine");
            return;
        }
        spapr_core_unplug_request(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
        if (!smc->dr_phb_enabled) {
            error_setg(errp, "PHB hot unplug not supported on this machine");
            return;
        }
        spapr_phb_unplug_request(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
        spapr_tpm_proxy_unplug(hotplug_dev, dev);
    }
}

static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
                                          DeviceState *dev, Error **errp)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        spapr_memory_pre_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        spapr_core_pre_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
        spapr_phb_pre_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
        spapr_tpm_proxy_pre_plug(hotplug_dev, dev, errp);
    }
}

static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
                                                 DeviceState *dev)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
        object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) ||
        object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE) ||
        object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
        return HOTPLUG_HANDLER(machine);
    }
    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pcidev = PCI_DEVICE(dev);
        PCIBus *root = pci_device_root_bus(pcidev);
        SpaprPhbState *phb =
            (SpaprPhbState *)object_dynamic_cast(OBJECT(BUS(root)->parent),
                                                 TYPE_SPAPR_PCI_HOST_BRIDGE);

        if (phb) {
            return HOTPLUG_HANDLER(phb);
        }
    }
    return NULL;
}

static CpuInstanceProperties
spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index)
{
    CPUArchId *core_slot;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    /* make sure possible_cpu are initialized */
    mc->possible_cpu_arch_ids(machine);
    /* get CPU core slot containing thread that matches cpu_index */
    core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL);
    assert(core_slot);
    return core_slot->props;
}

static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx)
{
    return idx / ms->smp.cores % ms->numa_state->num_nodes;
}
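
/*
 * Illustrative example for the mapping above: with smp.cores = 2 and
 * two NUMA nodes, core indexes 0-1 land on node 0, 2-3 on node 1,
 * 4-5 wrap back to node 0, and so on.
 */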

static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
{
    int i;
    unsigned int smp_threads = machine->smp.threads;
    unsigned int smp_cpus = machine->smp.cpus;
    const char *core_type;
    int spapr_max_cores = machine->smp.max_cpus / smp_threads;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    if (!mc->has_hotpluggable_cpus) {
        spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
    }
    if (machine->possible_cpus) {
        assert(machine->possible_cpus->len == spapr_max_cores);
        return machine->possible_cpus;
    }

    core_type = spapr_get_cpu_core_type(machine->cpu_type);
    if (!core_type) {
        error_report("Unable to find sPAPR CPU Core definition");
        exit(1);
    }

    machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                                       sizeof(CPUArchId) * spapr_max_cores);
    machine->possible_cpus->len = spapr_max_cores;
    for (i = 0; i < machine->possible_cpus->len; i++) {
        int core_id = i * smp_threads;

        machine->possible_cpus->cpus[i].type = core_type;
        machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
        machine->possible_cpus->cpus[i].arch_id = core_id;
        machine->possible_cpus->cpus[i].props.has_core_id = true;
        machine->possible_cpus->cpus[i].props.core_id = core_id;
    }
    return machine->possible_cpus;
}

static bool spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
                                uint64_t *buid, hwaddr *pio,
                                hwaddr *mmio32, hwaddr *mmio64,
                                unsigned n_dma, uint32_t *liobns, Error **errp)
{
    /*
     * New-style PHB window placement.
     *
     * Goals: Gives large (1TiB), naturally aligned 64-bit MMIO window
     * for each PHB, in addition to 2GiB 32-bit MMIO and 64kiB PIO
     * windows.
     *
     * Some guest kernels can't work with MMIO windows above 1<<46
     * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB
     *
     * 32TiB..(33TiB+1984kiB) contains the 64kiB PIO windows for each
     * PHB stacked together. (32TiB+2GiB)..(32TiB+64GiB) contains the
     * 2GiB 32-bit MMIO windows for each PHB. Then 33..64TiB has the
     * 1TiB 64-bit MMIO windows for each PHB.
     */
    const uint64_t base_buid = 0x800000020000000ULL;
    int i;

    /* Sanity check natural alignments */
    QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0);
    /* Sanity check bounds */
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) >
                      SPAPR_PCI_MEM32_WIN_SIZE);
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) >
                      SPAPR_PCI_MEM64_WIN_SIZE);

    if (index >= SPAPR_MAX_PHBS) {
        error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)",
                   SPAPR_MAX_PHBS - 1);
        return false;
    }

    *buid = base_buid + index;
    for (i = 0; i < n_dma; ++i) {
        liobns[i] = SPAPR_PCI_LIOBN(index, i);
    }

    *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
    *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
    *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
    return true;
}
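
/*
 * Worked example for index 0, given the window sizes described above:
 * pio = SPAPR_PCI_BASE, mmio32 = SPAPR_PCI_BASE + 2 GiB and
 * mmio64 = SPAPR_PCI_BASE + 1 TiB. The "+ 1" in the mmio32/mmio64
 * calculations skips slot 0 of each region, which is occupied by the
 * smaller windows stacked below it.
 */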

static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(dev);

    return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL;
}

static void spapr_ics_resend(XICSFabric *dev)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(dev);

    ics_resend(spapr->ics);
}

static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id)
{
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    return cpu ? spapr_cpu_state(cpu)->icp : NULL;
}

static void spapr_pic_print_info(InterruptStatsProvider *obj, GString *buf)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    spapr_irq_print_info(spapr, buf);
    g_string_append_printf(buf, "irqchip: %s\n",
                           kvm_irqchip_in_kernel() ? "in-kernel" : "emulated");
}

/*
 * This is a XIVE only operation
 */
static int spapr_match_nvt(XiveFabric *xfb, uint8_t format,
                           uint8_t nvt_blk, uint32_t nvt_idx,
                           bool crowd, bool cam_ignore, uint8_t priority,
                           uint32_t logic_serv, XiveTCTXMatch *match)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(xfb);
    XivePresenter *xptr = XIVE_PRESENTER(spapr->active_intc);
    XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
    int count;

    count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, cam_ignore,
                           priority, logic_serv, match);
    if (count < 0) {
        return count;
    }

    /*
     * When we implement the save and restore of the thread interrupt
     * contexts in the enter/exit CPU handlers of the machine and the
     * escalations in QEMU, we should be able to handle non dispatched
     * vCPUs.
     *
     * Until this is done, the sPAPR machine should find at least one
     * matching context always.
     */
    if (count == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is not dispatched\n",
                      nvt_blk, nvt_idx);
    }

    return count;
}

int spapr_get_vcpu_id(PowerPCCPU *cpu)
{
    return cpu->vcpu_id;
}

bool spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    MachineState *ms = MACHINE(spapr);
    int vcpu_id;

    vcpu_id = spapr_vcpu_id(spapr, cpu_index);
    if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) {
        error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id);
        error_append_hint(errp, "Adjust the number of cpus to %d "
                          "or try to raise the number of threads per core\n",
                          vcpu_id * ms->smp.threads / spapr->vsmt);
        return false;
    }
    cpu->vcpu_id = vcpu_id;

    return true;
}
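
/*
 * Editor's sketch of the VSMT spacing this relies on (an assumption:
 * spapr_vcpu_id(), defined earlier in this file, is taken to spread
 * cores at a stride of spapr->vsmt):
 *
 *   vcpu_id = (cpu_index / smp_threads) * vsmt + cpu_index % smp_threads
 *
 * e.g. with smp_threads = 4 and vsmt = 8, cpu_index 5 (core 1, thread 1)
 * would map to vcpu_id (5 / 4) * 8 + 5 % 4 = 9. Because of this stride,
 * a vcpu_id can exceed KVM's limit even when the plain cpu count does
 * not, which is what the hint above tries to explain to the user.
 */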

PowerPCCPU *spapr_find_cpu(int vcpu_id)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        if (spapr_get_vcpu_id(cpu) == vcpu_id) {
            return cpu;
        }
    }

    return NULL;
}

static bool spapr_cpu_in_nested(PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    return spapr_cpu->in_nested;
}

static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    /* These are only called by TCG; KVM maintains dispatch state itself */
    spapr_cpu->prod = false;
    if (spapr_cpu->vpa_addr) {
        CPUState *cs = CPU(cpu);
        uint32_t dispatch;

        dispatch = ldl_be_phys(cs->as,
                               spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
        dispatch++;
        if ((dispatch & 1) != 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "VPA: incorrect dispatch counter value for "
                          "dispatched partition %u, correcting.\n", dispatch);
            dispatch++;
        }
        stl_be_phys(cs->as,
                    spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
    }
}

static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (spapr_cpu->vpa_addr) {
        CPUState *cs = CPU(cpu);
        uint32_t dispatch;

        dispatch = ldl_be_phys(cs->as,
                               spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
        dispatch++;
        if ((dispatch & 1) != 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "VPA: incorrect dispatch counter value for "
                          "preempted partition %u, correcting.\n", dispatch);
            dispatch++;
        }
        stl_be_phys(cs->as,
                    spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
    }
}
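
/*
 * Editor's note on the two handlers above: each dispatch and each
 * preemption increments the VPA dispatch counter exactly once, so the
 * parity checks encode the convention that the counter is even while
 * the partition is dispatched and odd while it is preempted. If the
 * stored value does not have the expected parity (e.g. because the
 * guest scribbled on its own VPA), the handlers log a guest error and
 * bump the counter once more to resynchronise rather than staying out
 * of step on every subsequent enter/exit.
 */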

static void spapr_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
    FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
    NMIClass *nc = NMI_CLASS(oc);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
    PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
    XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
    InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);
    XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc);
    VofMachineIfClass *vmc = VOF_MACHINE_CLASS(oc);

    mc->desc = "pSeries Logical Partition (PAPR compliant)";
    mc->ignore_boot_device_suffixes = true;

    /*
     * We set up the default / latest behaviour here. The class_init
     * functions for the specific versioned machine types can override
     * these details for backwards compatibility.
     */
    mc->init = spapr_machine_init;
    mc->reset = spapr_machine_reset;
    mc->block_default_type = IF_SCSI;

    /*
     * While KVM determines the maximum number of cpus in kvm_init()
     * using kvm_max_vcpus(), in TCG the limit is restricted by the
     * range of CPU IPIs available.
     */
    mc->max_cpus = SPAPR_IRQ_NR_IPIS;

    mc->no_parallel = 1;
    mc->default_boot_order = "";
    mc->default_ram_size = 512 * MiB;
    mc->default_ram_id = "ppc_spapr.ram";
    mc->default_display = "std";
    mc->kvm_type = spapr_kvm_type;
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE);
    mc->pci_allow_0_address = true;
    assert(!mc->get_hotplug_handler);
    mc->get_hotplug_handler = spapr_get_hotplug_handler;
    hc->pre_plug = spapr_machine_device_pre_plug;
    hc->plug = spapr_machine_device_plug;
    mc->cpu_index_to_instance_props = spapr_cpu_index_to_props;
    mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id;
    mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
    hc->unplug_request = spapr_machine_device_unplug_request;
    hc->unplug = spapr_machine_device_unplug;

    smc->update_dt_enabled = true;
    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power10_v2.0");
    mc->has_hotpluggable_cpus = true;
    mc->nvdimm_supported = true;
    smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED;
    fwc->get_dev_path = spapr_get_fw_dev_path;
    nc->nmi_monitor_handler = spapr_nmi;
    smc->phb_placement = spapr_phb_placement;
    vhc->cpu_in_nested = spapr_cpu_in_nested;
    vhc->deliver_hv_excp = spapr_exit_nested;
    vhc->hypercall = emulate_spapr_hypercall;
    vhc->hpt_mask = spapr_hpt_mask;
    vhc->map_hptes = spapr_map_hptes;
    vhc->unmap_hptes = spapr_unmap_hptes;
    vhc->hpte_set_c = spapr_hpte_set_c;
    vhc->hpte_set_r = spapr_hpte_set_r;
    vhc->get_pate = spapr_get_pate;
    vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
    vhc->cpu_exec_enter = spapr_cpu_exec_enter;
    vhc->cpu_exec_exit = spapr_cpu_exec_exit;
    xic->ics_get = spapr_ics_get;
    xic->ics_resend = spapr_ics_resend;
    xic->icp_get = spapr_icp_get;
    ispc->print_info = spapr_pic_print_info;

    /*
     * Force NUMA node memory size to be a multiple of
     * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity
     * in which LMBs are represented and hot-added
     */
    mc->numa_mem_align_shift = 28;
    mc->auto_enable_numa = true;

    smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */
    smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_NESTED_PAPR] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_RPT_INVALIDATE] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_DAWR1] = SPAPR_CAP_ON;

    /*
     * This cap specifies whether AIL mode 3 is supported for the
     * H_SET_MODE address-translation-mode-on-interrupt resource.
     * The default is modified by default_caps_with_cpu().
     */
    smc->default_caps.caps[SPAPR_CAP_AIL_MODE_3] = SPAPR_CAP_ON;

    spapr_caps_add_properties(smc);
    smc->irq = &spapr_irq_dual;
    smc->dr_phb_enabled = true;
    smc->linux_pci_probe = true;
    smc->smp_threads_vsmt = true;
    smc->nr_xirqs = SPAPR_NR_XIRQS;
    xfc->match_nvt = spapr_match_nvt;
    vmc->client_architecture_support = spapr_vof_client_architecture_support;
    vmc->quiesce = spapr_vof_quiesce;
    vmc->setprop = spapr_vof_setprop;
}

static const TypeInfo spapr_machine_info = {
    .name              = TYPE_SPAPR_MACHINE,
    .parent            = TYPE_MACHINE,
    .abstract          = true,
    .instance_size     = sizeof(SpaprMachineState),
    .instance_init     = spapr_instance_init,
    .instance_finalize = spapr_machine_finalizefn,
    .class_size        = sizeof(SpaprMachineClass),
    .class_init        = spapr_machine_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_FW_PATH_PROVIDER },
        { TYPE_NMI },
        { TYPE_HOTPLUG_HANDLER },
        { TYPE_PPC_VIRTUAL_HYPERVISOR },
        { TYPE_XICS_FABRIC },
        { TYPE_INTERRUPT_STATS_PROVIDER },
        { TYPE_XIVE_FABRIC },
        { TYPE_VOF_MACHINE_IF },
        { }
    },
};

static void spapr_machine_latest_class_options(MachineClass *mc)
{
    mc->alias = "pseries";
    mc->is_default = true;
}

#define DEFINE_SPAPR_MACHINE_IMPL(latest, ...)                          \
    static void MACHINE_VER_SYM(class_init, spapr, __VA_ARGS__)(       \
        ObjectClass *oc,                                                \
        void *data)                                                    \
    {                                                                   \
        MachineClass *mc = MACHINE_CLASS(oc);                           \
        MACHINE_VER_SYM(class_options, spapr, __VA_ARGS__)(mc);         \
        MACHINE_VER_DEPRECATION(__VA_ARGS__);                           \
        if (latest) {                                                   \
            spapr_machine_latest_class_options(mc);                     \
        }                                                               \
    }                                                                   \
    static const TypeInfo MACHINE_VER_SYM(info, spapr, __VA_ARGS__) =   \
    {                                                                   \
        .name = MACHINE_VER_TYPE_NAME("pseries", __VA_ARGS__),          \
        .parent = TYPE_SPAPR_MACHINE,                                   \
        .class_init = MACHINE_VER_SYM(class_init, spapr, __VA_ARGS__),  \
    };                                                                  \
    static void MACHINE_VER_SYM(register, spapr, __VA_ARGS__)(void)     \
    {                                                                   \
        MACHINE_VER_DELETION(__VA_ARGS__);                              \
        type_register_static(&MACHINE_VER_SYM(info, spapr, __VA_ARGS__)); \
    }                                                                   \
    type_init(MACHINE_VER_SYM(register, spapr, __VA_ARGS__))

#define DEFINE_SPAPR_MACHINE_AS_LATEST(major, minor) \
    DEFINE_SPAPR_MACHINE_IMPL(true, major, minor)
#define DEFINE_SPAPR_MACHINE(major, minor) \
    DEFINE_SPAPR_MACHINE_IMPL(false, major, minor)

/*
 * pseries-10.0
 */
static void spapr_machine_10_0_class_options(MachineClass *mc)
{
    /* Defaults for the latest behaviour inherited from the base class */
}

DEFINE_SPAPR_MACHINE_AS_LATEST(10, 0);
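
/*
 * Editor's note: each older versioned machine below first applies the
 * next-newer version's class options and then layers its own
 * backwards-compatibility deltas on top (the hw_compat_* property
 * arrays plus any explicit field overrides), so e.g. the 3.0 machine
 * transitively accumulates every delta from 9.2 downwards.
 */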

/*
 * pseries-9.2
 */
static void spapr_machine_9_2_class_options(MachineClass *mc)
{
    spapr_machine_10_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_9_2, hw_compat_9_2_len);
}

DEFINE_SPAPR_MACHINE(9, 2);

/*
 * pseries-9.1
 */
static void spapr_machine_9_1_class_options(MachineClass *mc)
{
    spapr_machine_9_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_9_1, hw_compat_9_1_len);
}

DEFINE_SPAPR_MACHINE(9, 1);

/*
 * pseries-9.0
 */
static void spapr_machine_9_0_class_options(MachineClass *mc)
{
    spapr_machine_9_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_9_0, hw_compat_9_0_len);
}

DEFINE_SPAPR_MACHINE(9, 0);

/*
 * pseries-8.2
 */
static void spapr_machine_8_2_class_options(MachineClass *mc)
{
    spapr_machine_9_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len);
    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.2");
}

DEFINE_SPAPR_MACHINE(8, 2);

/*
 * pseries-8.1
 */
static void spapr_machine_8_1_class_options(MachineClass *mc)
{
    spapr_machine_8_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_8_1, hw_compat_8_1_len);
}

DEFINE_SPAPR_MACHINE(8, 1);

/*
 * pseries-8.0
 */
static void spapr_machine_8_0_class_options(MachineClass *mc)
{
    spapr_machine_8_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_8_0, hw_compat_8_0_len);
}

DEFINE_SPAPR_MACHINE(8, 0);

/*
 * pseries-7.2
 */
static void spapr_machine_7_2_class_options(MachineClass *mc)
{
    spapr_machine_8_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_7_2, hw_compat_7_2_len);
}

DEFINE_SPAPR_MACHINE(7, 2);

/*
 * pseries-7.1
 */
static void spapr_machine_7_1_class_options(MachineClass *mc)
{
    spapr_machine_7_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_7_1, hw_compat_7_1_len);
}

DEFINE_SPAPR_MACHINE(7, 1);

/*
 * pseries-7.0
 */
static void spapr_machine_7_0_class_options(MachineClass *mc)
{
    spapr_machine_7_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_7_0, hw_compat_7_0_len);
}

DEFINE_SPAPR_MACHINE(7, 0);

/*
 * pseries-6.2
 */
static void spapr_machine_6_2_class_options(MachineClass *mc)
{
    spapr_machine_7_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
}

DEFINE_SPAPR_MACHINE(6, 2);

/*
 * pseries-6.1
 */
static void spapr_machine_6_1_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_6_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len);
    smc->pre_6_2_numa_affinity = true;
    mc->smp_props.prefer_sockets = true;
}

DEFINE_SPAPR_MACHINE(6, 1);

/*
 * pseries-6.0
 */
static void spapr_machine_6_0_class_options(MachineClass *mc)
{
    spapr_machine_6_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
}

DEFINE_SPAPR_MACHINE(6, 0);

/*
 * pseries-5.2
 */
static void spapr_machine_5_2_class_options(MachineClass *mc)
{
    spapr_machine_6_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_5_2, hw_compat_5_2_len);
}

DEFINE_SPAPR_MACHINE(5, 2);

/*
 * pseries-5.1
 */
static void spapr_machine_5_1_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_5_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len);
    smc->pre_5_2_numa_associativity = true;
}

DEFINE_SPAPR_MACHINE(5, 1);

/*
 * pseries-5.0
 */
static void spapr_machine_5_0_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    static GlobalProperty compat[] = {
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-5.1-associativity", "on" },
    };

    spapr_machine_5_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
    mc->numa_mem_supported = true;
    smc->pre_5_1_assoc_refpoints = true;
}

DEFINE_SPAPR_MACHINE(5, 0);

/*
 * pseries-4.2
 */
static void spapr_machine_4_2_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_5_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len);
    smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_OFF;
    smc->rma_limit = 16 * GiB;
    mc->nvdimm_supported = false;
}

DEFINE_SPAPR_MACHINE(4, 2);

/*
 * pseries-4.1
 */
static void spapr_machine_4_1_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    static GlobalProperty compat[] = {
        /* Only allow 4kiB and 64kiB IOMMU pagesizes */
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "pgsz", "0x11000" },
    };

    spapr_machine_4_2_class_options(mc);
    smc->linux_pci_probe = false;
    smc->smp_threads_vsmt = false;
    compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
}

DEFINE_SPAPR_MACHINE(4, 1);

/*
 * pseries-4.0
 */
static bool phb_placement_4_0(SpaprMachineState *spapr, uint32_t index,
                              uint64_t *buid, hwaddr *pio,
                              hwaddr *mmio32, hwaddr *mmio64,
                              unsigned n_dma, uint32_t *liobns, Error **errp)
{
    if (!spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma,
                             liobns, errp)) {
        return false;
    }
    return true;
}

static void spapr_machine_4_0_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_4_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len);
    smc->phb_placement = phb_placement_4_0;
    smc->irq = &spapr_irq_xics;
    smc->pre_4_1_migration = true;
}

DEFINE_SPAPR_MACHINE(4, 0);

/*
 * pseries-3.1
 */
static void spapr_machine_3_1_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_4_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len);

    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0");
    smc->update_dt_enabled = false;
    smc->dr_phb_enabled = false;
    smc->broken_host_serial_model = true;
    smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF;
}

DEFINE_SPAPR_MACHINE(3, 1);

/*
 * pseries-3.0
 */
static void spapr_machine_3_0_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_3_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len);

    smc->legacy_irq_allocation = true;
    smc->nr_xirqs = 0x400;
    smc->irq = &spapr_irq_xics_legacy;
}

DEFINE_SPAPR_MACHINE(3, 0);

static void spapr_machine_register_types(void)
{
    type_register_static(&spapr_machine_info);
}

type_init(spapr_machine_register_types)